Dataset columns:
query: string, length 9 to 9.05k
document: string, length 10 to 222k
metadata: dict
negatives: list, 30 items
negative_scores: list, 30 items
document_score: string, length 4 to 10
document_rank: string, 2 classes
Store the value with the given key. Hash collisions should be handled with Linked List Chaining. Implement this.
def put(self, key, value):
    hi = self.hash_index(key)
    if self.storage[hi]:
        current = self.storage[hi]
        while current.next and current.key != key:
            current = current.next
        if current.key == key:
            current.value = value
        else:
            current.next = HashTableEntry(key, value)
            self.numberOfItems += 1
    else:
        self.storage[hi] = HashTableEntry(key, value)
        self.numberOfItems += 1
    self.calculateLoad()
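This method leans on a few pieces of the surrounding class (HashTableEntry, hash_index, numberOfItems, calculateLoad). A minimal, self-contained sketch of what those supporting pieces might look like; the bodies below are assumptions for illustration, not the original class:

class HashTableEntry:
    # Linked-list node used for chaining: one key/value pair plus a next pointer.
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.next = None


class HashTable:
    def __init__(self, capacity=8):
        self.capacity = capacity
        self.storage = [None] * capacity
        self.numberOfItems = 0

    def hash_index(self, key):
        # Map a key to a slot; Python's built-in hash is used here for brevity.
        return hash(key) % self.capacity

    def calculateLoad(self):
        # Load factor = stored items / slots; a caller could resize past ~0.7.
        return self.numberOfItems / self.capacity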
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put(self, key, value):\n ha = self.myhash(key)\n if key not in self.hashmap[ha][0]:\n self.hashmap[ha][0].append(key)\n self.hashmap[ha][1].append(value)\n else:\n self.hashmap[ha][1][self.hashmap[ha][0].index(key)] = value", "def put(self, key, value):\n index = key % self.size\n\n if not self.hash_table[index]:\n # We do not have anything in this bin, just create a new node\n self.hash_table[index] = ListNode(key, value)\n else:\n # We do have something in this bin, traverse it checking to see if we have a matching key.\n # If not just append a node to the end of the bin\n curr_node = self.hash_table[index]\n\n while curr_node:\n if curr_node.key == key:\n curr_node.value = value\n return\n if not curr_node.next:\n break\n curr_node = curr_node.next\n # Did not find a matching key here, so append a key, value pair in this bin\n curr_node.next = ListNode(key, value)", "def put(self, key: int, value: int) -> None:\n hashvalue = key % 1000\n \n if self.hashset[hashvalue] ==None:\n \n self.hashset[hashvalue]=Node((key,value))\n else :\n head= self.hashset[hashvalue]\n while head:\n k,v = head.data \n if k==key:\n head.data = (key,value)\n return \n if head.next==None:\n break\n else:\n head = head.next\n head.next= Node((key,value))", "def put(self, key: int, value: int) -> None:\n pos = key % self.space\n head = self.hash_table[pos]\n curr = head\n\n while curr.next:\n if curr.next.key == key:\n curr.next.val = value\n return\n curr = curr.next\n\n curr.next = Node(key, value)", "def put(self, key: int, value: int) -> None:\n\n index = self.hash(key)\n\n # If there's no other values here, just chuck it in!\n if not self.map[index]:\n self.map[index] = lNode((key, value))\n\n # Otherwise, we either need to rewrite \n # existing node or chuck on end of list - i.e. 
we have collision\n else:\n # Check head first since we'll need curr.next\n curr = self.map[index]\n\n # rewrite value if it exists\n if curr.val[0] == key:\n curr.val = (key, value)\n\n # Otherwise chuck on end of list\n else:\n while curr.next:\n\n # Forgot condition here...\n if curr.next.val[0] == key:\n curr.next.val = (key, value)\n return\n\n curr = curr.next\n\n curr.next = lNode((key,value))", "def put(self, key: int, value: int) -> None:\n idx = key % self.size\n\n if self.hashmap[idx] == None:\n self.hashmap[idx] = ListNode(key, value)\n else:\n curr = self.hashmap[idx]\n while True:\n if curr.key == key:\n curr.value = value\n return\n if curr.next == None:\n break\n curr = curr.next\n curr.next = ListNode(key, value)", "def put(self, key, value):\n # Your code here\n index = self.hash_index(key)\n # check for linkedlist if it is empty\n if self.data[index].head == None:\n self.data[index].head = HashTableEntry(key, value) # similar to node class we used before\n self.count +=1\n # #-----\n # self.loadfactor = self.count / self.capacity\n # if self.loadfactor > 0.7:\n # self.resize(self.capacity * 2)\n \n else:\n # Linklist is not empty \n # create reference for the head node\n cur= self.data[index].head\n\n while cur.next:\n # checking if the key already exist then we will just override the value\n if cur.key == key:\n cur.value == value\n # checking each node of the Linkedlist till we break the while loop \n cur= cur.next\n # if key is not found, add the new hashtableentry(key, value) to the linkedlist\n cur.next = HashTableEntry(key, value)\n self.count +=1", "def put(self, key, value):\n h = key%self.m\n a = self.a\n if a[h]:\n a[h].val = value\n else:\n a[h] = ListNode(value)", "def put(self, key, value):\n\n node = self._get_node(key) # Get the node with the key (if it exists)\n\n if node is None:\n index = self._get_index(key)\n self._buckets[index].add_front(key, value) # Key was not found (add it to the front of the list)\n self.size += 1\n else:\n node.value = value # Key was found (update the value)", "def put(self, key, value):\n hashv = self.hash(key)\n bucket=self.hashmap[hashv]\n for i,(k,v) in enumerate(bucket):\n if k==key:\n bucket[i]=(key,value)\n return\n bucket.append((key,value))", "def put(self, key: int, value: int) -> None:\n t = key % 20011\n for item in self.hash[t]:\n if item[0] == key:\n item[1] = value\n return\n self.hash[t].append([key, value])", "def put(self, key, value):\n # Your code here\n\n idx = self.hash_index(key)\n entry = HashTableEntry(key, value)\n match = None\n\n # if there's already something stored at this index\n if self.hash_table[idx] is not None:\n curr_node = self.hash_table[idx]\n\n # look for an existing entry with the same key\n while match is None:\n if curr_node.key == key:\n match = curr_node\n else:\n curr_node = curr_node.next\n \n # if we reached the tail and still haven't found an existing match, break out of the loop\n if curr_node is None:\n break\n\n # if we reached the end and didn't find an existing entry matching the provided key\n # add a new entry at the end\n if match is None and curr_node.next is None:\n curr_node.next = entry\n self.total_items+= 1\n self.determine_resize_type()\n\n # if an entry with the provided key already exists, just update its value with the provided value\n elif match is not None:\n match.value = value\n \n \n # if nothing exists as this index, just add the entry\n else:\n self.hash_table[idx] = entry\n self.total_items+= 1\n self.determine_resize_type()", "def put(self, key, 
value):\n index = key % self.size\n\n if not self.bucket[index]:\n self.bucket[index] = ListNode(key , value)\n else:\n cur = self.bucket[index]\n\n while cur:\n if cur.key == key:\n cur.val = value\n return\n if not cur.next: break\n cur = cur.next\n cur.next = ListNode(key, value)", "def store(self, key, value):\n pass", "def put(self, key, value):\r\n\r\n\r\n\t\tindex = self.get_index(key) # get the index\r\n\t\tcur_list = self._buckets[index] # this is the linked list\r\n\r\n\t\t# remove the key and assign the returned boolean in removed\r\n\t\tremoved = cur_list.remove(key)\r\n\t\tcur_list.add_front(key, value) # re-add the key with updated value\r\n\r\n\t\t# if removed is false, then a new key was added so increase size by 1\r\n\t\tif not removed:\r\n\t\t\tself.size += 1", "def __setitem__(self, key, value):\n\n # Get hashed key\n i = self.hash(key)\n\n # If key not already in hash map\n if key not in self.keys_set:\n\n # Increment size\n self.size += 1\n\n # Add key to set of keys for reference\n self.keys_set.add(key)\n\n # Append key location to keys reference hash table\n self.keys_ref[i].append(key)\n\n # Apped value to hash map chain\n self.table[i].append(value)\n\n # If key already in hash map\n else:\n\n # Get key index for hash map chain\n chain_idx = self.keys_ref[i].index(key)\n\n # Overwrite value in hash map\n self.table[i][chain_idx] = value\n\n # If load greater than maximum load factor\n if self.load() >= self.max_load_factor:\n\n # Call resize for hash map\n self.resize()", "def put(self, key, value):\n # O(1) in best case and O(n) in worst case Time Complexity\n # O(1) in best case and O(n) in worst case Space Complexity\n\n currentNode = self.getElement(key)\n if (currentNode.next == None):\n currentNode.next = Node(key, value)\n else:\n currentNode.next.v = value\n return", "def put(self, key: int, value: int) -> None:\n hashKey = key % 1000\n if not self.bucket[hashKey]:\n self.bucket[hashKey] = LinkNode(key, value)\n else:\n node = self.bucket[hashKey]\n while node:\n if node.pair[0] == key:\n node.pair[1] = value\n return\n if not node.next:\n node.next = LinkNode(key, value)\n node = node.next", "def set(self, key, value):\n # First, look for the key in the cache using `self.get()`\n # If not exists (returns None), add key-value to head\n # If exists, pop old key-value from list, add new value to head\n pass", "def put(self, key, value):\n hash_key = key % self.key_space\n self.hash_table[hash_key].update(key, value)", "def put(self, key: int, value: int) -> None:\n index = key % 10000\n head = self.array[index]\n while head.next:\n head = head.next\n if head.key == key:\n head.value = value\n return\n head.next = LinkedListNode(key, value)", "def put(self, key, value):\n self._store[key] = value", "def set_value(self, key: keyType, new_value: valueType) -> None:\n self.validate(key, new_value)\n head_node_index, chain_node_index = self.exist_key(key)\n # \"head_node_index is equal to -1\" means that 'key' doesn't exist in dictionary object.\n if head_node_index == -1:\n self.add(key, new_value)\n else:\n self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values = [new_value]", "def __setitem__(self, key_to_store: int, value: Any):\n hash = self.compute_hash(key_to_store)\n\n # First time we're seeing this\n if hash not in self.storage:\n self.storage[hash] = list()\n\n self.storage[hash].append((key_to_store, value))", "def put(self, k: Any, v: Any):\n i = abs(hash(k)) % self.size\n current = self.data[i]\n while current is not None:\n if 
current.key == k:\n current.value = v\n return\n current = current.next\n new_node = self.Node(k, v)\n new_node.next = self.data[i]\n self.data[i] = new_node", "def add(self, key: keyType, value: valueType) -> None:\n\n self.validate(key, value)\n hash_address = self.get_hash_address(key)\n head_node = self.hashTable[hash_address]\n\n # To uniform form of key\n uniform_key = key\n if isinstance(key, (list, set)):\n uniform_key = tuple(key)\n # else:\n # uniform_key = key\n # Create a new node and assign values.\n node_new = ChainNode()\n node_new.key = uniform_key\n node_new.values.append(value)\n\n # 'head_node.count == 0' means that there is no collision.\n if head_node.count == 0:\n head_node.singlyLinkedList.append(node_new)\n head_node.count = 1\n head_node.keys.append(uniform_key)\n else:\n # To deal with collision.\n if uniform_key not in head_node.keys:\n head_node.singlyLinkedList.append(node_new)\n head_node.keys.append(uniform_key)\n head_node.count = head_node.count + 1\n else:\n # For the same 'key', determine whether 'value' already exists. If not, then store.\n for index in range(len(head_node.singlyLinkedList)):\n if uniform_key == head_node.singlyLinkedList[index].key:\n if value not in head_node.singlyLinkedList[index].values:\n head_node.singlyLinkedList[index].values.append(value)\n head_node.count = head_node.count + 1\n break\n logger.info(\"Successfully add a new element.\")", "def put(self, key: int, value: int) -> None:\n idx = key % 1000\n if not self.map[idx]:\n self.map[idx] = Node(key, value)\n else:\n curr = self.map[idx]\n while True:\n if curr.key == key:\n curr.val = value\n return\n if not curr.next:\n break\n curr = curr.next\n curr.next = Node(key, value)", "def set(self, key, value):\n number = self._hash(key)\n stored_key = number if self.function == 'fnv' else key\n if self.get(key) is None:\n self.bucket_list[number % self.bucket_number].insert(stored_key, value)", "def set(self, key, value):\n hk = hash(key)\n h = self._hash(hk)\n i = 0\n while i < self.size:\n if self.slot[h] == None:\n self.slot[h] = key\n self.data[h] = value\n break\n i += 1\n h = self._rehash(hk, i)", "def store(self, key, value):\n self._cache[key] = value" ]
[ "0.81106144", "0.80772465", "0.8076398", "0.80682015", "0.8036524", "0.8018422", "0.79838794", "0.7905589", "0.7890816", "0.7876302", "0.78534526", "0.781982", "0.77786344", "0.77710927", "0.77464396", "0.76788867", "0.7675746", "0.7673336", "0.7644221", "0.76339984", "0.76021016", "0.7564971", "0.7562352", "0.7544747", "0.7497845", "0.74859893", "0.7462402", "0.74502635", "0.742286", "0.7416171" ]
0.8438726
0
Write a program that takes as input a BST and an integer k, and returns the k largest elements in the BST in decreasing order.
def find_k_largest_in_bst_recursively(tree, k):
    def find_k_largest_in_bst_helper(tree):
        # Smart: recurse iff we don't have the k largest elements collected yet
        if tree and len(k_largest_elements) < k:
            find_k_largest_in_bst_helper(tree.right)
            if len(k_largest_elements) < k:
                k_largest_elements.append(tree.data)
                find_k_largest_in_bst_helper(tree.left)

    k_largest_elements = []
    find_k_largest_in_bst_helper(tree)
    return k_largest_elements
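A quick check of the function above on a small tree; the BSTNode class is an assumption (the traversal only needs .data, .left and .right attributes):

class BSTNode:
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right


# BST containing 2, 3, 4, 5, 7, 8, 9 with 5 at the root.
root = BSTNode(5,
               BSTNode(3, BSTNode(2), BSTNode(4)),
               BSTNode(8, BSTNode(7), BSTNode(9)))

print(find_k_largest_in_bst_recursively(root, 3))  # [9, 8, 7]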
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findKthLargest(self, nums: List[int], k: int) -> int:\n return sorted(nums)[-k]", "def _get_k_largest(lst, k):\n sorted_lst = sorted([(val, index) for index, val in enumerate(lst)])\n return list(reversed(sorted_lst[-k:]))", "def kth_largest(arr: list, k: int):\n # Do not search if k is larger than total number of elements\n if k > len(arr):\n raise IndexError\n # Count all numbers\n nums = Counter(arr)\n # Go from the largest to smaller ones\n for key in sorted(nums, reverse=True):\n if nums[key] >= k:\n return key\n else:\n k -= nums[key]", "def test_find_kth_smallest(self):\n sortedValues = sorted(self.values)[::-1]\n for index, value in enumerate(sortedValues):\n valueFound = self.tree.findKthGreatest(index + 1)\n self.assertEquals(value, valueFound)", "def _kth_largest(self, k: int, start : int, end : int):\n # 1. select a random pivot\n pivotIndex = self.get_pivot_index(start, end)\n # print('Before partition start, end, pivotIndex ', start, end, pivotIndex)\n pivotIndex = self.partition(pivotIndex, start, end)\n # print(f'After partition , data = {data}, pivotIndex = {pivotIndex}')\n\n if end-pivotIndex >=k:\n return self._kth_largest(k, pivotIndex+1, end)\n \n elif end-pivotIndex+1 == k:\n return pivotIndex, self.data[pivotIndex]\n else:\n return self._kth_largest(k-1-(end-pivotIndex), start , pivotIndex-1)", "def kth_smallest(root, k=1):\n\n node_status = defaultdict(lambda: 'unvisited')\n\n count = 0\n stack = [root] # A list is a stack.\n\n while len(stack) != 0:\n do_increment = False\n current_node = stack[-1] # ie peek\n\n node_status[current_node] = 'visited'\n\n if current_node.left is not None:\n\n left_status = node_status[current_node.left]\n if left_status == 'exited':\n stack.pop()\n do_increment = True\n\n elif left_status == 'unvisited':\n stack.append(current_node.left)\n continue\n else:\n stack.pop()\n do_increment = True\n\n if current_node.right is not None:\n stack.append(current_node.right)\n\n if do_increment:\n node_status[current_node] = 'exited'\n count += 1\n if count == k:\n return current_node.key\n\n raise IndexError(\"Tree is too small! 
Only found: {} nodes\".format(count))", "def top_k(self, k):\n with self.mutating:\n topk = heapq.nsmallest(k, self.queue)\n return topk", "def top_k(self, k = 1):\n\t if self.shapley_rank == {}:\n\t \treturn []\n\n\t n = self.nodes\n\t topknodes = []\n\t i = 0\n\t count = 0\n\t while count < k and not i == n:\n\t if self.shapley_rank[i][0] not in topknodes and not self.is_adj(self.shapley_rank[i][0], topknodes):\n\t topknodes.append(self.shapley_rank[i][0])\n\t count += 1\n\t i += 1\n\t i = 0\n\t if not count == k:\n\t while not count == k:\n\t if self.shapley_rank[i][0] not in topknodes:\n\t topknodes.append(self.shapley_rank[i][0])\n\t count += 1\n\t i += 1\n\t return topknodes", "def find_order(input, k):\n if ( k >= len(input) or k < 0 ):\n return None\n elif len(input) == 0:\n return None\n elif len(input) == 1:\n # k should be 0\n assert k == 0\n return input[0]\n else:\n pivot = input[len(input)//2]\n left = [x for x in input if x <= pivot]\n left.remove(pivot)\n \n if len(left) == k:\n return pivot\n elif len(left) > k:\n return find_order(left, k)\n else:\n right = [x for x in input if x > pivot]\n return find_order(right, k-1-len(left))", "def largestSubarray(self, nums: List[int], k: int) -> List[int]:\n\n if k == 1:\n return [max(nums)]\n\n hash_map = {}\n for i, n in enumerate(nums):\n hash_map[n] = i\n\n candidates = nums[: len(nums) - k + 1]\n print(candidates)\n mx = max(candidates)\n mx_idx = hash_map[mx]\n op = nums[mx_idx : mx_idx + k]\n return op", "def heapdown(h, k):\n\n #put this value in the correct place\n v = h[k]\n\n while 2 * k < len(h):\n\n #assign j to be the left child\n j = 2 * k\n\n #is there a child to the right\n if j + 1 < len(h):\n\n #is the left child smaller than the right child\n if h[j] < h[j+1]:\n j = j + 1\n\n #if v is greater than its larger child\n if v >= h[j]:\n break\n else:\n h[k] = h[j]\n k = j\n\n h[k] = v", "def kth_to_last_recursive(node: Node, k: int):\n\n if node == None:\n return 0\n\n index = kth_to_last_recursive(node.next, k) + 1\n\n if index == k:\n print(node.value)\n\n return index", "def swap(root, k):\n q = deque([(root, 1)])\n while q:\n node, level = q.popleft()\n if node is None:\n continue\n if level % k == 0:\n node.left, node.right = node.right, node.left\n q.append((node.left, level+1))\n q.append((node.right, level+1))", "def top(self, k):\n if not 1 <= k <= len(self):\n raise ValueError('Illegal value for k')\n \n # making a copy of the original list\n temp = PositionalList()\n for item in self._data:\n temp.add_last(item)\n\n # repeatedly find, report, and remove element with largest count\n for j in range(k):\n # find and report next highest from temp\n highPos = temp.first()\n walk = temp.after(highPos)\n while walk is not None:\n if walk.element()._count > highPos.element()._count:\n highPos = walk\n walk = temp.after(walk)\n # found the element with highest count\n yield highPos.element()._value # report element to user\n temp.delete(highPos)", "def top(self, k):\n if not 1 <= k <= len(self):\n raise ValueError('illegal value for k')\n\n # making a copy of the original list\n temp = PositionalList()\n for item in self._data: # positional lists support iteration\n temp.add_last(item)\n\n # repeatedly find, report and remove element with largest count\n for j in range(k):\n # find and report next highest from temp\n highPos = temp.first()\n walk = temp.after(highPos)\n while walk is not None:\n if walk.element()._count > highPos.element()._count:\n highPos = walk\n walk = temp.after(walk)\n # found the element 
with highest count\n yield highPos.element()._value # report element to user\n temp.delete(highPos) # remove from temp list", "def kth_to_last_recursive_1(self, k):\n class Index(object):\n def __init__(self):\n self.index = 0\n idx = Index()\n node = self._kth_to_last_recursive_1(self.head, k, idx)\n if node is None:\n raise IndexError('List index out of range')\n else:\n return node.value", "def find_max_in_array(arr, k):\r\n print(\" Amazon interview question\")\r\n arr[:] = sorted(arr)\r\n return ((arr[len(arr)-k]))", "def twoSumLessThanK(nums, k):\n nums.sort()\n start = 0\n end = len(nums) - 1\n max_sum = -1\n while start < end:\n curr_sum = nums[start] + nums[end]\n if curr_sum < k:\n max_sum = max(curr_sum, max_sum)\n if curr_sum >= k:\n end -= 1\n else:\n start += 1\n\n return max_sum", "def top_k(a, k=1):\n if not type(a) is Blob:\n raise ValueError('`a` should be neoml.Blob.')\n\n if int(k) <= 0:\n raise ValueError('`k` should be > 0.')\n\n if a.size == 0:\n raise ValueError(\"The blob shouldn't be empty.\")\n \n return Blob(PythonWrapper.blob_top_k(a._internal, int(k)))", "def kth_to_last_recursive(self, k):\n node, _ = self._kth_to_last_recursive(self.head, k)\n if node is None:\n raise IndexError('List index out of range')\n else:\n return node.value", "def _kth_to_last_recursive(self, head, k):\n if head is None:\n return None, 0 \n node, index = self._kth_to_last_recursive(head.next_node, k) \n index += 1 \n if index == k:\n return head, index\n return node, index", "def get_top_N(root,n):\n\n\tglobal files\n\tfind_files(root)\n\theapq.heapify(files)\n\tret = heapq.nlargest(n,files)\n\tfiles = []\n\treturn ret", "def fetch_top_k(vect, mat, k):\n resultant = np.dot(mat, vect)\n arglist = np.argsort(resultant)\n arglist = arglist[-1:(-1 - k):-1]\n return arglist, resultant", "def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n max_heap = []\n item_dict = {}\n result = []\n\n for i in range(len(nums)):\n item = [-nums[i], False]\n heapq.heappush(max_heap, item)\n\n if nums[i] not in item_dict:\n item_dict[nums[i]] = [item]\n else:\n item_dict[nums[i]].append(item)\n\n if i - k >= 0:\n # \"remove\" element from heap\n item_to_remove = nums[i - k]\n\n in_heap_item = item_dict[item_to_remove].pop()\n in_heap_item[1] = True # mark as removed\n\n while max_heap[0][1] is True:\n heapq.heappop(max_heap)\n\n result.append(-max_heap[0][0])\n if i == k - 1:\n # handle the first window\n result.append(-max_heap[0][0])\n\n return result", "def top(self, k):\n if not 1 <= k <= len(self):\n raise ValueError(\"Illegal value for k\")\n temp = PositionalList()\n for item in self.data:\n temp.add_last(item)\n\n for j in range(k):\n high_pos = temp.first()\n walk = temp.after(high_pos)\n while walk is not None:\n if walk.element()._count > high_pos.element()._count:\n high_pos = walk\n walk = temp.after(walk)\n yield high_pos.element()._value\n temp.delete(high_pos)", "def next_larger(self, k):\n node = self.find(k)\n return node and node.next_larger()", "def top(self, array, key, k):\n\n return heapq.nlargest(\n k,\n range(len(array)),\n key\n )", "def maxSlidingWindow_v2(self, nums: List[int], k: int) -> List[int]:\n m_queue = deque()\n result = []\n\n for i in range(len(nums)):\n while m_queue and m_queue[-1] < nums[i]:\n m_queue.pop()\n m_queue.append(nums[i])\n\n if i - k >= 0:\n item_to_remove = nums[i - k]\n\n if item_to_remove == m_queue[0]:\n m_queue.popleft()\n\n if i >= k - 1:\n result.append(m_queue[0])\n\n return result", "def kth_to_last_recursive(head, k):\n if not 
head:\n return 0\n\n index = kth_to_last_recursive(head.next(), k)\n\n if index == k:\n print(head.value())\n\n index += 1\n return index", "def kthSmallest(A, k):\n if k >= len(A):\n return None\n return select(A, 0, len(A) - 1, k)" ]
[ "0.7045558", "0.701398", "0.6677336", "0.6673362", "0.66666937", "0.65226465", "0.65028167", "0.646503", "0.6402724", "0.63980407", "0.6357561", "0.6304622", "0.617777", "0.61401814", "0.6116958", "0.6113665", "0.6077666", "0.60618454", "0.60614383", "0.60428995", "0.60170496", "0.59952575", "0.59936005", "0.597438", "0.59521353", "0.5933596", "0.5875272", "0.5846028", "0.5825565", "0.5808315" ]
0.8523908
0
Calculate upper percentile MCP cutoff for avoiding Andor saturation. Andor begins saturating at ~5000 for the 'signal' value. Set the percentile cutoff of incident fluence (mcp) to the percentile at which Andor reaches 4000, well before saturation. If that is greater than the 99.9th percentile, set the percentile cutoff to 99.9 to eliminate potential strong outliers in incident fluence.
def _calculate_percentile_cutoff(run_numbers):
    mcp_values = []
    andor_values = []
    for run_number in run_numbers:
        current_data_path = ''.join([DATA_PATH, 'run', str(run_number), 'allevts.h5'])
        f = h5py.File(current_data_path, 'r')
        current_phot = _get_photon_energy(f, run_number)
        current_mcp = np.array(f['Acqiris2']['acq'])
        current_mcp = current_mcp[(current_phot > 781) & (current_phot < 782)]
        mcp_values.extend(current_mcp)
        current_andor = np.array(f['Andor']['signal'])
        current_andor = current_andor[(current_phot > 781) & (current_phot < 782)]
        andor_values.extend(current_andor)
    #plt.figure()
    #plt.scatter(mcp_values, andor_values)
    mcp_percentile_cutoff = min([percentileofscore(andor_values, 4000), 99.9])
    return mcp_percentile_cutoff
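The key step is percentileofscore from scipy.stats, which returns the percentile rank of a score within a sample. A small self-contained illustration with synthetic numbers (the values are made up, not beamline data):

import numpy as np
from scipy.stats import percentileofscore

# Synthetic Andor readings: most events far below 4000, a few near saturation.
rng = np.random.default_rng(0)
andor_values = np.concatenate([rng.normal(1500, 400, 9900),
                               rng.normal(4800, 200, 100)])

# Percentile at which the detector reading hits 4000, capped at 99.9
# to suppress extreme outliers, mirroring the cutoff logic above.
mcp_percentile_cutoff = min(percentileofscore(andor_values, 4000), 99.9)
print(round(mcp_percentile_cutoff, 2))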
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cut_spectrum(input_spectrum, desired_frequency_range):\n channels_ip = []\n for ip in input_spectrum.GetChannels():\n channel_ip = []\n channel_op = []\n for n, i in enumerate(ip):\n if n > desired_frequency_range[0] / input_spectrum.GetResolution() and n < desired_frequency_range[1] / \\\n input_spectrum.GetResolution():\n channel_ip.append(i)\n else:\n channel_ip.append(0.0)\n channel_op.append(0.0)\n channels_ip.append(tuple(channel_ip))\n input_spectrum_modified = sumpf.Spectrum(channels=tuple(channels_ip), resolution=input_spectrum.GetResolution(),\n labels=input_spectrum.GetLabels())\n return input_spectrum_modified", "def get_silence_threshold(sound, lower_quantile):\n soundint = sound.to_intensity()\n max_intensity = call(soundint, 'Get quantile', 0.0, 0.0, 1)\n sil_intensity = call(soundint, 'Get quantile', 0.0, 0.0, lower_quantile)\n return sil_intensity - max_intensity", "def sigmaCorrectionFactor(baselineMode, npts, percentile):\n edgeValue = (npts/128.)**0.08\n if (baselineMode == 'edge'):\n return(edgeValue)\n value = edgeValue*2.8*(percentile/10.)**-0.25\n casalogPost(\"sigmaCorrectionFactor using percentile = %g to get sCF=%g\" % (percentile, value), debug=True)\n return(value)", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def low_cut_filter(x, fs, cutoff=70):\n nyquist = fs // 2\n norm_cutoff = cutoff / nyquist\n\n # low cut filter\n fil = firwin(255, norm_cutoff, pass_zero=False)\n lcf_x = lfilter(fil, 1, x)\n\n return lcf_x", "def iqr_outlier_cutoff(myseries, multiplier=1.5):\n # calculate IQR\n q1 = myseries.quantile(.25)\n q3 = myseries.quantile(.75)\n iqr = q3 - q1\n\n # get outlier cutoff\n cutoff = q3 + iqr*multiplier\n\n return cutoff", "def compute_percentile(value, cutoffs):\n\tif value < cutoffs[0]:\n\t\treturn 0.0\n\n\tfor i, cutoff in enumerate(cutoffs):\n\t\tif value < cutoff:\n\t\t\treturn math.floor(100 * (float(i)/(len(cutoffs))))\n\t\t\tbreak\n\treturn 100.0", "def _standardize_cutoff(cutoff):\n cutoff = np.asarray(cutoff)\n cutoff[0] = max(0., cutoff[0])\n cutoff[1] = min(1., cutoff[1])\n cutoff[0] = np.min([cutoff[0], 0.09])\n cutoff[1] = np.max([cutoff[1], 0.91])\n return cutoff", "def butter_bandstop_filter(data, lowcut, highcut, fs, order):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n i, u = sg.butter(order, (low, high), btype='bandstop')\n y = sg.filtfilt(i, u, data)\n return y", "def get_thresh(amp,c): \n #Helper functions for fitting the psychometric curve, need to be\n #defined within the local scope, so that they can grok the data:\n \n def weib_fit(pars):\n thresh,slope = pars\n return weibull(x,thresh,slope,guess,flake)\n\n def err_func(pars):\n return y-weib_fit(pars)\n\n #Throw away the None's:\n hit_amps = amp[c==1]\n miss_amps = amp[c==0]\n\n # Get rid of floating point error:\n hit_amps = defloaterrorize(hit_amps)\n miss_amps = defloaterrorize(miss_amps)\n\n all_amps = np.hstack([hit_amps,miss_amps])\n stim_intensities = np.unique(all_amps)\n\n n_correct = [len(np.where(hit_amps==i)[0]) for i in stim_intensities]\n n_trials = [len(np.where(all_amps==i)[0]) for i in stim_intensities]\n Data = zip(stim_intensities,n_correct,n_trials)\n x = []\n y = []\n n = []\n for idx,this in enumerate(Data):\n #Take only cases where there were at least n_up observations:\n if n_trials[idx]>=self.n_up:\n #Contrast values: 
\n x = np.hstack([x,this[2] * [this[0]]])\n #% correct:\n y = np.hstack([y,this[2] * [this[1]/float(this[2])]])\n\n initial = np.mean(x),slope\n this_fit , msg = leastsq(err_func,initial)\n return this_fit,x,y", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def clip_signal(signal, clipping_thresh=1000, clipped_value=215):\n index_factor = rate / CHUNK\n while index_factor * np.argmax(signal) >= clipping_thresh:\n signal[np.argmax(signal)] = 0\n return signal", "def _adjust_screening_percentile(screening_percentile, mask_img, verbose=0):\n original_screening_percentile = screening_percentile\n # correct screening_percentile according to the volume of the data mask\n mask_volume = _get_mask_volume(mask_img)\n if mask_volume > 1.1 * MNI152_BRAIN_VOLUME:\n warnings.warn(\n \"Brain mask is bigger than the volume of a standard \"\n \"human brain. This object is probably not tuned to \"\n \"be used on such data.\",\n stacklevel=2,\n )\n elif mask_volume < 0.005 * MNI152_BRAIN_VOLUME:\n warnings.warn(\n \"Brain mask is smaller than .5% of the volume \"\n \"human brain. This object is probably not tuned to \"\n \"be used on such data.\",\n stacklevel=2,\n )\n\n if screening_percentile < 100.0:\n screening_percentile = screening_percentile * (\n MNI152_BRAIN_VOLUME / mask_volume\n )\n screening_percentile = min(screening_percentile, 100.0)\n # if screening_percentile is 100, we don't do anything\n\n if verbose > 1:\n print(\n f\"Mask volume = {mask_volume:g}mm^3 = {mask_volume / 1000.0:g}cm^3\"\n )\n print(\n \"Standard brain volume \"\n f\"= {MNI152_BRAIN_VOLUME:g}mm^3 \"\n f\"= {MNI152_BRAIN_VOLUME / 1.0e3:g}cm^3\"\n )\n print(\n f\"Original screening-percentile: {original_screening_percentile:g}\"\n )\n print(\n f\"Volume-corrected screening-percentile: {screening_percentile:g}\"\n )\n return screening_percentile", "def filter_pupil(self, hp=0.01, lp=10.0):\r\n self.logger.info('Band-pass filtering of pupil signals, hp = %2.3f, lp = %2.3f'%(hp, lp))\r\n\r\n self.lp_filt_pupil = _butter_lowpass(data=self.interpolated_pupil.astype('float64'), highcut=lp, fs=self.sample_rate, order=3)\r\n self.hp_filt_pupil = _butter_highpass(data=self.interpolated_pupil.astype('float64'), lowcut=hp, fs=self.sample_rate, order=3)\r\n self.bp_filt_pupil = self.hp_filt_pupil - (self.interpolated_pupil-self.lp_filt_pupil)\r\n self.baseline_filt_pupil = self.lp_filt_pupil - self.bp_filt_pupil\r\n\r\n # import mne\r\n # from mne import filter\r\n # self.lp_filt_pupil = mne.filter.low_pass_filter(x=self.interpolated_pupil.astype('float64'), Fs=self.sample_rate, Fp=lp, filter_length=None, method='iir', iir_params={'ftype':'butter', 'order':3}, picks=None, n_jobs=1, copy=True, verbose=None)\r\n # self.hp_filt_pupil = mne.filter.high_pass_filter(x=self.interpolated_pupil.astype('float64'), Fs=self.sample_rate, Fp=hp, filter_length=None, method='iir', iir_params={'ftype':'butter', 'order':3}, picks=None, n_jobs=1, copy=True, verbose=None)\r\n # self.bp_filt_pupil = self.hp_filt_pupil - (self.interpolated_pupil-self.lp_filt_pupil)\r\n # self.baseline_filt_pupil = self.lp_filt_pupil - self.bp_filt_pupil\r", "def filterHighs( adata, bandlimit = 5000) :\n\n # TODO: compute Fourier representation of data\n\n fig = plt.figure()\n fig.add_subplot(2, 1, 1)\n plt.plot( np.real(fdata))\n plt.xlim( [0,adata.size])\n\n # TODO: filter 
data\n\n fig.add_subplot(2, 1, 2)\n plt.plot( np.real(fdata))\n plt.xlim( [0,adata.size])\n plt.show()\n\n # TODO: restore time domain representation of data\n\n return adata_filtered", "def outlier_thresholds(dataframe, col_name, low_quantile, up_quantile):\n quartile1 = dataframe[col_name].quantile(low_quantile)\n quartile3 = dataframe[col_name].quantile(up_quantile)\n interquantile_range = quartile3 - quartile1\n up_limit = quartile3 + 1.5 * interquantile_range\n low_limit = quartile1 - 1.5 * interquantile_range\n return low_limit, up_limit", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, axis=0): \n omega = 0.5 * fs\n low = lowcut / omega\n high = highcut / omega\n b, a = signal.butter(order, [low, high], btype='band')\n y = signal.lfilter(b, a, data, axis=0)\n return y", "def heat_chi2_cut(stream, df):\n heat_chi2_threshold = quality_parameters[stream]['heat_chi2_threshold']\n chi2_heat = df['chi2_heat']\n energy_adu_heat = df['energy_adu_heat']\n \n # chi2_threshold = heat_chi2_threshold * ( 1 + (energy_adu_heat/2e3)**2 )\n chi2_threshold = heat_chi2_threshold_function(\n heat_chi2_threshold,\n energy_adu_heat\n )\n \n df['chi2_heat_cut'] = (chi2_heat < chi2_threshold)\n \n return None", "def telluric_mask(data, sigma=2.5, lsf=4.8, pwv=None, pixel_start=10, pixel_end=-30, outlier_rejection=2.5, diagnostic=True, save_to_path='./'):\n\n\tdata.flux = data.flux[pixel_start:pixel_end]\n\tdata.wave = data.wave[pixel_start:pixel_end]\n\tdata.noise = data.noise[pixel_start:pixel_end]\n\n\tdata0 = copy.deepcopy(data)\n\n\t# take the closest airmass from the header\n\tairmass = float(round(data.header['AIRMASS']*2)/2)\n\tif airmass > 3.0: airmass = 3.0\n\n\t# simple chi2 comparison with different pwv\n\tif pwv is None:\n\t\tpwvs = [0.5, 1.0, 1.5, 2.5, 3.5, 5.0, 7.5, 10.0, 20.0]\n\t\tpwv_chi2 = []\n\t\t\n\t\tfor pwv in pwvs:\n\t\t\tdata_tmp = copy.deepcopy(data)\n\t\n\t\t\t#data_tmp = smart.continuumTelluric(data=data_tmp, model=model_tmp)\n\t\t\tmodel_tmp = tellurics.makeTelluricModel(lsf=lsf, airmass=airmass, pwv=pwv, flux_offset=0, wave_offset=0, data=data_tmp, deg=10)\n\t\n\t\t\tmodel_tmp.flux = np.array(smart.integralResample(xh=model_tmp.wave, yh=model_tmp.flux, xl=data_tmp.wave))\n\t\t\tmodel_tmp.wave = data_tmp.wave\n\n\t\t\t#plt.plot(data_tmp.wave, data_tmp.flux, 'k-')\n\t\t\t#plt.plot(model_tmp.wave, model_tmp.flux, 'r-')\n\t\t\t#plt.show()\n\t\t\t#plt.close()\n\t\n\t\t\tpwv_chi2.append(smart.chisquare(data_tmp, model_tmp))\n\t\t# find the pwv with minimum chisquare\n\t\tpwv_chi2_array = np.array(pwv_chi2)\n\t\t\n\t\tif diagnostic:\n\t\t\tplt.plot(pwvs, pwv_chi2)\n\t\t\tplt.xlabel('pwv (mm)', fontsize=15)\n\t\t\tplt.ylabel('$\\chi^2$', fontsize=15)\n\t\t\tplt.tight_layout()\n\t\t\tplt.savefig(save_to_path+'pwv_chi2.png')\n\t\t\t#plt.show()\n\t\t\tplt.close()\n\n\tpwv_min_index = np.where(pwv_chi2_array == np.min(pwv_chi2_array))[0][0]\n\tpwv = pwvs[pwv_min_index]\n\n\tdata_tmp = copy.deepcopy(data)\n\n\tmodel = tellurics.makeTelluricModel(lsf=lsf, airmass=airmass, pwv=pwv, flux_offset=0, wave_offset=0, data=data_tmp)\n\n\tmodel_0 = copy.deepcopy(model)\n\n\t# generate the mask based on sigma clipping\n\tpixel = np.delete(np.arange(len(data.oriWave)), data.mask)[pixel_start: pixel_end]\n\t#pixel = np.delete(np.arange(len(data_tmp.oriWave)),data_tmp.mask)\n\tmask = pixel[np.where(np.abs(data_tmp.flux-model.flux) > outlier_rejection*np.std(data_tmp.flux-model.flux))]\n\n\t#plt.plot(data_tmp.wave, data_tmp.flux, 'k-')\n\t#plt.plot(model.wave, model.flux, 
'r-')\n\t#plt.show()\n\t#plt.close()\n\n\tdata_tmp.mask_custom(mask)\n\tdata_tmp.flux = data_tmp.flux[pixel_start:pixel_end]\n\tdata_tmp.wave = data_tmp.wave[pixel_start:pixel_end]\n\tdata_tmp.noise = data_tmp.noise[pixel_start:pixel_end]\n\n\t#plt.plot(data_tmp.wave, data_tmp.flux, 'k-')\n\t#plt.plot(model.wave, model.flux, 'r-')\n\t#plt.show()\n\t#plt.close()\n\n\t# use curve_fit\n\tdef tell_model_fit(wave, airmass, pwv, flux_offset, wave_offset):\n\t\tmodel = tellurics.makeTelluricModel(lsf=lsf, airmass=airmass, pwv=pwv, flux_offset=flux_offset, wave_offset=wave_offset, data=data_tmp)\n\t\treturn model.flux\n\n\tprint('initial airmass and pwv', airmass, pwv)\n\tflux_med = np.median(data_tmp.flux)\n\tp0 = [airmass, pwv, 0, 0]\n\tbounds = ([airmass-0.5, pwv-0.5, -flux_med*0.05, -0.05], [airmass+0.5, pwv+0.5, flux_med*0.05, 0.05])\n\n\tpopt, pcov = curve_fit(tell_model_fit, data_tmp.wave, data_tmp.flux, p0=p0, bounds=bounds)\n\n\tairmass, pwv, flux_offset, wave_offset = popt[0], popt[1], popt[2], popt[3]\n\tprint('best-fit airmass, pwv, flux_offset, wave_offset', airmass, pwv, flux_offset, wave_offset)\n\tmodel = tellurics.makeTelluricModel(lsf=lsf, airmass=airmass, pwv=pwv, flux_offset=0, wave_offset=0, data=data_tmp)\n\tprint('old telluric mask', mask)\n\tpixel = np.delete(np.arange(len(data_tmp.oriWave)), mask)[pixel_start: pixel_end]\n\t#print('len pixel, data, model', len(pixel), len(data_tmp.wave), len(model.wave))\n\tmask = pixel[np.where(np.abs(data_tmp.flux-model.flux) > outlier_rejection*np.std(data_tmp.flux-model.flux))]\n\t# combine the masks\n\tmask = np.union1d(mask,np.array(data_tmp.mask))\n\tprint('new telluric mask', mask)\n\n\tdata.mask_custom(mask)\n\tdata.flux = data.flux[pixel_start:pixel_end]\n\tdata.wave = data.wave[pixel_start:pixel_end]\n\tdata.noise = data.noise[pixel_start:pixel_end]\n\tprint(data.mask)\n\n\t#plt.plot(data.wave, data.flux, 'k-')\n\t#plt.plot(model.wave, model.flux, 'r-')\n\t#plt.plot(model_0.wave, model_0.flux, 'b-', alpha=0.5)\n\t#plt.show()\n\t#plt.close()\n\n\tif diagnostic:\n\t\tdata.flux = data.flux[pixel_start:pixel_end]\n\t\tdata.wave = data.wave[pixel_start:pixel_end]\n\t\tdata.noise = data.noise[pixel_start:pixel_end]\n\t\t\n\t\tmodel = tellurics.makeTelluricModel(lsf=lsf, airmass=airmass, pwv=pwv, flux_offset=0, wave_offset=0, data=data)\n\t\t\n\t\tplt.plot(data0.wave, data0.flux, 'k-', label='original data', alpha=0.5)\n\t\tplt.plot(data.wave, data.flux, 'k-', label='masked data')\n\t\tplt.plot(model.wave, model.flux, 'r-', alpha=0.7)\n\t\tplt.plot(data.wave, data.flux-model.flux, 'r-')\n\t\tplt.xlabel('$\\lambda (\\AA)$')\n\t\tplt.ylabel('$F_{\\lambda}$')\n\t\tplt.savefig(save_to_path+'telluric_data_model_mask.png')\n\t\t#plt.show()\n\t\tplt.close()\n\n\treturn mask.tolist(), pwv, airmass", "def computeMedianCorrectionFactor(baselineMode, percentile):\n if (baselineMode == 'edge'):\n return(0)\n return(6.3*(5.0/percentile)**0.5)", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def sky_median_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100, low_cut=True, high_cut=True):\r\n\t\r\n\twork_arr = np.ravel(input_arr)\r\n\told_sky = np.median(work_arr)\r\n\toldStaDesviation = work_arr.std()\r\n\tupper_limit = old_sky + sig_fract * oldStaDesviation\r\n\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\tif low_cut and high_cut:\r\n\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\telse:\r\n\t\tif 
low_cut:\r\n\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tindices = np.where((work_arr < upper_limit))\r\n\twork_arr = work_arr[indices]\r\n\tnew_sky = np.median(work_arr)\r\n\titeration = 0\r\n\twhile ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :\r\n\t\titeration += 1\r\n\t\told_sky = new_sky\r\n\t\toldStaDesviation = work_arr.std()\r\n\t\tupper_limit = old_sky + sig_fract * oldStaDesviation\r\n\t\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\t\tif low_cut and high_cut:\r\n\t\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tif low_cut:\r\n\t\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\t\telse:\r\n\t\t\t\tindices = np.where((work_arr < upper_limit))\r\n\t\twork_arr = work_arr[indices]\r\n\t\tnew_sky = np.median(work_arr)\r\n\treturn (new_sky, iteration)", "def calc_iqr(sig):\n # iqr = subtract(*percentile(sig, [75, 25]))\n return np.percentile(sig, 75) - np.percentile(sig, 25)", "def percentile_filter(data, feats):\n\n # Determines the fraction of nonzero values in the data.\n sparsity_frac = feats['nonzeros'] / (feats['nrows'] * feats['ncols'])\n\n # NOTE: Added convenience term derived from experience.\n thresh = np.percentile(data.ravel(), q=100 * (1 - (sparsity_frac + 0.1)))\n\n # Replace p-values below threshold with zero.\n data[(data > 0) & (data < thresh)] = 0\n\n return data", "def limit(filename,threshold,makeup,wout=True,plot=False):\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n dataL,dataL_bit=compress(filename,threshold,1000.0,makeup,1.0,500.0,wout=False,plot=plot)\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_limit.wav',dataL_bit,44100,'PCM_16')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('Done!')\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.') \n return dataL,dataL_bit", "def filter_sat(img_sat_ch, threshold = (170,255)):\n assert img_sat_ch.ndim==2\n sat_binary = np.zeros_like(img_sat_ch)\n scaled_s_ch = np.uint8(255*img_sat_ch/np.max(img_sat_ch))\n sat_binary[(scaled_s_ch >= threshold[0]) & (scaled_s_ch <= threshold[1])] = 1\n return sat_binary", "def perc(data,percentile):\n clip_value = np.percentile(data,percentile)\n data = clip(data,clip_value)\n return data", "def _compute_cutoffs(self):\n self._cutoffidx=np.zeros(self.nsamples,dtype=np.int)\n # Find the inlfection point\n # TODO: check robustness of this method against fluctuations in the data\n self.samplesdatadiff=np.diff(self.samplesdata,axis=0)\n flex=np.argmax(self.samplesdatadiff,axis=0)\n # if the detected cycles is the last one, then the flex has not yet been reached, warn.\n for i,f in enumerate(flex):\n #self._message(\"(%s) Preanalysis - detection of inflection point.\"%(self.samples[i])) \n if f==(self.nvalues-1):\n self._cutoffidx[i]=f\n self._message(\"Warning: (%s) Inflection point not detected. 
Using all fluorescent values available (%d cycles).\"%(self.samples[i],f)) \n elif f<10:\n self._message(\"Warning: (%s) Early inflection point (cycle %d).\"%(self.samples[i],f))\n else: \n self._cutoffidx[i]=np.minimum(f+2,self.nvalues)\n #self._message(\"(%s) Inflection point found at cycle %d).\"%(self.samples[i],f)) ", "def set_min_uncertainty(signal, threshold=0.05):\n # Increase Hirex-Sr uncertainties to be a rel error of 5% minimum (JUST FOR TESTING)\n corrected_unc=signal.std_y/signal.y<=0.05\n signal.std_y[corrected_unc]=0.05*signal.y[corrected_unc]\n\n # correction for normalized uncertainties\n if signal.s/signal.m<=0.05:\n signal.s=0.05*signal.m\n\n signal.std_y_norm=scipy.sqrt((signal.std_y / signal.m)**2.0 + ((signal.y / signal.m)*(signal.s / signal.m))**2.0)", "def findScalingCoeffsLimiter(self, PFC, lqCN, lqCF):\n # Get R and Z vectors at the midplane\n# R_omp_sol = PFC.ep.g['lcfs'][:,0].max()\n R_omp_sol = self.map_R_psi(1.0,PFC)\n R_omp_min = R_omp_sol #this is a limited discharge so Rmin = Rlcfs\n if lqCN > lqCF:\n lqMax = lqCN\n else:\n lqMax = lqCF\n R_omp_max = R_omp_sol + 20.0*lqMax #already in m\n #if R_omp_max is outside EFIT grid, cap at maximum R of grid\n if R_omp_max > max(PFC.ep.g['R']):\n R_omp_max = max(PFC.ep.g['R']) #in meters now\n R_omp = np.linspace(R_omp_min, R_omp_max, 1000)\n Z_omp = np.zeros(R_omp.shape)\n\n # Evaluate B at outboard midplane\n Bp_omp = PFC.ep.BpFunc.ev(R_omp,Z_omp)\n Bt_omp = PFC.ep.BtFunc.ev(R_omp,Z_omp)\n B_omp = np.sqrt(Bp_omp**2 + Bt_omp**2)\n\n #Find coordinate transformation vector at midplane\n psiaxis = PFC.ep.g['psiAxis']\n psiedge = PFC.ep.g['psiSep']\n deltaPsi = np.abs(psiedge - psiaxis)\n gradPsi = Bp_omp*R_omp\n xfm = gradPsi / deltaPsi\n\n # transform hf width into flux space\n lqCN_hat = lqCN*xfm\n lqCF_hat = lqCF*xfm\n\n\n #Calculate flux at midplane using gfile\n psiN = PFC.ep.psiFunc.ev(R_omp,Z_omp)\n psi = psiN*(psiedge - psiaxis) + psiaxis\n PFC.psiMinLCFS = PFC.ep.psiFunc.ev(R_omp_sol,0.0)\n s_hat = psiN - PFC.psiMinLCFS\n\n\n print('psiMinLCFS: {:f}'.format(PFC.psiMinLCFS))\n# print('un-normalized psiMinLCFS: {:f}'.format(PFC.ep.psiFunc_noN.ev(R_omp_sol,0.0)))\n print('Minimum s_hat: {:f}'.format(s_hat.min()))\n\n\n #integral in flux space\n qCN_hat = np.exp(-s_hat / lqCN_hat)\n qCF_hat = np.exp(-s_hat / lqCF_hat)\n\n #note: simps integration will fail if x variable (psi) is not monotonic\n #reinke method\n #intCN = simps(qCN_hat / B_omp, psi)\n #intCF = simps(qCF_hat / B_omp, psi)\n #menard method\n intCN = simps(qCN_hat, psi)\n intCF = simps(qCF_hat, psi)\n\n P0 = 2*np.pi * (intCN*self.fracCN + intCF*self.fracCF)\n #account for nonphysical power\n if P0 < 0: P0 = -P0\n #Scale to input power\n q0 = self.Psol/P0\n\n #old method left for reference\n #q0 = (self.Psol/(2*np.pi)) / (intCN*self.fracCN + intCF*self.fracCF)\n\n return q0" ]
[ "0.5653417", "0.5633713", "0.5491142", "0.54786575", "0.54064196", "0.53240645", "0.52246594", "0.52203375", "0.52071315", "0.51993823", "0.5179031", "0.51522595", "0.51460844", "0.51184833", "0.50988865", "0.5083905", "0.5054955", "0.5052072", "0.50464624", "0.50445575", "0.5017279", "0.5010275", "0.4993915", "0.49769405", "0.49761632", "0.49754384", "0.49731877", "0.49703842", "0.49702385", "0.49601427" ]
0.59004277
0
Return a list of actions related to plugin
def get_plugin_actions(self):
    return []
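This particular plugin exposes no actions, so the stub returns an empty list. A plugin that does expose actions would typically build them with Spyder's create_action helper (as the register_plugin snippet further down also does); a hedged sketch with hypothetical callback names:

from spyderlib.utils.qthelpers import create_action

def get_plugin_actions(self):
    # run_tests and show_log are hypothetical methods of this plugin.
    run_action = create_action(self, "Run unit tests",
                               triggered=self.run_tests)
    log_action = create_action(self, "Show test log",
                               triggered=self.show_log)
    return [run_action, log_action]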
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getActions():\n return getPlugins(IRenamingAction, plugins)", "def get_actions(self):\n return []", "def get_list_of_actions(self):\n return self.actions", "def actions(self):\n return self._action_list", "def actions(self):\n from moztrap.view.lists.actions import actions\n return actions", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def actions(self):\r\n return self.puzzle.actions", "def get_actions(self):\n return self.agent.get_actions()", "def get_available_actions(self):\n return self.actions", "def GetCustomActions(debug, verbose, explicit_configurations):\r\n\r\n return []", "def actions(self):\n return self._actions", "def actions(self):\r\n return actions.Actions(self)", "def getActions(self):\n actions = self.actions[:]\n return actions", "def get_custom_actions(self, containers):\n\n def sorter(Plugin):\n \"\"\"Sort based on order attribute of the plugin\"\"\"\n return Plugin.order\n\n # Fedd an empty dict if no selection, this will ensure the compat\n # lookup always work, so plugin can interact with Scene Inventory\n # reversely.\n containers = containers or [dict()]\n\n # Check which action will be available in the menu\n Plugins = api.discover(api.InventoryAction)\n compatible = [p() for p in Plugins if\n any(p.is_compatible(c) for c in containers)]\n\n return sorted(compatible, key=sorter)", "def actions(self):\n self._actions = {}\n self._actions['getItems'] = ('FileCrawler', None)\n #self._actions['getContents'] = ('ParseContents', ('path'))\n return self._actions", "def list(self):\n\n return list(\n filter(\n lambda x: x.get('type') != 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )", "def actions(self):\n\n return self._actions.getSlice(0)", "def actions(self):\n\n return self._actions.getSlice(0)", "def _generate_actions(self) -> list:\n pass", "def get_actions(self):\n actions = []\n for section in self._sections:\n for (sec, action) in self._actions:\n if sec == section:\n actions.append(action)\n\n actions.append(MENU_SEPARATOR)\n return actions", "def get_plugins_by_action(self, action, **kwargs):\n\n plugins = list()\n for plugin_name, plugin in self.plugins.iteritems():\n if action.pid and not plugin.id == action.pid:\n continue\n if plugin.has_action(action.request_name):\n plugins.append(plugin_name)\n return plugins", "def actions(self):\n isinst = isinstance\n return [c.widget for c in self.children() if isinst(c, QtAction)]", "def get_actions(self):\n\n if self.description == exceptions.NotAvailableError:\n raise exceptions.NotAvailableError('Can\\'t get actions because a description for this service is'\n ' not available.')\n return list(self.actions.values())", "def actions(self):\n raise NotImplementedError", "def actions(self):\n actions = []\n\n for name, item in self._definition.get('actions', {}).items():\n name = self._get_name('action', name)\n actions.append(Action(name, item, self._resource_defs))\n\n return actions", "def list(self):\n return list(\n filter(\n lambda x: x.get('type') == 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )", "def actions(cls):\n return [m for m in cls.__dict__ if not \"__\" in m]", "def actions(self):\r\n return Actions(self)" ]
[ "0.8130526", "0.76124007", "0.7425578", "0.73972076", "0.72777", "0.7205521", "0.7205521", "0.7205521", "0.7167486", "0.7161599", "0.7116936", "0.7075639", "0.70122087", "0.68798953", "0.67999965", "0.6772967", "0.6758088", "0.67547417", "0.6751595", "0.6751595", "0.67393315", "0.6720449", "0.6703838", "0.6685096", "0.6682575", "0.6659212", "0.6631779", "0.6573402", "0.64993525", "0.6465212" ]
0.8701119
1
Register plugin in Spyder's main window
def register_plugin(self):
    self.edit_goto.connect(self.main.editor.load)
    self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)
    self.main.add_dockwidget(self)

    unittesting_act = create_action(self, _("Run unit tests"),
                                    icon=get_icon('profiler.png'),
                                    triggered=self.run_unittesting)
    unittesting_act.setEnabled(is_unittesting_installed())
    fixed_shortcut("Ctrl+Shift+F11", self.main, self.run_unittesting)

    self.main.run_menu_actions += [unittesting_act]
    self.main.editor.pythonfile_dependent_actions += [unittesting_act]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_plugin(self):\n self.create_toggle_view_action()\n\n self.main.add_dockwidget(self)", "def test_addplugin(self):\n app = QApplication(sys.argv)\n data = (np.random.rand(30, 31, 32) * 100).astype(np.int)\n data[15:40, 13:20, 10:18] += 50\n se = seededitorqt.QTSeedEditor(data)\n wg0 = seededitorqt.plugin.SampleThresholdPlugin()\n se.addPlugin(wg0)\n # se.exec_()\n # self.assertTrue(False)", "def plugh():", "def on_first_registration(self):\n self.main.tabify_plugins(self.main.help, self)\n self.dockwidget.hide()", "def register():\n \n global _registered\n if not _registered:\n _registered = True\n sys.path_hooks.insert(0, VFSImporter)", "def plugins():\n pass", "def pyang_plugin_init():\n plugin.register_plugin(OpenConfigPlugin())", "def plugin_register(api):\n api.range_tool_register('Example/01', MyPlugin)\n return True", "def get_plugin(self, name):", "def run_plugin(self, plugin_name, *args, nb_path=None, **kwargs):\n nb_path = self._resolve_nb_path(nb_path)\n if plugin_name in self._plugin_collections:\n pc = self._plugin_collections[plugin_name]\n else:\n pc = PluginCollection([plugin_name], nb_path, {})\n self._plugin_collections[plugin_name] = pc\n pc.run(\"from_notebook\", *args, **kwargs)", "def show():\n from siding.addons import ui\n ui.show()", "def test_show_editor(self):\n app = QApplication(sys.argv)\n data = (np.random.rand(30, 31, 32) * 100).astype(np.int)\n data[15:40, 13:20, 10:18] += 50\n se = seededitorqt.QTSeedEditor(data)\n wg0 = seededitorqt.plugin.SampleThresholdPlugin()\n wg1 = seededitorqt.plugin.SampleThresholdPlugin()\n se.addPlugin(wg0)\n se.addPlugin(wg1)\n se.exec_()\n # self.assertTrue(False)", "def register() -> None:\n plot_backend = _get_plot_backend(\"matplotlib\")\n plot_backend.register()", "def connect(self, plug):", "def register(check_environ=False):\n from mundi.loader import register\n from mundi.types.region import REGION_PLUGINS\n\n if check_environ:\n import os\n\n if os.environ.get(\"MUNDI_DEMOGRAPHY\", \"on\").lower() in (\"off\", \"false\", \"no\"):\n return\n\n for k, v in FUNCTIONS.items():\n register(k, v)\n\n REGION_PLUGINS[\"population\"] = lambda x: population(x.id)\n REGION_PLUGINS[\"age_distribution\"] = lambda x: age_distribution(x.id)\n REGION_PLUGINS[\"age_pyramid\"] = lambda x: age_pyramid(x.id)", "def core_plugin(self):\n pass", "def __init__(self):\n\n self.plugin_name = 'Yum'", "def get_plugin_interface(self):", "def register():\n # DeepOBS\n deepobs.pytorch.testproblems.two_d_quadratic = two_d_quadratic\n\n # for CockpitPlotter\n if \"scalar\" in deepobs.config.DATA_SET_NAMING.keys():\n assert deepobs.config.DATA_SET_NAMING[\"scalar\"] == \"Scalar\"\n else:\n deepobs.config.DATA_SET_NAMING[\"scalar\"] = \"Scalar\"\n\n if \"deep\" in deepobs.config.TP_NAMING.keys():\n assert deepobs.config.TP_NAMING[\"deep\"] == \"Deep\"\n else:\n deepobs.config.TP_NAMING[\"deep\"] = \"deep\"\n\n # BackOBS\n backobs.utils.ALL += (two_d_quadratic,)\n backobs.utils.REGRESSION += (two_d_quadratic,)\n backobs.utils.SUPPORTED += (two_d_quadratic,)\n backobs.integration.SUPPORTED += (two_d_quadratic,)", "def main():\n PLUGIN_ENTRY().run(\"\")", "def init():\n global PLUGIN_NAME\n PLUGIN_NAME = inspect.currentframe().f_code.co_filename", "def _installed_apps_add(self):\n config.add_plugin(self.module_path)", "def launch_new_instance():\n import IPython\n\n IPython.Shell.start().mainloop()", "def run(self):\n\n self.window.run_command(\"show_panel\", {\"panel\": \"output.reg_replace\"})", "def startup():\n menuhook.register(\n 
\"quickpreview\",\n \"howtos\",\n quickpreview,\n menu=[\"&Scripting\", \"Python3 Development\", \"How To\"],\n text=\"Create a quick preview\",\n tooltip=\"Create a quick preview\")", "def add_plugin(self, plugin):\n\n if not plugin:\n return\n\n plugin_btn = PluginButton(project=self._project, plugin=plugin)\n plugin_btn.clicked.connect(self.openPlugin.emit)\n self._flow_layout.addWidget(plugin_btn)", "def register(name, value):\n\n return Plugins.register(name, value)", "def new_plugin(ctx, **defaults):\n from .quickstart import plugin_quickstart\n\n project = ctx.get_project(silent=True)\n plugin_quickstart(defaults, project=project)", "def test():\r\n from spyderlib.utils.qthelpers import qapplication\r\n app = qapplication()\r\n widget = BreakpointWidget(None)\r\n widget.show()\r\n sys.exit(app.exec_())", "def setup(bot):\n bot.logger.debug(\n 'Registering extension \"Quiz\"'\n )\n bot.add_cog(QuizCog(bot))" ]
[ "0.69762045", "0.60747755", "0.58828604", "0.5874758", "0.58635104", "0.57819545", "0.5741525", "0.5716186", "0.5689403", "0.56821126", "0.5669613", "0.56537426", "0.55752265", "0.5545013", "0.55267227", "0.55135757", "0.5477841", "0.546501", "0.5450378", "0.54207486", "0.53620833", "0.5334271", "0.53303087", "0.53240544", "0.5322914", "0.53001785", "0.5299282", "0.5293697", "0.5288459", "0.5284721" ]
0.65753347
1
Apply configuration file's plugin settings
def apply_plugin_settings(self, options):
    pass
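Here the hook is a no-op. When a plugin actually reacts to preference changes, this method usually re-reads the options that changed and pushes them into the widget; a hedged sketch, with 'wrap' as a hypothetical option key:

def apply_plugin_settings(self, options):
    # 'wrap' is a hypothetical option key; get_option is assumed to come from
    # Spyder's plugin mixin and reads the value stored in the configuration file.
    if 'wrap' in options:
        wrap = self.get_option('wrap')
        self.set_wrap_mode(wrap)  # hypothetical widget method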
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def use_config_file(self):\n self.config_file = self.find_config_file()\n if self.config_file:\n self.apply_config_file(self.config_file)", "def apply_config(filename):\n with open(filename) as config_file:\n config = json.load(config_file)\n for setting, value in config.items():\n CoreConfig.__dict__[setting] = value", "def configure(self, options, conf):", "def conf_update(self):\n pass", "def update_config(self):\n if self.integration is None:\n return\n self.enabled = self.integration.has_option(self.get_config_name())\n self.pedantic = self.integration.configuration.get_bool(\n 'filter.mrproper')", "def plugin_configure(config):\n collectd.info('Configuring collectd-mlab plugin.')\n parse_config(config)", "def configure(self, options, conf):\n pass", "def _configure(self):\n path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'config.yml'\n )\n\n with open(path) as file:\n defaultconfig = yaml.load(file)\n\n self.config = merge_dict(self.config, defaultconfig)\n\n if 'logging' in self.config:\n logging.config.dictConfig(self.config['logging'])\n else:\n logging.getLogger('sirbot').setLevel('INFO')", "def update_from_file(self):\n config_path = os.environ.get('MINDINSIGHT_CONFIG', '')\n if not config_path:\n return\n\n config_module = None\n\n # python:full.path.for.config.module\n if config_path.startswith('python:'):\n config_module = import_module(config_path[len('python:'):])\n\n # file:full/path/for/config.py\n elif config_path.startswith('file:'):\n config_path = config_path[len('file:'):]\n module_name = '__mindinsightconfig__'\n config_module = types.ModuleType(module_name)\n machinery = import_module('importlib.machinery')\n loader = machinery.SourceFileLoader(module_name, config_path)\n loader.exec_module(config_module)\n\n if config_module is None:\n return\n\n for setting in dir(config_module):\n if setting.isupper() and setting in self._default_settings:\n setting_value = getattr(config_module, setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)", "def plugin_settings(settings): # pylint: disable=unused-argument\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def update_configs(self, config):\n for what in self.plugins: # backend, repo etc.\n for key in self.plugins[what]: # s3, filesystem etc.\n # print(\"Updating configuration of\", what, key)\n self.plugins[what][key].config(what='set', params=config)\n return", "def config(ctx, flox: Flox, scope, profile, plugin, without_secrets, without_parameters):\n if ctx.invoked_subcommand:\n return\n\n if not plugin:\n raise ConfigurationException(\"You need to specify at least one plugin to be configured\",\n extra=\"Use flox config --plugin=<plugin-name>\")\n\n if not flox.initiated and scope == \"project\":\n warning(\"Unable to use scope project outside of project directory. Changing scope to 'user'\")\n scope = \"user\"\n\n for name in plugin:\n if not flox.plugins.has(name):\n raise MissingPluginException(name)\n\n info(f\"Starting configuration of {name} for '{scope}' scope\" + (f\" and '{profile}' profile\" if profile else \"\"))\n\n if not without_parameters:\n interactive_parameters(flox, name, flox.plugins.get(name), scope, profile)\n\n if not without_secrets:\n if scope == \"system\" and not click.confirm(warning(\"Flox can't manage secrets on the system level. 
\"\n \"If you like to continue all secrets would be stored at \"\n \"user level\", no_print=True)):\n raise Abort\n interactive_secrets(flox, name, flox.plugins.get(name), scope, profile)\n\n execute_stages(\n flox, \"configuration_change\", features=[name]\n )", "def apply(self) -> None:\n _ba.apply_config()", "def apply_settings(self):\n return True", "def configure(self, conf):\n return", "def setup_plugins(self, cfg, path):\n\n if cfg:\n with open(path, \"w\") as f:\n print(\"DOCUMENTATION='''\", file=f)\n print(\"---\", file=f)\n for key in cfg:\n print(f\"{key}: {cfg[key]}\", file=f)\n print(\"'''\", file=f)", "def load_config(self):\n config = dict([(key, value) for key, value in iteritems(self.options)\n if key in self.cfg.settings and value is not None])\n for key, value in iteritems(config):\n self.cfg.set(key.lower(), value)", "def apply_configuration(self, properties):\n\n (\n self.__enabled_plugins_for_starting_new_file,\n self.__enabled_plugins_for_next_token,\n self.__enabled_plugins_for_next_line,\n self.__enabled_plugins_for_completed_file,\n ) = ([], [], [], [])\n\n for next_plugin in self.__enabled_plugins:\n try:\n plugin_specific_facade = self.__find_configuration_for_plugin(\n next_plugin, properties, always_return_facade=True\n )\n\n next_plugin.plugin_instance.set_configuration_map(\n plugin_specific_facade\n )\n next_plugin.plugin_instance.initialize_from_config()\n except Exception as this_exception:\n raise BadPluginError(\n next_plugin.plugin_id,\n inspect.stack()[0].function,\n cause=this_exception,\n ) from this_exception\n\n if next_plugin.plugin_instance.is_next_token_implemented_in_plugin:\n self.__enabled_plugins_for_next_token.append(next_plugin)\n if next_plugin.plugin_instance.is_next_line_implemented_in_plugin:\n self.__enabled_plugins_for_next_line.append(next_plugin)\n if next_plugin.plugin_instance.is_completed_file_implemented_in_plugin:\n self.__enabled_plugins_for_completed_file.append(next_plugin)\n if next_plugin.plugin_instance.is_starting_new_file_implemented_in_plugin:\n self.__enabled_plugins_for_starting_new_file.append(next_plugin)", "def configure(self, args):\n pass", "def apply_config_defaults():\n\n # don't worry about broken settings, validate_config() will take\n # care of them\n\n if 'pre_action_callbacks' not in nori.cfg:\n nori.cfg['pre_action_callbacks'] = [\n (pre_action_drupal_readonly, [], {})\n ]\n\n if 'post_action_callbacks' not in nori.cfg:\n nori.cfg['post_action_callbacks'] = [\n (post_action_drupal_readonly, [], {}, True)\n ]\n\n if 'source_type' not in nori.cfg:\n nori.cfg['source_type'] = 'generic'\n\n if 'source_query_func' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_func'] = generic_db_query\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_func'] = drupal_db_query\n\n if 'source_query_defaulter' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_defaulter'] = (\n apply_generic_arg_defaults\n )\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_defaulter'] = None\n\n if 'source_query_validator' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_validator'] = validate_generic_args\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_validator'] = validate_drupal_args\n\n if 'source_template_change_callbacks' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n 
nori.core.cfg['source_template_change_callbacks'] = []\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_template_change_callbacks'] = [\n (drupal_timestamp_callback, [], {})\n ]\n\n if 'source_global_change_callbacks' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_global_change_callbacks'] = []\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_global_change_callbacks'] = [\n (drupal_cache_callback, [], {})\n ]\n\n if 'dest_type' not in nori.cfg:\n nori.cfg['dest_type'] = 'generic'\n\n if 'dest_query_func' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_func'] = generic_db_query\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_func'] = drupal_db_query\n\n if 'dest_query_defaulter' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_defaulter'] = (\n apply_generic_arg_defaults\n )\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_defaulter'] = None\n\n if 'dest_query_validator' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_validator'] = validate_generic_args\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_validator'] = validate_drupal_args\n\n if 'dest_template_change_callbacks' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_template_change_callbacks'] = []\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_template_change_callbacks'] = [\n (drupal_timestamp_callback, [], {})\n ]\n\n if 'dest_global_change_callbacks' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_global_change_callbacks'] = []\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_global_change_callbacks'] = [\n (drupal_cache_callback, [], {})\n ]\n\n if 'templates' not in nori.core.cfg:\n return\n if not isinstance(nori.core.cfg['templates'],\n nori.core.MAIN_SEQUENCE_TYPES):\n return\n\n for i, template in enumerate(nori.core.cfg['templates']):\n if not isinstance(nori.core.cfg['templates'][i],\n nori.core.MAPPING_TYPES):\n continue\n\n if T_MULTIPLE_KEY not in template:\n nori.core.cfg['templates'][i][T_MULTIPLE_KEY] = False\n\n if T_S_QUERY_ARGS_KEY in template:\n args_t = template[T_S_QUERY_ARGS_KEY]\n defaulter = nori.core.cfg['source_query_defaulter']\n if (isinstance(args_t, tuple) and len(args_t) >= 2 and\n isinstance(args_t[0], nori.core.MAIN_SEQUENCE_TYPES) and\n isinstance(args_t[1], nori.core.MAPPING_TYPES) and\n defaulter and callable(defaulter)):\n defaulter(args_t[0], args_t[1])\n\n if T_TO_D_FUNC_KEY not in template:\n nori.core.cfg['templates'][i][T_TO_D_FUNC_KEY] = None\n\n if T_S_NO_REPL_KEY not in template:\n nori.core.cfg['templates'][i][T_S_NO_REPL_KEY] = False\n\n if T_S_CHANGE_CB_KEY not in template:\n nori.core.cfg['templates'][i][T_S_CHANGE_CB_KEY] = []\n\n if T_D_QUERY_ARGS_KEY in template:\n args_t = template[T_D_QUERY_ARGS_KEY]\n defaulter = nori.core.cfg['dest_query_defaulter']\n if (isinstance(args_t, tuple) and len(args_t) >= 2 and\n isinstance(args_t[0], nori.core.MAIN_SEQUENCE_TYPES) and\n isinstance(args_t[1], nori.core.MAPPING_TYPES) and\n defaulter and callable(defaulter)):\n defaulter(args_t[0], args_t[1])\n\n if T_TO_S_FUNC_KEY not in template:\n nori.core.cfg['templates'][i][T_TO_S_FUNC_KEY] = None\n\n if T_D_NO_REPL_KEY not in template:\n nori.core.cfg['templates'][i][T_D_NO_REPL_KEY] = 
False\n\n if T_D_CHANGE_CB_KEY not in template:\n nori.core.cfg['templates'][i][T_D_CHANGE_CB_KEY] = []\n\n if T_KEY_MODE_KEY not in template:\n nori.core.cfg['templates'][i][T_KEY_MODE_KEY] = 'all'\n\n if T_KEY_LIST_KEY not in template:\n nori.core.cfg['templates'][i][T_KEY_LIST_KEY] = []", "def apply_config_file(self, filename):\n def extractor(template, options):\n \"\"\"Ignore things that are existing non default values\"\"\"\n for name, val in options:\n normalised = self.normalise_key(name)\n if normalised in self.values and not isinstance(self.values[normalised], Default):\n continue\n else:\n yield name, val\n\n items = json.load(open(filename)).items()\n self.use_options(items, extractor)", "def configuration():", "def update_config_file(invoker: AirflowInvoker) -> None:\n airflow_cfg_path = invoker.files[\"config\"]\n logging.debug(f\"Generated default '{str(airflow_cfg_path)}'\")\n\n # open the configuration and update it\n # now we let's update the config to use our stubs\n airflow_cfg = configparser.ConfigParser()\n\n with airflow_cfg_path.open() as cfg:\n airflow_cfg.read_file(cfg)\n logging.debug(f\"Loaded '{str(airflow_cfg_path)}'\")\n\n config = invoker.plugin_config_processed\n for section, cfg in config.items():\n airflow_cfg[section].update(cfg)\n logging.debug(f\"\\tUpdated section [{section}] with {cfg}\")\n\n with airflow_cfg_path.open(\"w\") as cfg:\n airflow_cfg.write(cfg)\n logging.debug(f\"Saved '{str(airflow_cfg_path)}'\")", "def configure(self, section):", "def update(self):\n self.save_config_file()", "def configure(self, config: dict):\n self.config.update(config)", "def configure(self, config_name, action, contents):\n config = self.default_config.copy()\n config.update(contents)\n\n _log.debug(\"Configuring Agent\")\n\n try:\n setting1 = int(config[\"setting1\"])\n setting2 = config[\"setting2\"]\n except ValueError as e:\n _log.error(\"ERROR PROCESSING CONFIGURATION: {}\".format(e))\n return\n\n for x in self.setting2:\n self._create_subscriptions(str(x))\n print(str(x))", "def apply_config(self, responsible, paths, arg=None):\n self.warning(\"Reconfiguring NTP server (called with paths %s)\" % paths)\n return self.updateRunningConf(responsible)", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def _configure_addon(self):\n cfg = None\n try:\n data_dir = os.path.split(self.props.data_dir)\n\n cfg = Configuration(jobtype='Blender', \n data_path=data_dir[0],\n log_level=int(self.props.log_level),\n name=self.props.ini_file,\n datadir=data_dir[1])\n \n except (InvalidConfigException, IndexError) as exp:\n self.log.warning(\"Warning failed to load config file, \"\n \"creating new default config.\")\n self.log.warning(str(exp))\n \n finally:\n\n if not os.path.isdir(self.props.data_dir):\n raise EnvironmentError(\"Data directory not created - \"\n \"please ensure you have adequate permissions.\")\n\n if not cfg:\n cfg = Configuration(jobtype='Blender', log_level='warning')\n\n if self.props.endpoint:\n cfg = override_config(cfg, endpoint=self.props.endpoint)\n if self.props.account:\n cfg = override_config(cfg, account=self.props.account)\n if self.props.key:\n cfg = override_config(cfg, key=self.props.key)\n if self.props.client_id:\n cfg = override_config(cfg, 
client_id=self.props.client_id)\n if self.props.tenant:\n cfg = override_config(cfg, tenant=self.props.tenant)\n if self.props.redirect:\n cfg = override_config(cfg, redirect=self.props.redirect)\n\n cfg.save_config()\n return cfg" ]
[ "0.67779297", "0.6747643", "0.66346806", "0.6428851", "0.6423092", "0.6381086", "0.63534784", "0.6292545", "0.62646264", "0.6239537", "0.6237905", "0.6227397", "0.6210529", "0.6172642", "0.61720854", "0.6128758", "0.60786307", "0.6069815", "0.60622776", "0.6050749", "0.6043863", "0.60065943", "0.599283", "0.59613764", "0.5952286", "0.59340423", "0.5918914", "0.59161264", "0.591283", "0.59114" ]
0.7636843
0
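Note on the row above: the positive document for the query "Apply configuration file's plugin settings" is only an abstract hook (`pass`), so the intended behaviour is easier to see with a concrete sketch. The following is a minimal, hypothetical implementation and is not part of the dataset: the class name, the `[plugin]` section, and the `self.options` dict are all assumptions made purely for illustration.

```python
import configparser


class ExamplePlugin:
    """Hypothetical plugin used only to illustrate the query above."""

    def __init__(self, config_path):
        self._config = configparser.ConfigParser()
        self._config.read(config_path)  # missing files are silently ignored
        self.options = {}

    def apply_plugin_settings(self, options):
        """Apply the named settings from the config file's [plugin] section."""
        for name in options:
            # has_option returns False for a missing section or option,
            # so unknown names are simply skipped.
            if self._config.has_option("plugin", name):
                self.options[name] = self._config.get("plugin", name)


# Example usage (assumed file and option names):
#   plugin = ExamplePlugin("settings.ini")
#   plugin.apply_plugin_settings(["theme", "font_size"])
```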
Used to allow a user to edit their own profile.
def edit_profile(request): user = request.user profile = Profile.objects.for_user(user) if request.method != 'POST': profile_form = ProfileForm(instance=profile) user_form = UserForm(instance=user) else: profile_form = ProfileForm(request.POST, instance=profile) user_form = UserForm(request.POST, instance=user) if profile_form.is_valid() and user_form.is_valid(): profile_form.save() user_form.save() return HttpResponseRedirect(reverse('epic.core.views.view_profile', kwargs={})) return render_to_response( 'core/edit_profile.html', {'profile_form': profile_form, 'user_form': user_form,}, context_instance=RequestContext(request))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_profile(self, name, username, email):\n return self.app.post('/_editProfile', data = dict(\n name = name,\n username = username,\n email = email\n ), follow_redirects = True)", "def edit_user():\n if CURR_USER_KEY in session:\n user = g.user\n form = ProfileEditForm(obj=user)\n\n if form.validate_on_submit():\n user.first_name = form.first_name.data\n user.last_name = form.last_name.data\n user.description = form.description.data\n user.email = form.email.data\n user.image_url = form.image_url.data or \"/static/images/default-pic.png\"\n\n db.session.commit()\n\n flash(\"Profile edited.\")\n return redirect(\"/profile\")\n\n return render_template('/profile/edit-form.html', form=form)\n else:\n return redirect('/login')", "def edit_profile(request, pk=None):\n profiledetails = UserProfile.objects.filter(user=request.user).first()\n if UserProfile.objects.filter(user=request.user or request.user.is_superuser):\n\n if request.method == \"POST\":\n profile_details_form = UserProfileForm(request.POST, request.FILES, instance=profiledetails)\n if profile_details_form.is_valid():\n profiledetails = profile_details_form.save()\n messages.success(request, 'Your profile has been updated!')\n return redirect(user_profile)\n else:\n profile_details_form = UserProfileForm(instance=profiledetails)\n else:\n return HttpResponseForbidden()\n \n return render(request, 'newprofiledetails.html', {'profile_details_form': profile_details_form})", "def edit_profile(request):\n if request.method == 'POST':\n form = EditProfileForm(request.POST, instance=request.user)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'Profile updated successfully.')\n return redirect('profile')\n\n else:\n messages.error(request, 'Invalid entry, please try again.')\n return redirect('edit_profile')\n else:\n form = EditProfileForm(instance=request.user)\n return render(request, 'accounts/edit_profile.html', {'form': form})", "def viewprofile():\n user = current_user\n form = UserUpdateForm(obj=user)\n form.populate_obj(user)\n if form.validate_on_submit():\n form.populate_obj(user)\n\n db.session.commit()\n\n flash('You have successfully edited your profile!')\n return render_template('user/user.html', title=\"View Profile\",\n user=user, form=form, action='Edit')", "def view_profile():\n user_id = session.get(\"user_id\")\n \n user = User.query.get(session[\"user_id\"])\n \n return render_template(\"editable_profile_page.html\", user=user)", "def edit_profile(profile_id):\n # This check is in place to avoid users trying to edit a profile via the dashboard\n # when they have not created one. 
If not the option is not displayed\n user = mongo.db.user.find_one({'username': session['username']})\n chck = mongo.db.profiles.find_one_or_404({'user_id': user['_id']})\n if chck: \n profile = mongo.db.profiles.find_one(\n {'_id': ObjectId(profile_id)})\n \n form=ProfileForm()\n form.headline.data = profile['headline']\n form.bio.data = profile['bio']\n form.xp.data = profile['xp']\n form.interests.data = profile['interests']\n form.stack.data = profile['stack']\n form.languages.data = profile['languages']\n form.frameworks.data = profile['frameworks']\n form.github.data = profile['github']\n form.linkedin.data = profile['linkedin']\n \n return render_template('pages/editprofile.html', form=form, profile=profile, legend='Edit your Profile')", "def edit_profile(request):\n profile_to_edit = get_object_or_404(UserProfile, user=request.user)\n if request.method == \"POST\":\n form = UserProfileForm(request.POST, instance=profile_to_edit)\n if form.is_valid:\n form.save()\n messages.success(request, \"Profile updated succesfully\")\n return redirect('profile')\n else:\n messages.error(request, \"Updated failed. \\\n Please ensure the form is valid\")\n else:\n profile_form = UserProfileForm(instance=profile_to_edit)\n template = 'profiles/edit_profile.html'\n context = {\n 'form': profile_form,\n }\n return render(request, template, context)", "def profile():\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n form = UserEditForm(obj=g.user)\n\n if form.validate_on_submit():\n if not User.authenticate(g.user.username, form.data[\"password\"]):\n flash(\"Invalid password.\", \"danger\")\n return render_template('/users/edit.html', form=form) \n # data = {k:v for k,v in form.data.items() if k != \"csrf_token\"}\n # data[\"image_url\"] = data[\"image_url\"] or None\n # data[\"header_image_url\"] = data[\"header_image_url\"] or None\n\n g.user.username = form.data[\"username\"]\n g.user.email = form.data[\"email\"]\n g.user.image_url = form.data[\"image_url\"] or None\n g.user.header_image_url = form.data[\"header_image_url\"] or None\n g.user.bio = form.data[\"bio\"]\n\n db.session.commit()\n\n flash(\"Profile edited!\", \"success\")\n return redirect(f'/users/{g.user.id}')\n\n return render_template('/users/edit.html', form=form)", "def test_that_a_user_can_edit_their_profile(self):\n self.authorize_user(self.user_login_details)\n url = self.profiles_url + \\\n '{}'.format(self.user['user']['username']) + \"/\"\n response = self.client.patch(url, data=self.user_bio)\n self.assertEqual(response.data['bio'], \"You are a peculiar man.\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def profile():\n\n form = EditUserForm(obj=g.user)\n\n if form.validate_on_submit():\n if User.authenticate(g.user.username, form.password.data):\n g.user.username = form.username.data\n g.user.email = form.email.data\n g.user.image_url = form.image_url.data\n g.user.header_image_url = form.header_image_url.data\n g.user.bio = form.bio.data\n g.user.private = form.private.data\n db.session.commit()\n return redirect(f'/users/{g.user.id}')\n flash('Incorrect password', 'danger')\n return render_template('users/edit.html', user_id=g.user.id, form=form)", "def can_edit_profile(user: User, owner: User) -> bool:\n\n return has_permission(user, \"edit_profiles\") or user == owner", "def profile_edit():\n form = ProfileForm(obj=current_user)\n\n if form.validate_on_submit():\n form.populate_obj(current_user)\n\n try:\n correct = True\n db.session.commit()\n\n 
flash(_('Profile updated correctly'), 'success')\n\n return render_template('admin/profile/edit.html', form=form)\n\n except IntegrityError:\n # Email already exists\n correct = False\n form.errors.email.append(_('Email is already registered'))\n\n return render_template('admin/profile/edit.html', form=form)\n\n except Exception:\n # Catch anything unknown\n correct = False\n\n flash(_('Failed to update profile, contact an administrator'), 'error')\n\n return render_template('admin/profile/edit.html', form=form)\n\n finally:\n if not correct:\n db.session.rollback()\n\n return render_template('admin/profile/edit.html', form=form)", "def edit_profile(request, userid):\n woofer_user = User.objects.get(id=userid)\n current_profile = Profile.objects.get(user=woofer_user)\n if woofer_user.id != request.user.id:\n return HttpResponseRedirect(reverse('view-profile', args=[userid]))\n\n if request.method == 'POST':\n form = ProfileForm(request.POST)\n if form.is_valid():\n new_profile = form.save(commit=False)\n # copy the ID of the User's current profile to the new profile so\n # Django performs an update when we call .save()\n new_profile.id = current_profile.id\n new_profile.user = woofer_user\n new_profile.save()\n return HttpResponseRedirect(reverse('view-profile', args=[userid]))\n else:\n form = ProfileForm(instance=current_profile)\n\n return render(request, 'woofer/show_form.html', {\n 'form' : form,\n 'message' : None,\n 'form_action' : reverse('edit-profile', args=[userid]),\n 'title' : \"Edit Profile\"\n })", "def edit_profile(request):\n profile = request.user.profile\n form = forms.ProfileForm(instance=profile)\n\n if request.method == 'POST':\n if settings.SYSTEM_MAINTENANCE_NO_UPLOAD:\n # Allow submitting the form, but do not allow the photo to\n # be modified.\n if 'delete_photo' in request.POST or request.FILES:\n raise ServiceUnavailable()\n\n if 'edit_profile' in request.POST:\n # Update the profile and return to the same page. 
Place a message\n # at the top of the page: 'your profile has been updated'\n form = forms.ProfileForm(data=request.POST, files=request.FILES,\n instance=profile)\n if form.is_valid():\n form.save()\n messages.success(request, 'Your profile has been updated.')\n elif 'delete_photo' in request.POST:\n profile.delete_photo()\n messages.success(request, 'Your profile photo has been deleted.')\n\n if not form.errors:\n form = forms.ProfileForm(instance=profile)\n\n return render(request, 'user/edit_profile.html', {'form':form})", "def edit_profile(request):\n form = ProfileForm(instance=request.user.profile)\n if request.method == \"POST\":\n form = ProfileForm(data=request.POST, files=request.FILES,\n instance=request.user.profile)\n if form.is_valid():\n form.save()\n return redirect('profile')\n return render(request, 'accounts/forms.html', {'form': form})", "def edit_user_profile(request):\n user = request.user\n user_profile = UserProfile.objects.filter(user=user)[0]\n if request.method == 'POST':\n form = MemberProfileForm(request.POST)\n additional_form = MemberAdditionalProfileForm(request.POST)\n if form.is_valid() and additional_form.is_valid():\n cd = form.cleaned_data\n user.first_name = cd['first_name']\n user.last_name = cd['last_name']\n user.email = cd['email']\n user.save()\n if 'picture' in request.FILES:\n file = request.FILES['picture']\n user_profile.picture.save(file.name, file, save=True)\n user_profile.gravatar = additional_form.cleaned_data['gravatar']\n user_profile.save()\n return HttpResponseRedirect('/')\n else:\n form = MemberProfileForm(instance=request.user)\n additional_form = MemberAdditionalProfileForm(instance=user_profile)\n return render_to_response('edit_profile.html', locals())", "def edit_profile():\n form = EditProfileForm()\n if request.method == 'GET':\n form.first_name.data = current_user.first_name\n form.first_name.data = current_user.first_name\n form.last_name.data = current_user.last_name\n form.email.data = current_user.email\n form.address_1.data = current_user.address_1\n form.address_2.data = current_user.address_2\n form.city.data = current_user.city\n form.state.data = current_user.state\n form.zipcode.data = current_user.zipcode\n form.telephone.data = current_user.telephone\n if form.validate_on_submit():\n form.last_name.data = form.last_name.data\n current_user.first_name = form.first_name.data\n current_user.last_name = form.last_name.data\n current_user.email = form.email.data\n current_user.address_1 = form.address_1.data\n current_user.address_2 = form.address_2.data\n current_user.city = form.city.data\n current_user.state = form.state.data\n current_user.zipcode = form.zipcode.data\n current_user.telephone = form.telephone.data\n db.session.commit()\n flash(('Your changes have been saved.'))\n\n return redirect(url_for('edit_profile'))\n\n return render_template('edit_profile.html', title=('Edit Profile'),\n form=form)", "def edit_basic_info(request):\n if request.POST:\n request.user.first_name = request.POST['first_name']\n request.user.last_name = request.POST['last_name']\n request.user.email = request.POST['email']\n request.user.save()\n request.user.userprofile.phone_number = request.POST['phone']\n request.user.userprofile.save()\n messages.add_message(request, messages.SUCCESS, 'Your changes have been saved.')\n return redirect('base_dashboard')\n\n return render(request, 'edit_basic_info.html', {'the_user': request.user})", "def home_edituser():\n\tpass", "def profile_edit(request):\n if 
request.user.is_authenticated:\n u = request.user\n context = ProfileContext(u).get_context_with_form()\n context = calculate_char_in_textarea(context)\n return render(request, 'wantedly_app/profile_edit.html', context)\n else:\n return redirect('home')", "def profile(request):\n profile = request.user.profile\n return render(request, 'accounts/profile.html', {'profile': profile})", "def edit_profile_post(request, pk=None):\n profilepost = get_object_or_404(ProfilePost, pk=pk) \n if (request.user == profilepost.user or\n request.user.is_superuser):\n if request.method == \"POST\":\n profile_post_form = ProfilePostForm(request.POST, request.FILES, instance=profilepost)\n if profile_post_form.is_valid():\n profilepost = profile_post_form.save()\n messages.success(request, 'Your post has been updated!') \n return redirect(reverse('profile'))\n else:\n profile_post_form = ProfilePostForm(instance=profilepost)\n else:\n return HttpResponseForbidden()\n\n return render(request, 'newprofilepost.html', {'profile_post_form': profile_post_form})", "def show_profile(request, profile_uuid):\n context = context_assign_user(request.user)\n context['profile_form'] = ProfileForm(instance=context['current_user'])\n if request.method == 'POST':\n form = ProfileForm(request.POST)\n if form.is_valid():\n Profile.objects.filter(pk=context['current_user'].id).update(bio=request.POST['bio'], palette=request.POST['palette'], iex_api_key=request.POST['iex_api_key'])\n messages.success(request, 'Your settings have been saved.')\n return redirect('dash:dashboard')\n errors = form.errors\n form = ProfileForm(request, request.POST)\n messages.warning(request, f\"There's a problem with the form: {errors}\")\n return render(request, 'dash/users/show_profile.html', context)", "def user_view(cls, user, profile):\r\n pass", "def user_profile():\n user = current_user\n user_is_valid = True\n if not user.active:\n flash('This user account is under review. 
Please update your profile '\n + ' and contact the organizing team to access all functions of '\n + 'this platform.', 'warning')\n\n form = UserForm(obj=user, next=request.args.get('next'))\n form.roles.choices = [(r.id, r.name) for r in Role.query.order_by('name')]\n\n # Check conflicting PKs\n if form.email.data != user.email:\n if User.query.filter_by(email=form.email.data).first() is not None:\n flash('This e-mail address is already registered.', 'error')\n user_is_valid = False\n\n if user.sso_id:\n # Do not allow changing password on SSO\n del form.password\n\n # Validation has passed\n if form.is_submitted() and form.validate() and user_is_valid:\n # Assign roles\n user.roles = [Role.query.filter_by(\n id=r).first() for r in form.roles.data]\n del form.roles\n\n # Sanitize username\n user.username = sanitize_input(form.username.data)\n del form.username\n\n # Assign password if changed\n originalhash = user.password\n form.populate_obj(user)\n # Do not allow changing password on SSO\n if not user.sso_id:\n if form.password.data:\n user.set_password(form.password.data)\n else:\n user.password = originalhash\n\n user.updated_at = datetime.utcnow()\n db.session.add(user)\n db.session.commit()\n user.socialize()\n flash('Profile updated.', 'success')\n return redirect(url_for('public.user', username=user.username))\n\n if not form.roles.choices:\n del form.roles\n else:\n form.roles.data = [(r.id) for r in user.roles]\n return render_template('public/useredit.html',\n oauth_type=oauth_type(),\n user=user, form=form,\n active='profile')", "def edit_user(request, user_id):\n profile = get_object_or_404(UserProfile, user=request.user)\n\n # make sure only managers and admins can add a team\n if profile.level == 'admin' or profile.level == 'manager':\n\n user = get_object_or_404(UserProfile, pk=user_id)\n if request.method == 'POST':\n form = UserProfileForm(request.POST, request.FILES, instance=user)\n if form.is_valid():\n form.save()\n messages.success(request, 'User edited successfully')\n\n users = UserProfile.objects.filter(company_id=profile.company_id)\n template = 'profiles/user_management.html'\n context = {\n 'users': users,\n 'profile': profile\n }\n return render(request, template, context)\n\n else:\n print(\"failed\")\n else:\n form = UserProfileForm(instance=user)\n\n template = 'profiles/profile.html'\n context = {\n 'form': form,\n 'profile': user,\n }\n\n return render(request, template, context)\n\n else:\n messages.info(request, \"Sorry, you are not authorized to edit users. Ask a Manager or Admin.\")\n\n return redirect(reverse('planning', ))", "def profile():\n if g.user:\n return render_template('profile.html', user=g.user)\n return redirect(url_for('login'))", "def user_view(cls, user, profile):\n pass", "def test_user_edit_profile(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser_id\n\n address = \"1215 Brookview Ave, Kettering, Ohio 45409\"\n\n resp = c.get(f\"/users/8989/\")\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\n '<h1 class=\"Display-4 text-center mt-3\"><b>Profile Information:</b></h1>',\n html,\n )\n self.assertIn(\"<p>testuser</p>\", html)\n self.assertIn(\"<p>[email protected]</p>\", html)\n self.assertIn(\"<p>662-996-3356</p>\", html)\n self.assertIn(\n '<a class=\"font-weight-bold btn winter-neva-gradient color-block btn-block my-4 waves-effect z-depth-0\" href=\"/users/8989/edit\">Edit Profile</a>',\n html,\n )" ]
[ "0.7839375", "0.76361674", "0.75991297", "0.757305", "0.7543597", "0.74999654", "0.7481396", "0.74729866", "0.7393925", "0.73854417", "0.7290376", "0.7259955", "0.7223237", "0.72067004", "0.72060645", "0.7162687", "0.7130685", "0.7072604", "0.7040199", "0.70098627", "0.6973461", "0.6972686", "0.69697833", "0.6929408", "0.69286853", "0.68818706", "0.68772167", "0.6828612", "0.6822478", "0.67614275" ]
0.7651815
1
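Note on the row above: the positive document is a Django function-based view that reads `request.user` directly, so in practice a view like this is exposed through a URL pattern and guarded with `login_required`. The sketch below is a hedged illustration only; the module layout (`epic.core.views`), the URL regexes, and the existence of a `view_profile` view are assumptions (the view itself only references the reverse name `'epic.core.views.view_profile'`).

```python
# Hypothetical urls.py for the edit_profile view above (old-style django.conf.urls,
# matching the dotted reverse() name used in the dataset's positive document).
from django.conf.urls import url
from django.contrib.auth.decorators import login_required

from epic.core import views  # assumed module layout

urlpatterns = [
    # login_required redirects anonymous visitors to the login page
    # before request.user is ever read inside the view.
    url(r'^profile/edit/$', login_required(views.edit_profile),
        name='edit_profile'),
    # Named to match the string the view passes to reverse().
    url(r'^profile/$', views.view_profile,
        name='epic.core.views.view_profile'),
]
```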
Return three Keras HDF5Matrix instances for the input, groundtruth density map and groundtruth segmentation mask in a compact TrainingSet
def get_matrices(training_set_path): if os.path.isfile(training_set_path): X = HDF5Matrix(training_set_path, 'input/input') y = HDF5Matrix(training_set_path, 'target/target') y_seg = HDF5Matrix(training_set_path, 'seg_map/seg_map') return X, y, y_seg else: raise Exception('Training set file not found.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform_to_h5():\n # this took about 10 minutes for set1\n for setname in ['set1', 'set2']:\n filename = os.path.join(ltrc_dirname, '{}.h5'.format(setname))\n f = h5py.File(filename, 'w')\n\n for name in ['train', 'valid', 'test']:\n g = f.create_group(name)\n filename = os.path.join(ltrc_dirname, '{}.{}.txt'.format(setname, name))\n X, y, q = load_svmlight_file(filename, query_id=True)\n g.create_dataset('X', data=X.todense(), compression='gzip')\n g.create_dataset('y', data=y, compression='gzip')\n g.create_dataset('q', data=q, compression='gzip')\n f.close()\n # Now you can do this\n # f['/valid/X'].shape\n # Out[24]: (71083, 699)", "def create_dev_matrix(self):\n if os.path.exists(self.dev_matrix_h5_path):\n print(\"[LOGGING]: \" + self.dev_matrix_h5_path + \" exists!\")\n return\n\n with h5py.File(self.dev_matrix_h5_path, 'w') as f:\n\n for mode in ['train', 'test']:\n for device in ['a', 'b', 'c']:\n grp = f.create_group(mode + '/' + device)\n grp['data'], grp['label'] = self.extract_npy(mode=mode, devices=device)\n # add parallel data as separate device p\n grp = f.create_group(mode + '/p')\n grp['data'], grp['label'] = self.extrac_para_npy(mode=mode)\n\n # add neg parallel data as device A\n grp = f.create_group(mode + '/A')\n grp['data'], grp['label'] = self.extract_neg_para_npy(mode=mode)\n f.close()", "def build_model():\n mdl = Sequential()\n\n # normalization\n mdl.add(Lambda(lambda x: x/128. - 1, input_shape=IMAGE_SHAPE, name=\"input\"))\n\n # trim image\n mdl.add(Lambda(lambda x: x[:, 10:-10, :, :]))\n\n # convolutions\n mdl.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Flatten())\n\n mdl.add(Dense(128, activation='relu'))\n mdl.add(Dense(64, activation='relu'))\n mdl.add(Dense(1, name=\"output\"))\n\n mdl.summary()\n\n return mdl", "def buildDataSet():\n (x_train_origin, y_train_origin), (x_test_origin, y_test_origin) = mnist.load_data()\n\n assert K.image_data_format() == 'channels_last'\n x_train_origin = x_train_origin.reshape(x_train_origin.shape[0], h, w, 1)\n x_test_origin = x_test_origin.reshape(x_test_origin.shape[0], h, w, 1)\n\n dataset_train = []\n dataset_test = []\n\n #Sorting images by classes and normalize values 0=>1\n for n in range(nb_classes):\n images_class_n = np.asarray([row for idx,row in enumerate(x_train_origin) if y_train_origin[idx]==n])\n dataset_train.append(images_class_n/255)\n\n images_class_n = np.asarray([row for idx,row in enumerate(x_test_origin) if y_test_origin[idx]==n])\n dataset_test.append(images_class_n/255)\n\n return dataset_train,dataset_test,x_train_origin,y_train_origin,x_test_origin,y_test_origin", "def test_hdf5_design_matrix():\n skip_if_no_h5py()\n import h5py\n\n # save random data to HDF5\n handle, filename = tempfile.mkstemp()\n dataset = 
random_one_hot_dense_design_matrix(np.random.RandomState(1),\n num_examples=10, dim=5,\n num_classes=3)\n with h5py.File(filename, 'w') as f:\n f.create_dataset('X', data=dataset.get_design_matrix())\n f.create_dataset('y', data=dataset.get_targets())\n\n # instantiate Train object\n trainer = yaml_parse.load(design_matrix_yaml % {'filename': filename})\n trainer.main_loop()\n\n # cleanup\n os.remove(filename)", "def data_split():\n x_train = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TRAIN, \"images.npy\"))\n y_train = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TRAIN, \"label.npy\"))\n x_test = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TEST, \"images.npy\"))\n y_test = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TEST, \"label.npy\"))\n\n print(x_train.shape)\n print(x_test.shape)\n\n img_rows, img_cols = x_train.shape[1], x_train.shape[2]\n num_classes = 10 # starts with 1 not 0\n\n y_test1 = y_test.reshape((y_test.shape[0],))\n y_test1 = [y - 1 for y in y_test1]\n\n y_train1 = y_train.reshape((y_train.shape[0],))\n y_train1 = [y - 1 for y in y_train1]\n\n input_shape = (img_rows, img_cols, 3)\n\n X_train = x_train.astype('float32')\n X_test = x_test.astype('float32')\n\n X_train /= 255\n X_test /= 255\n print('x_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train2 = keras.utils.to_categorical(y_train1, num_classes)\n y_test2 = keras.utils.to_categorical(y_test1, num_classes)\n\n y_train2 = y_train2.astype('int32')\n y_test2 = y_test2.astype('int32')\n\n print(\n \"after process: X train shape: {}, X test shape: {}, y train shape: {}, y test shape: {}\".format(x_train.shape,\n x_test.shape,\n y_train2.shape,\n y_test2.shape))\n return input_shape, X_train, X_test, y_train2, y_test2", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n 
mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def build_deepsets_joint_representation_model():\n # We first create the embedding model\n test_input = tf.keras.layers.Input(shape=(_NUM_INPUTS.value,))\n train_input = tf.keras.layers.Input(shape=(_NUM_INPUTS.value,))\n train_label = tf.keras.layers.Input(shape=(1,))\n\n # Obtain a mask variable. Output dimension [1, _NUM_INPUTS.value]\n mask = tf.ones((1, _NUM_INPUTS.value))\n one_row = tf.reshape(tf.gather(train_input, [0], axis=0), [-1])\n mask = mask * tf.cast(tf.not_equal(one_row, _PAD_VALUE.value), tf.float32)\n\n # Calibrate input if haven't done so\n calibrated_train_input = train_input\n calibrated_test_input = test_input\n calibration = tfl.layers.PWLCalibration(\n input_keypoints=np.linspace(0.0, 1.0, _NUM_CALIB_KEYS.value),\n units=_NUM_INPUTS.value,\n output_min=0.0,\n output_max=1.0,\n impute_missing=True,\n missing_input_value=_MISSING_VALUE.value,\n name=\"input_calibration\")\n calibrated_train_input = calibration(train_input)\n calibrated_test_input = calibration(test_input)\n\n # Reshape the input to pair-wise format.\n # Output dimension [_BATCH_SIZE.value, _NUM_INPUTS.value**2, 2]\n pairwise_train_input = get_pairwise_inputs(calibrated_train_input)\n pairwise_test_input = get_pairwise_inputs(calibrated_test_input)\n\n # Obtain pairwise masks. Output dimesion [_NUM_INPUTS.value**2,]\n pairwise_mask = get_pairwise_inputs(mask)\n pairwise_mask = tf.reshape(tf.reduce_prod(pairwise_mask, axis=-1), [-1])\n\n # Obtain pairwise labels.\n # Output dimension\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2, _MAX_NUM_CLASSES.value]\n one_hot_train_label = tf.one_hot(\n tf.cast(train_label, tf.int32), _MAX_NUM_CLASSES.value)\n pairwise_train_label = tf.tile(one_hot_train_label,\n tf.constant([1, _NUM_INPUTS.value**2, 1]))\n\n # Concatenate pairwise inputs and labels.\n # Output dimension\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2, _MAX_NUM_CLASSES.value + 2]\n pairwise_train_input = tf.concat([pairwise_train_input, pairwise_train_label],\n axis=-1)\n\n # Obtain distribution representation. Output dimension\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2,\n # _DISTRIBUTION_REPRESENTATION_DIM.value]\n batch_embedding = tf.keras.layers.Dense(\n _DISTRIBUTION_REPRESENTATION_DIM.value, activation=\"relu\")(\n pairwise_train_input)\n for _ in range(_HIDDEN_LAYER.value - 1):\n batch_embedding = tf.keras.layers.Dense(\n _DISTRIBUTION_REPRESENTATION_DIM.value, activation=\"relu\")(\n batch_embedding)\n\n # Average embeddings over the batch. Output dimension\n # [_NUM_INPUTS.value**2, _DISTRIBUTION_REPRESENTATION_DIM.value].\n mean_distribution_embedding = tf.reduce_mean(batch_embedding, axis=0)\n\n outputs = []\n for pairwise_input in [pairwise_test_input, pairwise_train_input]:\n # [_NUM_INPUTS.value**2, _DISTRIBUTION_REPRESENTATION_DIM.value] ->\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2,\n # _DISTRIBUTION_REPRESENTATION_DIM.value] via repetition.\n distribution_embedding = tf.tile(\n [mean_distribution_embedding],\n tf.stack([tf.shape(pairwise_input)[0],\n tf.constant(1),\n tf.constant(1)]))\n # Concatenate pairwise inputs and embeddings. Output shape\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2,\n # 2 + _DISTRIBUTION_REPRESENTATION_DIM.value]\n concat_input = tf.concat([pairwise_input, distribution_embedding], axis=-1)\n\n # Apply a common function to each pair. 
Output shape\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2, _DEEPSETS_LAYER_UNITS.value]\n pairwise_output = tf.keras.layers.Dense(\n _DEEPSETS_LAYER_UNITS.value, activation=\"relu\")(\n concat_input)\n for _ in range(_HIDDEN_LAYER.value - 1):\n pairwise_output = tf.keras.layers.Dense(\n _DEEPSETS_LAYER_UNITS.value, activation=\"relu\")(\n pairwise_output)\n\n # Average pair-wise outputs across valid pairs.\n # Output shape [_BATCH_SIZE.value, _DEEPSETS_LAYER_UNITS.value]\n average_outputs = tf.tensordot(pairwise_mask, pairwise_output, [[0], [1]])\n average_outputs = average_outputs / tf.reduce_sum(pairwise_mask)\n\n # Use several dense layers to get the final output\n final_output = tf.keras.layers.Dense(\n _OUTPUT_LAYER_UNITS.value, activation=\"relu\")(\n average_outputs)\n for i in range(_HIDDEN_LAYER.value - 1):\n final_output = tf.keras.layers.Dense(\n _OUTPUT_LAYER_UNITS.value, activation=\"relu\")(\n final_output)\n outputs.append(final_output)\n\n test_outputs = tf.math.l2_normalize(outputs[0], axis=1)\n train_outputs = tf.math.l2_normalize(outputs[1], axis=1)\n similarity_matrix = tf.exp(\n tf.matmul(test_outputs, tf.transpose(train_outputs)))\n\n similarity_list = []\n for i in range(_MAX_NUM_CLASSES.value):\n mask = tf.cast(tf.squeeze(tf.equal(train_label, i)), tf.float32)\n similarity_list.append(similarity_matrix * mask)\n\n similarity = [\n tf.reduce_mean(s, axis=1, keepdims=True) for s in similarity_list\n ]\n sum_similarity = tf.reduce_sum(\n tf.concat(similarity, axis=1), axis=1, keepdims=True)\n final_output = [similarity / sum_similarity for similarity in similarity_list]\n final_output = tf.concat(final_output, axis=1)\n\n keras_model = tf.keras.models.Model(\n inputs=[test_input, train_input, train_label], outputs=final_output)\n compile_keras_model(keras_model)\n return keras_model", "def create_training_matrices(experiment, sample_times):\n # Labels:\n location_labels = read_location_labels(experiment)\n locations = get_location_of_persons_at_samples(location_labels, sample_times, experiment)\n\n p1_scene_features = read_roi_scene_recognitions_for_person(experiment, \"P1\")\n p2_scene_features = read_roi_scene_recognitions_for_person(experiment, \"P2\")\n p3_scene_features = read_roi_scene_recognitions_for_person(experiment, \"P3\")\n p4_scene_features = read_roi_scene_recognitions_for_person(experiment, \"P4\")\n\n print(p1_scene_features.loc[0]['predictions'].shape[0])\n print(len(p1_scene_features['predictions'][0]))\n data = {\n \"P1\": np.zeros((len(sample_times), 2 + p1_scene_features.loc[0]['predictions'].shape[0])),\n \"P2\": np.zeros((len(sample_times), 2 + p1_scene_features.loc[0]['predictions'].shape[0])),\n \"P3\": np.zeros((len(sample_times), 2 + p1_scene_features.loc[0]['predictions'].shape[0])),\n \"P4\": np.zeros((len(sample_times), 2 + p1_scene_features.loc[0]['predictions'].shape[0]))\n }\n\n for index, loc_label in enumerate(locations):\n t = loc_label[0]\n\n t_et = convert_timestamps(exp_root, t, \"video\", \"P1_eyetracker\")\n p1 = np.mean(p1_scene_features[p1_scene_features['timestamp'].between(t_et - sample_step/2, t_et + sample_step/2)]['predictions'].as_matrix(), axis=0)\n\n\n t_et = convert_timestamps(exp_root, t, \"video\", \"P2_eyetracker\")\n p2 = np.mean(\n p2_scene_features[p2_scene_features['timestamp'].between(t_et - sample_step / 2, t_et + sample_step / 2)][\n 'predictions'].as_matrix(), axis=0)\n\n t_et = convert_timestamps(exp_root, t, \"video\", \"P3_eyetracker\")\n p3 = np.mean(\n 
p3_scene_features[p3_scene_features['timestamp'].between(t_et - sample_step / 2, t_et + sample_step / 2)][\n 'predictions'].as_matrix(), axis=0)\n\n t_et = convert_timestamps(exp_root, t, \"video\", \"P4_eyetracker\")\n p4 = np.mean(\n p4_scene_features[p4_scene_features['timestamp'].between(t_et - sample_step / 2, t_et + sample_step / 2)][\n 'predictions'].as_matrix(), axis=0)\n\n data[\"P1\"][index, 0] = t\n data[\"P1\"][index, 1:-1] = p1\n data[\"P1\"][index, -1] = loc_label[1]\n\n data[\"P2\"][index, 0] = t\n data[\"P2\"][index, 1:-1] = p2\n data[\"P2\"][index, -1] = loc_label[2]\n\n data[\"P3\"][index, 0] = t\n data[\"P3\"][index, 1:-1] = p3\n data[\"P3\"][index, -1] = loc_label[3]\n\n data[\"P4\"][index, 0] = t\n data[\"P4\"][index, 1:-1] = p4\n data[\"P4\"][index, -1] = loc_label[4]\n\n return data", "def get_ky_mat_pack(hyps: np.ndarray, name: str,\n s1: int, e1: int, s2: int, e2: int,\n same: bool, kernel, cutoffs, hyps_mask):\n\n\n # initialize matrices\n training_data = _global_training_data[name]\n size1 = (e1-s1)*3\n size2 = (e2-s2)*3\n k_mat = np.zeros([size1, size2])\n\n ds = [1, 2, 3]\n\n # calculate elements\n args = from_mask_to_args(hyps, hyps_mask, cutoffs)\n\n for m_index in range(size1):\n x_1 = training_data[int(math.floor(m_index / 3))+s1]\n d_1 = ds[m_index % 3]\n if (same):\n lowbound = m_index\n else:\n lowbound = 0\n for n_index in range(lowbound, size2):\n x_2 = training_data[int(math.floor(n_index / 3))+s2]\n d_2 = ds[n_index % 3]\n kern_curr = kernel(x_1, x_2, d_1, d_2, *args)\n # store kernel value\n k_mat[m_index, n_index] = kern_curr\n if (same):\n k_mat[n_index, m_index] = kern_curr\n\n return k_mat", "def prepare_metadata(devkit_archive, test_groundtruth_path):\n # Read what's necessary from the development kit.\n synsets, cost_matrix, raw_valid_groundtruth = read_devkit(devkit_archive)\n\n # Mapping to take WordNet IDs to our internal 0-999 encoding.\n wnid_map = dict(zip((s.decode('utf8') for s in synsets['WNID']),\n xrange(1000)))\n\n # Map the 'ILSVRC2010 ID' to our zero-based ID.\n ilsvrc_id_to_zero_based = dict(zip(synsets['ILSVRC2010_ID'],\n xrange(len(synsets))))\n\n # Map the validation set groundtruth to 0-999 labels.\n valid_groundtruth = [ilsvrc_id_to_zero_based[id_]\n for id_ in raw_valid_groundtruth]\n\n # Raw test data groundtruth, ILSVRC2010 IDs.\n raw_test_groundtruth = numpy.loadtxt(test_groundtruth_path,\n dtype=numpy.int16)\n\n # Map the test set groundtruth to 0-999 labels.\n test_groundtruth = [ilsvrc_id_to_zero_based[id_]\n for id_ in raw_test_groundtruth]\n\n # Ascertain the number of filenames to prepare appropriate sized\n # arrays.\n n_train = int(synsets['num_train_images'].sum())\n log.info('Training set: {} images'.format(n_train))\n log.info('Validation set: {} images'.format(len(valid_groundtruth)))\n log.info('Test set: {} images'.format(len(test_groundtruth)))\n n_total = n_train + len(valid_groundtruth) + len(test_groundtruth)\n log.info('Total (train/valid/test): {} images'.format(n_total))\n return n_train, valid_groundtruth, test_groundtruth, wnid_map", "def create_dataset(img_rows=128, img_cols=128):\n print('Creating original dataset from the raw data')\n # first, get the patients directory names located in the data/ directory. These names (e.g. 
'patient0001') will\n # be used for indexing (also avoid hidden files & folders)\n patients = [name for name in os.listdir(os.path.join(os.curdir, 'data/')) if not name.startswith('.')]\n\n # We sort this list to get the patients id in increasing order\n patients.sort(key=lambda s: s[-3:]) # sort according to last 3 characters\n\n # create an empty numpy.ndarray which will contain the images (resized to (img_rows, img_cols))\n images = np.ndarray((2 * len(patients), img_rows, img_cols), dtype=np.uint8) # 2 images per patient\n masks = np.ndarray((2 * len(patients), img_rows, img_cols), dtype=np.uint8) # 2 masks per patient\n\n # we now go through each patient's directory :\n idx = 0\n for patient in patients:\n\n for phase in ['ED', 'ES']:\n\n # read image & mask\n img, _, _, _ = load_mhd_data('data/{pa}/{pa}_4CH_{ph}.mhd'.format(pa=patient, ph=phase))\n mask, _, _, _ = load_mhd_data('data/{pa}/{pa}_4CH_{ph}_gt.mhd'.format(pa=patient, ph=phase))\n\n # resize the img & the mask to (img_rows, img_cols) to keep the network input manageable\n img = resize(img, (img_cols, img_rows), mode='reflect', preserve_range=True)\n mask = resize(mask, (img_cols, img_rows), mode='reflect', preserve_range=True)\n\n # now, save the resized image to the images np.ndarray\n images[idx] = img\n\n # save the corresponding mask to masks np.ndarray (at the same index)\n masks[idx] = mask\n\n idx += 1\n\n print('Created 2 np.ndarrays containing images & masks.')\n\n # Create directory to store files.\n directory = os.path.join(os.getcwd(), 'output/processed_data/')\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # save all ndarrays to a .npy files (for faster loading later)\n np.save('output/processed_data/images.npy', images)\n np.save('output/processed_data/masks.npy', masks)\n print('Saving to .npy files done: see files\\noutput/processed_data/images.npy & \\noutput/processed_data/masks.npy.')", "def _generate_dataset(self):\n # create train images\n train_path = os.path.join(self.root_dir, \"shapes\", \"train\", \"good\")\n os.makedirs(train_path, exist_ok=True)\n for i in range(self.num_train):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n image = result[\"image\"]\n imsave(os.path.join(train_path, f\"{i:03}.png\"), image, check_contrast=False)\n\n # create test images\n for test_category in self.test_shapes:\n test_path = os.path.join(self.root_dir, \"shapes\", \"test\", test_category)\n mask_path = os.path.join(self.root_dir, \"shapes\", \"ground_truth\", test_category)\n os.makedirs(test_path, exist_ok=True)\n os.makedirs(mask_path, exist_ok=True)\n # anomaly and masks. 
The idea is to superimpose anomalous shapes on top of correct ones\n for i in range(self.num_test):\n correct_shapes = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=[test_category],\n generate_mask=True,\n )\n correct_shapes = correct_shapes[\"image\"]\n image, mask = result[\"image\"], result[\"mask\"]\n image = np.minimum(image, correct_shapes) # since 255 is white\n imsave(os.path.join(test_path, f\"{i:03}.png\"), image, check_contrast=False)\n imsave(os.path.join(mask_path, f\"{i:03}_mask.png\"), mask, check_contrast=False)\n # good test\n test_good = os.path.join(self.root_dir, \"shapes\", \"test\", \"good\")\n os.makedirs(test_good, exist_ok=True)\n for i in range(self.num_test):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n )\n image = result[\"image\"]\n imsave(os.path.join(test_good, f\"{i:03}.png\"), image, check_contrast=False)", "def GetDataset():\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n classes1 = set()\n classes2 = set()\n for f in GetInputFiles():\n class1, class2, fold, fname = f.split('\\\\')[-4:]\n classes1.add(class1)\n classes2.add(class2)\n class1 = class1.split('_')[0]\n class2 = class2.split('_')[0]\n\n x = ReadAndTokenize(f)\n y = [int(class1 == 'positive'), int(class2 == 'truthful')]\n if fold == 'fold4':\n x_test.append(x)\n y_test.append(y)\n else:\n x_train.append(x)\n y_train.append(y)\n\n ### Make numpy arrays.\n x_test = MakeDesignMatrix(x_test)\n x_train = MakeDesignMatrix(x_train)\n y_test = numpy.array(y_test, dtype='float32')\n y_train = numpy.array(y_train, dtype='float32')\n\n dataset = (x_train, y_train, x_test, y_test)\n with open('dataset.pkl', 'wb') as fout:\n pickle.dump(dataset, fout)\n return dataset", "def preprocessing(train_raw_, test_raw_):\n \n undef = np.float64(-999.0)\n pred_dict = {'s':'1','b':'0', '?':'-1'}\n # drop 1st column (Id) and also 1st row with column names (\"[1:,\") \n train_raw = train_raw_[1:, :]\n test_raw = test_raw_[1:, :] \n \n # Change s(signal) and b(background) for s:1 and b:0, and change '?' 
for -1\n train_raw[:,1] = np.vectorize(pred_dict.get)(train_raw[:,1].astype(str))\n test_raw[:,1] = np.vectorize(pred_dict.get)(test_raw[:,1].astype(str))\n \n # Divide the dataset in four according to PRI_jet_num feature and cast to float\n train_data_jets = divide_dataset_by_jet(train_raw)\n test_data_jets = divide_dataset_by_jet(test_raw)\n \n # Remove columns with nan values or with standard deviation of 0\n test_data_jets, train_data_jets = clean_features(test_data_jets, train_data_jets, undef)\n \n # Standardize train and test sets to have mean=0 and std=1\n train_data_jets, test_data_jets = standardize(train_data_jets, test_data_jets)\n \n # Replace remaining undefined values by mean, median or zero\n train_data_mean, train_data_median, train_data_null = replace_nan(train_data_jets)\n test_data_mean, test_data_median, test_data_null = replace_nan(test_data_jets)\n \n return train_data_mean, train_data_median, train_data_null, test_data_mean, test_data_median, test_data_null", "def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def create_GT_masks(root_dir, background_dir, intrinsic_matrix,classes):\n list_all_images = load_obj(root_dir + \"all_images_adr\")\n training_images_idx = load_obj(root_dir + \"train_images_indices\")\n for i in range(len(training_images_idx)):\n img_adr = list_all_images[training_images_idx[i]]\n label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]\n regex = re.compile(r'\\d+')\n idx = regex.findall(os.path.split(img_adr)[1])[0]\n\n if i % 1000 == 0:\n print(str(i) + \"/\" + str(len(training_images_idx)) + \" finished!\")\n\n image = cv2.imread(img_adr)\n ID_mask = np.zeros((image.shape[0], image.shape[1]))\n U_mask = np.zeros((image.shape[0], image.shape[1]))\n V_mask = np.zeros((image.shape[0], image.shape[1]))\n\n ID_mask_file = root_dir + label + \\\n \"/ground_truth/IDmasks/color\" + str(idx) + \".png\"\n U_mask_file = root_dir + label + \\\n \"/ground_truth/Umasks/color\" + str(idx) + \".png\"\n V_mask_file = root_dir + label + \\\n \"/ground_truth/Vmasks/color\" + str(idx) + \".png\"\n\n tra_adr = root_dir + label + \"/data/tra\" + str(idx) + \".tra\"\n rot_adr = root_dir + label + \"/data/rot\" + str(idx) + \".rot\"\n rigid_transformation = get_rot_tra(rot_adr, tra_adr)\n\n # Read point Point Cloud Data\n ptcld_file = root_dir + label + \"/object.xyz\"\n pt_cld_data = np.loadtxt(ptcld_file, skiprows=1, usecols=(0, 1, 2))\n ones = np.ones((pt_cld_data.shape[0], 1))\n homogenous_coordinate = np.append(pt_cld_data[:, :3], ones, axis=1)\n\n # Perspective Projection to obtain 2D coordinates for masks\n homogenous_2D = intrinsic_matrix @ (rigid_transformation @ 
homogenous_coordinate.T)\n coord_2D = homogenous_2D[:2, :] / homogenous_2D[2, :]\n coord_2D = ((np.floor(coord_2D)).T).astype(int)\n x_2d = np.clip(coord_2D[:, 0], 0, 639)\n y_2d = np.clip(coord_2D[:, 1], 0, 479)\n ID_mask[y_2d, x_2d] = classes[label]\n\n if i % 100 != 0: # change background for every 99/100 images\n background_img_adr = background_dir + random.choice(os.listdir(background_dir))\n background_img = cv2.imread(background_img_adr)\n background_img = cv2.resize(background_img, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_AREA)\n background_img[y_2d, x_2d, :] = image[y_2d, x_2d, :]\n background_adr = root_dir + label + \"/changed_background/color\" + str(idx) + \".png\"\n mpimg.imsave(background_adr, background_img)\n\n # Generate Ground Truth UV Maps\n centre = np.mean(pt_cld_data, axis=0)\n length = np.sqrt((centre[0]-pt_cld_data[:, 0])**2 + (centre[1] -\n pt_cld_data[:, 1])**2 + (centre[2]-pt_cld_data[:, 2])**2)\n unit_vector = [(pt_cld_data[:, 0]-centre[0])/length, (pt_cld_data[:,\n 1]-centre[1])/length, (pt_cld_data[:, 2]-centre[2])/length]\n U = 0.5 + (np.arctan2(unit_vector[2], unit_vector[0])/(2*np.pi))\n V = 0.5 - (np.arcsin(unit_vector[1])/np.pi)\n U_mask[y_2d, x_2d] = U\n V_mask[y_2d, x_2d] = V\n\n # Saving ID, U and V masks after using the fill holes function\n ID_mask, U_mask, V_mask = fill_holes(ID_mask, U_mask, V_mask)\n cv2.imwrite(ID_mask_file, ID_mask)\n mpimg.imsave(U_mask_file, U_mask, cmap='gray')\n mpimg.imsave(V_mask_file, V_mask, cmap='gray')", "def setUp(self):\n output = np.zeros((1, 5, 2))\n target = np.zeros((1, 5, 2))\n # first channel\n output[0, 0] = [10, 4]\n target[0, 0] = [10, 0]\n # second channel\n output[0, 1] = [10, 18]\n target[0, 1] = [10, 10]\n # third channel\n output[0, 2] = [0, 0]\n target[0, 2] = [0, -1]\n # fourth channel\n output[0, 3] = [40, 40]\n target[0, 3] = [30, 30]\n # fifth channel\n output[0, 4] = [20, 10]\n target[0, 4] = [0, 10]\n\n gt_instances = InstanceData()\n gt_instances.keypoints = target\n gt_instances.keypoints_visible = np.array(\n [[True, True, False, True, True]])\n\n pred_instances = InstanceData()\n pred_instances.keypoints = output\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict()\n }\n\n self.data_batch = [data]\n self.data_samples = [data_sample]", "def train():\n\n # Load camera parameters\n rcams = cameras.load_cameras()\n\n # Load 3d data and 2d projections\n full_train_set_3d, full_test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d =\\\n data_utils.read_3d_data( FLAGS.camera_frame, rcams, FLAGS.origin_bc, FLAGS.augment_data,\n FLAGS.procrustes, FLAGS.lowpass )\n \n # Read stacked hourglass 2D predictions\n full_train_set_2d, full_test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = \\\n data_utils.read_2d_predictions( FLAGS.origin_bc, FLAGS.augment_data )\n \n print(\"\\n[+] done reading and normalizing data\")\n # Getting the number of training and test subjects\n tr_subj = 0\n for v in full_train_set_3d.values():\n tr_subj += v.shape[0]\n te_subj = 0\n for v in full_test_set_3d.values():\n te_subj += v.shape[0]\n print(\"{0} training subjects, {1} test subjects\".format(tr_subj, te_subj))\n print(dim_to_use_2d)\n print(dim_to_use_3d)\n # Un-normalizing data for visualizations\n unNorm_ftrs2d = data_utils.unNormalize_dic(full_train_set_2d, data_mean_2d, data_std_2d, dim_to_use_2d)\n unNorm_ftrs3d = data_utils.unNormalize_dic(full_train_set_3d, data_mean_3d, 
data_std_3d, dim_to_use_3d)\n unNorm_ftes3d = data_utils.unNormalize_dic(full_test_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n # Visualize the data\n viz.visualize_train_sample(unNorm_ftrs2d, unNorm_ftrs3d, FLAGS.camera_frame)\n viz.visualize_files_oneatatime(unNorm_ftrs3d, unNorm_ftes3d)\n\n # Getting only the dimensions to use (get rid of body coxas, other limb, antennas, abdomen\n train_set_3d, train_set_2d, test_set_3d, test_set_2d = {}, {}, {}, {}\n for k in full_train_set_3d:\n (f, c) = k\n train_set_3d[k] = full_train_set_3d[k][:, dim_to_use_3d]\n train_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_train_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n for k in full_test_set_3d:\n (f, c) = k\n test_set_3d[k] = full_test_set_3d[k][:, dim_to_use_3d]\n test_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_test_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n \n print(\"3D data mean:\")\n print(data_mean_3d)\n print(\"3D data std:\")\n print(data_std_3d)\n\n print(\"2D data mean:\")\n print(data_mean_2d)\n print(\"2D data std:\")\n print(data_std_2d)\n \n input(\"Press Enter to continue...\")\n\n # Avoid using the GPU if requested\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(\n device_count=device_count,\n allow_soft_placement=True )) as sess:\n\n # === Create the model ===\n print(\"[*] creating %d bi-layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model( sess, FLAGS.batch_size )\n model.train_writer.add_graph( sess.graph )\n print(\"[+] model created\")\n \n #=== This is the training loop ===\n step_time, loss, val_loss = 0.0, 0.0, 0.0\n current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1\n previous_losses = []\n\n step_time, loss = 0, 0\n current_epoch = 0\n log_every_n_batches = 100\n losses, errors, joint_errors = [], [], []\n for _ in range( FLAGS.epochs ):\n current_epoch = current_epoch + 1\n\n # === Load training batches for one epoch ===\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( train_set_2d, train_set_3d, FLAGS.camera_frame, training=True )\n nbatches = len( encoder_inputs )\n print(\"[*] there are {0} train batches\".format( nbatches ))\n start_time, loss = time.time(), 0.\n # === Loop through all the training batches ===\n for i in range( nbatches ):\n\n if (i+1) % log_every_n_batches == 0:\n # Print progress every log_every_n_batches batches\n print(\"Working on epoch {0}, batch {1} / {2}...\".format( current_epoch, i+1, nbatches),end=\"\" )\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n step_loss, loss_summary, lr_summary, _ =\\\n model.step( sess, enc_in, dec_out, FLAGS.dropout, isTraining=True )\n\n if (i+1) % log_every_n_batches == 0:\n # Log and print progress every log_every_n_batches batchespixels = pixels / pixels[2,:]\n model.train_writer.add_summary( loss_summary, current_step )\n model.train_writer.add_summary( lr_summary, current_step )\n step_time = (time.time() - start_time)\n start_time = time.time()\n print(\"done in {0:.2f} ms\".format( 1000*step_time / log_every_n_batches ) )\n\n loss += step_loss\n current_step += 1\n # === end looping through training batches ===\n\n loss = loss / nbatches\n losses.append(loss)\n print(\"=============================\\n\"\n \"Global step: %d\\n\"\n \"Learning rate: %.2e\\n\"\n \"Train loss avg: %.4f\\n\"\n \"=============================\" % (model.global_step.eval(),\n model.learning_rate.eval(), loss) )\n # === End training for an epoch 
===\n\n # === Testing after this epoch ===\n isTraining = False\n \n n_joints = len(data_utils.DIMENSIONS_TO_USE)\n if FLAGS.origin_bc:\n n_joints -= len(data_utils.ROOT_POSITIONS)\n\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( test_set_2d, test_set_3d, FLAGS.camera_frame, training=False)\n\n total_err, coordwise_err, joint_err, step_time, loss = evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n current_step, encoder_inputs, decoder_outputs, current_epoch )\n\n print(\"=============================\\n\"\n \"Step-time (ms): %.4f\\n\"\n \"Val loss avg: %.4f\\n\"\n \"Val error avg (mm): %.2f (%.2f, %.2f, %.2f)\\n\"\n \"=============================\" % ( 1000*step_time, loss, total_err,\n coordwise_err[0], coordwise_err[1], coordwise_err[2] ))\n\n for i in range(n_joints):\n # 6 spaces, right-aligned, 5 decimal places\n print(\"Error in joint {0:02d} (mm): {1:>5.2f}\".format(i+1, joint_err[i]))\n print(\"=============================\")\n errors.append(coordwise_err)\n joint_errors.append(joint_err)\n # Log the error to tensorboard\n summaries = sess.run( model.err_mm_summary, {model.err_mm: total_err} )\n model.test_writer.add_summary( summaries, current_step )\n\n # Save the model\n print( \"Saving the model... \", end=\"\" )\n start_time = time.time()\n model.saver.save(sess, os.path.join(train_dir, 'checkpoint'), global_step=current_step )\n print( \"done in {0:.2f} ms\".format(1000*(time.time() - start_time)) )\n\n # Reset global time and loss\n step_time, loss = 0, 0\n\n sys.stdout.flush()\n # Save losses for future plots\n def print_list_tofile(l, filename):\n with open(filename, 'wb') as f:\n pickle.dump(l, f)\n print_list_tofile(losses, train_dir+\"/losses.pkl\")\n print_list_tofile(errors, train_dir+\"/errors.pkl\")\n print_list_tofile(joint_errors, train_dir+\"/joint_errors.pkl\")", "def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def main(seed, filter_, num_classes, setup, model_name, images_dir, precision_mode, test):\n f1, f2 = filter_\n model_name = 'flex_random_seed_{}_resnet_manual_highres_center_only_f1_{}_f2_{}'.format(seed, f1, f2)\n frozen_graph_filepath = './Models/Frozen_graphs/{}_{}/'.format(f1,f2) + model_name + '_frozen_graph.pb'\n frozen_graph, x_tensor, y_tensor = trt_frozen_graph_and_tensors(\n model_name=model_name, \n frozen_graph_filepath=frozen_graph_filepath, \n precision_mode=precision_mode\n )\n\n elapsed_time_full_dataset = []\n sum_of_confusion_matrices = np.zeros((6, 6))\n \n with tf.compat.v1.Session(graph=frozen_graph) as sess:\n for image_file in [img for img in os.listdir(images_dir) if img.endswith('.JPG')]:\n\n img = Image.open(images_dir + 
image_file)\n sx,sy = img.size\n\n print(\"Image size is %i x %i\" % (sx,sy)) # sx = 4912, sy = 3264\n print(\"Loading image %s\" % image_file)\n\n img_np = np.array(img)/255.0\n del img\n\n print(\"Predicting for image %s (%i x %i pixel)\" % (image_file,sx,sy))\n\n start = time.time()\n predictions_flex = sess.run(y_tensor, feed_dict={x_tensor:np.expand_dims(img_np, 0)})\n elapsed = time.time() - start\n elapsed_time_full_dataset.append(elapsed)\n del img_np #deleting afterwards to not take the deleting time into account\n\n print(\"Prediction took %f seconds (inference on full image)\" % elapsed)\n print(\"Merging predictions\")\n # merge the predictions on the quarter images\n predictions_flex_combined = np.zeros(predictions_flex.shape)\n\n elapsed = time.time()-start\n if embedded_version:\n print(\"Prediction took %f seconds (inference on split up image)\" % elapsed)\n\n if embedded_version:\n predictions_flex = predictions_flex_combined\n\n if save_annotations:\n print(\"Computing annotations...\")\n annotations = []\n d = 4\n for x in range(100, sx-101, d):\n for y in range(100, sy-101, d):\n x0 = int(round(float(x-100)/4) + 15)\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n annotations.append((probs_flex, x, y))\n\n if test: # add a prefix for test to not replace real experiments\n model_name = 'TEST_' + model_name\n\n # saving annotations\n annotation_dir = images_dir.replace('Data', 'Results/seeds/annotations_trt') + image_file\n annotate_and_save(annotations, d, annotation_dir, model_name, precision_mode)\n classes_image = annotate_and_save_per_class(\n annotations, \n d, \n annotation_dir, \n model_name, \n precision_mode\n )\n\n labels = load_labels(annotation_dir)\n confusion_matrix = np.zeros((num_classes, num_classes))\n for (c_name, x, y) in labels:\n if 100 <= x < sx-101 and 100 <= y < sy-101:\n x0 = int(round(float(x-100)/4) + 15 )\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n\n predicted_class = np.argmax(probs_flex)\n c = train_model.get_classes().index(c_name)\n confusion_matrix[c, predicted_class] += 1\n print(confusion_matrix)\n sum_of_confusion_matrices += confusion_matrix\n\n print(sum_of_confusion_matrices)\n sum_of_cm_fp = './Results/seeds/preds_trt/{}/{}_{}/sum_of_cm_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n elapsed_time_fp = './Results/seeds/elapsed_trt/{}/{}_{}/time_taken_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n\n\n np.save(sum_of_cm_fp, sum_of_confusion_matrices)\n np.save(elapsed_time_fp, elapsed_time_full_dataset)\n tf.reset_default_graph()", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # 
create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def build_model(input_classes,output_classes):\n dimensions = 20\n inputs = []\n embedded_outputs = []\n for i in input_classes:\n input_layer = Input((1,))\n inputs.append(input_layer)\n embedder = Embedding(input_dim=i,output_dim=dimensions,input_length=1,embeddings_constraint=UnitNorm(axis=0))\n embedded_layer = embedder(input_layer)\n embedded_outputs.append(embedded_layer)\n\n embedded_concats = Concatenate()(embedded_outputs)\n flatten_layer = Flatten()\n\n dense_layer = Dense(output_classes)\n\n flattened_output = flatten_layer(embedded_concats)\n dense_output = dense_layer(flattened_output)\n\n # dense_output = dense_layer(embedded_concats)\n\n model = Model(inputs,dense_output)\n print(model.summary())\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')\n\n return model", "def classify_lenet5(batch_size=500, output_size=20):\n\n rng = numpy.random.RandomState(23455)\n\n\n # start-snippet-1\n x = T.matrix('x') # the data is presented as rasterized images\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '... 
building the model'\n\n # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\n # (28, 28) is the size of MNIST images.\n layer0_input = x.reshape((batch_size, 1, 37, 23))\n\n # Construct the first convolutional pooling layer:\n # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)\n # maxpooling reduces this further to (24/2, 24/2) = (12, 12)\n # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)\n layer0 = LeNetConvPoolLayer(\n rng,\n input=layer0_input,\n image_shape=(batch_size, 1, 37, 23),\n filter_shape=(20, 1, 4, 2),\n poolsize=(2, 2),\n )\n\n # layer1 = LeNetConvPoolLayer(\n # rng,\n # input=layer0.output,\n # image_shape=(batch_size, 20, 17, 11),\n # filter_shape=(50, 20, 4, 2),\n # poolsize=(2, 2),\n # )\n #\n # layer4 = LeNetConvPoolLayer(\n # rng,\n # input=layer1.output,\n # image_shape=(batch_size, 50, 7, 5),\n # filter_shape=(100, 50, 4, 2),\n # poolsize=(2, 2),\n # )\n\n layer2_input = layer0.output.flatten(2)\n\n # construct a fully-connected sigmoidal layer\n layer2 = HiddenLayer(\n rng,\n input=layer2_input,\n n_in=3740,\n n_out=output_size,\n activation=T.tanh,\n use_bias=True\n )\n\n # layer5 = HiddenLayer(\n # rng,\n # input=layer2.output,\n # n_in=200,\n # n_out=output_size,\n # activation=T.tanh,\n # use_bias=True\n # )\n\n # classify the values of the fully-connected sigmoidal layer\n layer3 = LogisticRegression(input=layer2.output, n_in=output_size, n_out=2)\n\n model_params = pickle.load(open('../model/cnn_dist_'+str(output_size)+'.pkl'))\n #\n layer0.W = theano.shared(\n value=numpy.array(\n model_params[2].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer0.b = theano.shared(\n value=numpy.array(\n model_params[3].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer1.W = theano.shared(\n # value=numpy.array(\n # model_params[-4].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer1.b = theano.shared(\n # value=numpy.array(\n # model_params[-3].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n #\n # layer4.W = theano.shared(\n # value=numpy.array(\n # model_params[-6].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer4.b = theano.shared(\n # value=numpy.array(\n # model_params[-5].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n\n layer2.W = theano.shared(\n value=numpy.array(\n model_params[0].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer2.b = theano.shared(\n value=numpy.array(\n model_params[1].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer5.W = theano.shared(\n # value=numpy.array(\n # model_params[-10].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer5.b = theano.shared(\n # value=numpy.array(\n # model_params[-9].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n\n layer3.W = theano.shared(\n value=numpy.array(\n model_params[4].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer3.b = theano.shared(\n value=numpy.array(\n model_params[5].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # params = layer3.params + layer5.params + 
layer2.params + layer4.params + layer1.params + layer0.params\n\n datasets = load_data(None)\n\n sets = ['train', 'dev', 'test']\n dimension = [20000, 20000, 20000]\n for k in range(3):\n if k == 0:\n classify_set_x, classify_set_y, classify_set_z, classify_set_m, classify_set_c, classify_set_b= datasets[k]\n else:\n classify_set_x, classify_set_y, classify_set_z= datasets[k]\n\n # compute number of minibatches for training, validation and testing\n n_classify_batches = classify_set_x.get_value(borrow=True).shape[0]\n n_classify_batches /= batch_size\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n classify = theano.function(\n [index],\n layer2.output,\n givens={\n x: classify_set_x[index * batch_size: (index + 1) * batch_size],\n }\n )\n\n r = []\n\n for i in xrange(n_classify_batches):\n m = classify(i)\n r.extend(m)\n r = np.array(r)\n print r.shape\n r = np.append(r, np.reshape(classify_set_y.eval(),(dimension[k], 1)), 1)\n numpy.savetxt('../extractedInformation/cnn_dist_'+str(output_size)+'/'+sets[k]+'.csv', r, delimiter=\",\")", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def get_models(self, offset=0, sum_=False):\n \n self.models = {}\n self.predict = pd.DataFrame()\n min_value = min(self.conf[\"w_sizes\"].values())\n \n output_width = int(30/self.conf[\"time_step\"])\n \n \n for name in self.conf[\"w_sizes\"].keys():\n \n size = self.conf[\"w_sizes\"][name]\n self.create_train_test(name=name, f_size=size, offset=offset, output_width=output_width, sum_=sum_)\n model, loss, val_loss = get_model(name, self.trainX, self.trainY)\n \n pred = pd.DataFrame({name: model.predict(self.testX).tolist()},\n index=range(size-min_value, len(self.testY)+(size-min_value)))\n \n pred[name] = pred[name].apply(lambda x: np.array(x))\n \n self.predict = pd.concat([self.predict, pred], axis=1)\n \n self.models[name] = model\n \n del model, pred\n \n self.create_train_test(name=\"CNN\", f_size=min_value, offset=offset, output_width=output_width, sum_=sum_)\n self.predict[\"test\"] = self.testY.tolist()\n self.create_train_test(name=\"MLP\", f_size=min_value, offset=offset, output_width=output_width, sum_=sum_)\n self.predict[\"test_dis\"] = self.testY.tolist()\n \n self.predict.dropna(inplace=True)", "def load_data5():\n# dirname = 'cifar-10-batches-py'\n# origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n# path = get_file(dirname, origin=origin, untar=True)\n# path= './cifar-10-batches-py'\n (x_train, 
y_train), (x_test, y_test) = cifar10.load_data()\n# Below shows a test class has 999 examples instead of the claimed 1000\n# tclasscount=np.zeros((10,), dtype=int)\n# for i in range(0, len(y_test)-1):\n# tclasscount[y_test[i][0]]= tclasscount[y_test[i][0]] + 1\n# print('Test class count',tclasscount)\n num_train_samples = 50000\n num_5_class = 25000\n num_5_test = 4999 # should be 5000 if all the categories had 1000 in them but they do not. One is missing.\n print('x_train shape orig:', x_train.shape)\n print('More:', x_train.shape[1:])\n print('y_test shape',y_test.shape)\n\n x5_train = np.empty((num_5_class, 32, 32, 3), dtype='uint8')\n y5_train = np.empty((num_5_class,), dtype='uint8')\n\n count=0\n\n for i in range(0, len(y_train)-1):\n if (y_train[i][0] == 2) or (y_train[i][0] == 3) or (y_train[i][0] == 4) or (y_train[i][0] == 5) or (y_train[i][0] == 7):\n x5_train[count]=x_train[i]\n y5_train[count]=y_train[i]\n count=count+1\n \n # find test data of interest\n count=0\n x5_test=np.empty((num_5_test, 32, 32, 3), dtype='uint8')\n y5_test= np.empty((num_5_test,), dtype='uint8')\n\n for i in range(0, len(y_test)-1):\n if (y_test[i][0] == 2) or (y_test[i][0] == 3) or (y_test[i][0] == 4) or (y_test[i][0] == 5) or (y_test[i][0] == 7):\n x5_test[count]=x_test[i]\n y5_test[count]=y_test[i]\n count=count+1\n# Below shows class 7 is only 999 and not 1000 examples!!! One horse got away it seems.\n# if(y_test[i][0] == 2):\n# c2=c2+1\n# if(y_test[i][0] == 3):\n# c3=c3+1\n# if(y_test[i][0] == 4):\n# c4=c4+1\n# if(y_test[i][0] == 5):\n# c5=c5+1\n# if(y_test[i][0] == 7):\n# c7=c7+1\n# print('c2count, c3count, c4count, c5count, c7count',c2,c3,c3,c5,c7)\n# print('y5tstshape',y5_test.shape, count)\n# print('y5tst',y5_test)\n# return (x_train, y_train), (x_test, y_test)\n return (x5_train, y5_train), (x5_test, y5_test)", "def __init__(self, device=\"cuda:0\", *args, **kwargs):\n source_file_wtsd = \"/g/kreshuk/data/leptin/sourabh_data_v1/Segmentation_results_fused_tp_1_ch_0_Masked_WatershedBoundariesMergeTreeFilter_Out1.tif\"\n source_file_wtsd = \"/g/kreshuk/hilt/projects/data/leptin_fused_tp1_ch_0/Masked_WatershedBoundariesMergeTreeFilter_Out1.h5\"\n # wtsd = torch.from_numpy(np.array(imread(source_file_wtsd).astype(np.long))).to(device)\n wtsd = torch.from_numpy(h5py.File(source_file_wtsd, \"r\")[\"data\"][:].astype(np.long)).to(device)\n slices = [0, 157, 316]\n label_1 = [1359, 886, 1240]\n label_2 = [1172, 748, 807]\n label_3 = [364, 1148, 1447]\n m1, m2, m3, m4, m5 = [], [], [], [], []\n self.outer_cntr_ds, self.inner_cntr_ds, self.celltype_1_ds, self.celltype_2_ds, self.celltype_3_ds = [], [], [], [], []\n for slc, l1, l2, l3 in zip(slices, label_1, label_2, label_3):\n bg = wtsd[:, slc, :] == 1\n bg_cnt = find_contours(bg.cpu().numpy(), level=0)\n cnt1 = bg_cnt[0] if bg_cnt[0].shape[0] > bg_cnt[1].shape[0] else bg_cnt[1]\n cnt2 = bg_cnt[1] if bg_cnt[0].shape[0] > bg_cnt[1].shape[0] else bg_cnt[0]\n for m, cnt in zip([m1, m2], [cnt1, cnt2]):\n mask = torch.zeros_like(wtsd[:, slc, :]).cpu()\n mask[np.round(cnt[:, 0]), np.round(cnt[:, 1])] = 1\n m.append(torch.from_numpy(binary_fill_holes(mask.long().cpu().numpy())).to(device).sum().item())\n\n mask = wtsd[:, slc, :] == l1\n m3.append(mask.long().sum().item())\n cnt3 = find_contours(mask.cpu().numpy(), level=0)[0]\n mask = wtsd[:, slc, :] == l2\n m4.append(mask.long().sum().item())\n cnt4 = find_contours(mask.cpu().numpy(), level=0)[0]\n mask = wtsd[:, slc, :] == l3\n m5.append(mask.long().sum().item())\n cnt5 = find_contours(mask.cpu().numpy(), 
level=0)[0]\n\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt1[:, 0], cnt1[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt2[:, 0], cnt2[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt3[:, 0], cnt3[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt4[:, 0], cnt4[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt5[:, 0], cnt5[:, 1]] = 1\n # plt.imshow(img);plt.show()\n\n self.outer_cntr_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt1, tolerance=1.2)).to(device)))\n self.inner_cntr_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt2, tolerance=1.2)).to(device)))\n self.celltype_1_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt3, tolerance=1.2)).to(device)))\n self.celltype_2_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt4, tolerance=1.2)).to(device)))\n self.celltype_3_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt5, tolerance=1.2)).to(device)))\n\n self.masses = [np.array(m1).mean(), np.array(m2).mean(), np.array(m3 + m4 + m5).mean()]\n self.fg_shape_descriptors = self.celltype_1_ds + self.celltype_2_ds + self.celltype_3_ds", "def build_multi_blocks_models(feature_dim, feat_group_start_end_dict, output_scheme=\"regression\", num_classes=num_classes):\n\tdivan_feat_dim = feature_dim\n\tfor k in feat_group_start_end_dict:\n\t\tdivan_feat_dim = divan_feat_dim - feat_group_start_end_dict[k][2]\n\tinput_atac = keras.layers.Input(shape=[feat_group_start_end_dict['atac'][2]], name=\"atac_input\")\n\tinput_rnaseq = keras.layers.Input(shape=[feat_group_start_end_dict['rnaseq'][2]], name=\"rnaseq_input\")\n\tinput_wgbs = keras.layers.Input(shape=[feat_group_start_end_dict['wgbs'][2]], name=\"wgbs_input\")\n\tinput_divan = keras.layers.Input(shape=[divan_feat_dim], name=\"divan_input\")\n\n\thidden_atac = keras.layers.Dense(30, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(input_atac)\n\thidden_rnaseq = keras.layers.Dense(100, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(input_rnaseq)\n\thidden_wgbs = keras.layers.Dense(60, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(input_wgbs)\n\thidden_divan_1 = keras.layers.Dense(800, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(input_divan)\n\tdropout1 = keras.layers.AlphaDropout(rate=0.5)(hidden_divan_1)\n\thidden_divan_2 = keras.layers.Dense(200, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(dropout1)\n\tconcat = keras.layers.concatenate([hidden_atac, hidden_rnaseq, hidden_wgbs, hidden_divan_2])\n\thidden3 = keras.layers.Dense(200, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(dropout1)\n\t# trait-specific network\n\thidden_trait_1 = keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(concat)\n\thidden_trait_2 = keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(concat)\n\thidden_trait_3 = 
keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(concat)\n\thidden_trait_4 = keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(concat)\n\thidden_trait_5 = keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(concat)\n\t#bn3 = keras.layers.BatchNormalization()(hidden3)\n\tif (output_scheme==\"regression\"):\n\t\t#outputs = [keras.layers.Dense(1, hidden3) for i in range(5)] #\n\t\tout1 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out1\")(hidden_trait_1)\n\t\tout2 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out2\")(hidden_trait_2)\n\t\tout3 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out3\")(hidden_trait_3)\n\t\tout4 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out4\")(hidden_trait_4)\n\t\tout5 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out5\")(hidden_trait_5)\n\telif (output_scheme == \"ordinal_clf\"):\n\t\tout1 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out1\")(hidden_trait_1)\n\t\tout2 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out2\")(hidden_trait_2)\n\t\tout3 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out3\")(hidden_trait_3)\n\t\tout4 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out4\")(hidden_trait_4)\n\t\tout5 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out5\")(hidden_trait_5)\n\tmodel = keras.models.Model(inputs=[input_atac, input_rnaseq, input_wgbs, input_divan],\n\t\t\t\t\t\t\t outputs=[out1, out2, out3, out4, out5])\n\tif (output_scheme == \"regression\"):\n\t\tmodel.compile(loss=\"mse\", \n\t\t\t\t\t optimizer=tf.keras.optimizers.Adam(0.0001),\n\t\t\t\t\t metrics=['mae', custom_r2_metric])\n\telif (output_scheme == \"ordinal_clf\"):\n\t\tmodel.compile(loss=tf.keras.losses.CategoricalCrossentropy(), \n\t\t\t\t\t optimizer=tf.keras.optimizers.Adam(0.001))\n\tprint(model.summary())\n\treturn model" ]
[ "0.60946685", "0.5913993", "0.58595705", "0.5826413", "0.58137065", "0.5807853", "0.5747172", "0.5703104", "0.5692386", "0.56754106", "0.56699955", "0.56411266", "0.56325924", "0.5626313", "0.56190795", "0.5594807", "0.5573859", "0.55735236", "0.5554242", "0.5549842", "0.5542786", "0.5533272", "0.55326915", "0.55195916", "0.5512725", "0.55043864", "0.5494706", "0.54869723", "0.5476328", "0.5462396" ]
0.63189304
0
Generate outputs from a noncompact TrainingSet to use with Keras' 'fit_generator' function. If 'n_crops' is nonzero, the Iterator crops n_crops 20x20 regions from each image before feeding them.
def flow(self, batch_size=32, output='both', crops=0): while True: for dataset in self.input_sets: X = self.training_set['input/'+dataset] y = self.training_set['target/'+dataset] y_seg = self.training_set['seg_map/'+dataset] for i in range(int(math.ceil(X.shape[0]/2000))): index = list(range(0,X.shape[0])) sample = random.sample(index, batch_size) sample.sort() X_batch = X[sample, ...] y_batch = y[sample, ...] y_seg_batch = y_seg[sample, ...] X_batch = self.augment(X_batch) if crops > 0: (X_batch, y_batch, y_seg_batch) = _augmentors.random_crops( X_batch, y_batch, y_seg_batch, n_crops=crops, crop_dim=20) if output=='both': yield (X_batch, [y_batch, y_seg_batch]) elif output=='seg': yield (X_batch, y_seg) elif output=='density': yield (X_batch, y_batch) else: raise Exception('output must be "density", "seg" or "both"')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crop_generator(batches, crop_length):\n while True:\n batch_x, batch_y = next(batches)\n batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))\n for i in range(batch_x.shape[0]):\n batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))\n yield (batch_crops, batch_y)", "def generate_transformed_data(self):\n for each_class in self.classes:\n class_directory = \"data/test/test_folder/\"\n class_directory += each_class\n class_directory += \"_test.jpg\"\n test_image = keras.preprocessing.image.load_img(class_directory)\n image_set = keras.preprocessing.image.img_to_array(test_image)\n image_set = image_set.reshape((1,) + image_set.shape)\n i = 0\n for each_batch in self.transformed_data_generator.flow(image_set, batch_size=1,\n save_to_dir=\"data/test_transformed/test_folder\", save_prefix=each_class, save_format=\"jpeg\"):\n i += 1\n if i > 20:\n break", "def two_step_generator(classes: list, paths_list: list, imgs_per_class: int, shape: tuple,\n nb_win: int, greys: bool, nb_to_gen: int, img_gen: ImageDataGenerator) -> list:\n \n datawin = list() \n datagen = list()\n \n for class_ in classes:\n print(class_)\n \n # Images paths list\n class_imgs_path = [paths_list[k] for k in range(len(paths_list)) if class_ in paths_list[k]]\n\n # Randomly choose images\n class_imgs_subset = np.random.choice(class_imgs_path, size=imgs_per_class, replace=False)\n\n # Get images\n class_imgs = get_imgs(class_imgs_subset)\n\n # Step 1: resize and crop on sliding windows\n class_new_imgs = create_windows_imgs(class_imgs, shape=shape, nb_win=nb_win, greys=greys)\n class_new_imgs = np.array(flat_list(class_new_imgs))\n datawin.append(class_new_imgs)\n \n # Step 2: DataGenerator\n class_datagen = datagen_class(class_new_imgs, nb_to_gen, img_gen)\n class_datagen = class_datagen.astype(int)\n\n datagen.append(class_datagen)\n \n return datawin, datagen", "def random_crop_generator(batches, crop_length):\n while True:\n batch_x, batch_y = next(batches)\n batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))\n for i in range(batch_x.shape[0]):\n batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))\n yield (batch_crops, batch_y)", "def toy_preprocess_scans(self, scan_ids, width, height, depth, clipping=True, loop=False,\n seed=42, shuffle=False):\n # Initialize image transformer\n kwds_generator = {'rotation_range': 5,\n 'width_shift_range': 0.03,\n 'height_shift_range': 0.03,\n 'zoom_range': 0.03,\n 'data_format': \"channels_first\", # z axis is first\n }\n image_gen = image_prep.ImageDataGenerator(**kwds_generator)\n\n scan_gen = self.preprocess_scans(scan_ids, width, height, depth, clipping, loop, shuffle)\n for ct_scan, origin, spacing in scan_gen:\n yield ct_scan, origin, spacing\n transformed_scan = image_gen.random_transform(ct_scan, seed=seed)\n if seed is not None:\n seed += 1\n yield transformed_scan, origin, spacing", "def generate_n_augmented_images(data_dirname: str, root_dirname: str, n=20) -> None:\n augmentor = set_augmentor()\n preprocessing_fun = lambda *args: pre_processing_function(*args, augmentor=augmentor)\n generator = set_generator_classifier(data_dirname, preprocessing_fun, batch_size=1, number_of_images=n)\n generator.not_batch = True\n # NOTICE: This generator can be used for keras and pytorch in case that instead of saving images one desires to\n # augment images on the fly. 
Use a number larger than 1 for the batch size when training directly a CNN.\n\n # Save the new generated images\n counter_labels = {}\n for image, label in generator:\n counter = counter_labels.get(label, 0)\n output_filename = label + '_' + str(counter_labels.get(label, 0)) + '.jpg'\n save_dirname = os.path.join(root_dirname, label)\n if not os.path.isdir(save_dirname):\n os.makedirs(save_dirname)\n\n filename = os.path.join(save_dirname, output_filename)\n imsave(filename, image.astype(np.uint8))\n counter_labels[label] = counter + 1\n\n print(f'Finished image generation. The output images were saved in {root_dirname}')", "def build_generator(self):\n noise_shape = (self.dimensions_noise,)\n\n # This block of code can be a little daunting, but essentially it automatically calculates the required starting\n # array size that will be correctly upscaled to our desired image size.\n #\n # We have 5 Upsample2D layers which each double the images width and height, so we can determine the starting\n # x size by taking (x / 2^upsample_count) So for our target image size, 256x192, we do the following:\n # x = (192 / 2^5), y = (256 / 2^5) [x and y are reversed within the model]\n # We also need a 3rd dimension which is chosen relatively arbitrarily, in this case it's 64.\n model = Sequential()\n model.add(\n Dense(\n self.starting_filters\n * (self.img_size[0] // (2 ** self.upsample_layers))\n * (self.img_size[1] // (2 ** self.upsample_layers)),\n activation=\"relu\",\n input_shape=noise_shape,\n )\n )\n model.add(\n Reshape(\n (\n (self.img_size[0] // (2 ** self.upsample_layers)),\n (self.img_size[1] // (2 ** self.upsample_layers)),\n self.starting_filters,\n )\n )\n )\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 6x8 -> 12x16\n model.add(Conv2D(1024, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 12x16 -> 24x32\n model.add(Conv2D(512, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 24x32 -> 48x64\n model.add(Conv2D(256, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 48x64 -> 96x128\n model.add(Conv2D(128, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(UpSampling2D()) # 96x128 -> 192x256\n model.add(Conv2D(64, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(Conv2D(32, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding=\"same\"))\n model.add(Activation(\"tanh\"))\n\n model.summary()\n\n noise = Input(shape=noise_shape)\n img = model(noise)\n\n return Model(noise, img)", "def get_crops(x_train, y_train, offset=4):\n\ttopleft = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, offset, offset, 4 - offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\ttopright = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, 4 - offset, offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotleft = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, offset, 4 - offset, 4 - 
offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotright = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, 4 - offset, 4 - offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tcenter = iaa.Sequential([\n\t\tiaa.Crop(px=(2, 2, 2, 2)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\taugs = [topleft, topright, botleft, botright, center]\n\n\taug_imgs = []\n\tfor aug in tqdm(augs):\n\t\taug_imgs.append(aug.augment_images(x_train * 255))\n\n\taug_x_train = [item for sublist in aug_imgs for item in sublist]\n\taug_y_train = y_train * 5\n\n\treturn aug_x_train, aug_y_train", "def genTrainingSet(set_of_CSVs, file_to_classify, train_size = 5):\n set_of_csvs_minus_target = copy.copy(set_of_CSVs)\n # remove the file we want to classify\n set_of_csvs_minus_target.remove(file_to_classify)\n\n # extract out the random noise files\n # first, set the seed\n random.seed(time.time())\n # now sample\n return_list = random.sample(set_of_csvs_minus_target, train_size)\n return return_list", "def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch", "def generate_one(self, n_images, min_correct=None, p_correct=0.5, n_correct=None, n_caps_per_image=1, configs=None):\n random.seed() # For multiprocessing (TODO: can we avoid this?)\n imgs = np.zeros((n_images, 3, c.DIM, c.DIM), dtype=np.uint8)\n labels = np.zeros((n_images, ), dtype=np.uint8)\n if configs is not None:\n cfg_idx = random.choice(len(configs))\n target_cfg = configs[cfg_idx]\n else:\n # FIXME: Make this OOP to avoid these if statements\n if self.img_type == 'spatial':\n target_cfg = self.random_config_spatial()\n elif self.img_type == 'single':\n target_cfg = self.random_config_single()\n\n if self.data_type == 'concept':\n if n_correct is not None:\n # Fixed number of targets and distractors\n n_target = n_correct\n n_distract = n_images - n_target\n elif min_correct is not None:\n # Minimum number of targets, otherwise sample whatever\n # TODO: combine min_correct and min_incorrect\n n_target = min_correct\n n_distract = 0\n else:\n # Minimum of 2 targets and distractors each\n n_target = 2\n n_distract = 2\n else:\n n_target = 1\n n_distract = n_images # Never run out of distractors\n\n cfgs = []\n shapes = []\n cfgs_to_cap = []\n for i in range(n_images):\n if n_target > 0:\n label = 1\n n_target -= 1\n elif n_distract > 0:\n label = 0\n n_distract -= 1\n else:\n label = (random.random() < p_correct)\n\n if self.img_type == 'spatial':\n new_cfg, new_shapes, cfg_to_cap = self.generate_spatial(target_cfg, label, n_caps_per_image)\n elif self.img_type == 'single':\n new_cfg, new_shapes = self.generate_single(target_cfg, label)\n\n # Create image and draw shapes\n img = self.create_image(new_shapes)\n imgs[i] = img\n labels[i] = label\n cfgs.append(new_cfg)\n shapes.append(new_shapes)\n cfgs_to_cap.append(cfg_to_cap)\n\n return imgs, labels, target_cfg, cfgs, cfgs_to_cap, shapes", "def rand_crop_whole_ct(image, label, res_s, out_s,\n 
apply_data_aug, augment_times=2):\n if image.shape != (res_s, res_s, res_s) or \\\n label.shape != (res_s, res_s, res_s):\n logging.info(\"Unexpected shapes. \"\n \"image.shape: %s, label.shape: %s\",\n image.shape, label.shape)\n return\n\n if not apply_data_aug:\n # Do not augment data.\n idx = (res_s - out_s) // 2\n image = image[idx:idx + out_s, idx:idx + out_s, idx:idx + out_s]\n label = label[idx:idx + out_s, idx:idx + out_s, idx:idx + out_s]\n yield image, label\n else:\n cut = res_s - out_s\n for _ in range(augment_times):\n for i in [0, cut // 2, cut]:\n for j in [0, cut // 2, cut]:\n for k in [0, cut // 2, cut]:\n image_aug = image[i:i + out_s, j:j + out_s, k:k + out_s].copy()\n label_aug = label[i:i + out_s, j:j + out_s, k:k + out_s].copy()\n image_aug = intensity_change(image_aug)\n yield image_aug, label_aug", "def generate_observations(self, eval_mode, augment_frames=None):\n episode_idx = 0\n augment_frames = (\n augment_frames if augment_frames is not None\n else self._augment_frames and not eval_mode)\n for t, obs in enumerate(self._flat_observations):\n if augment_frames:\n obs = image_utils.random_crop_image(obs)\n if self._split_by_episodes:\n yield obs, episode_idx\n else:\n yield obs\n if self._is_terminal[t]:\n episode_idx += 1", "def gen_test_output(sess, logits, image_folder, image_pl, data_folder,\n learning_phase, image_shape, nw_shape):\n image_paths = glob(os.path.join(data_folder, image_folder, '*.png'))\n for image_file in image_paths[:5]:\n\n in_image = scipy.misc.imread(image_file, mode='RGB')\n image = scipy.misc.imresize(in_image, image_shape)\n\n street_im = get_seg_img(sess, logits, image_pl, image, image_shape,\n nw_shape, learning_phase)\n\n street_im = scipy.misc.imresize(street_im, in_image.shape)\n yield os.path.basename(image_file), np.array(street_im)", "def s2s_data_generator(s2s_df=duplets, all_catalog=catalog_images, batch_size=None):\n orig_index_list = duplets.index.tolist()\n all_shop_index_list = catalog_images.index.tolist()\n dummy = np.zeros((1, 3 * N))\n\n while True:\n \n q_list = list()\n p_list = list()\n n_list = list()\n dummy_list = list()\n \n index_list = copy.copy(orig_index_list)\n \n while len(index_list) > 0:\n\n index = random.choice(index_list)\n product_id = duplets.loc[index, 'product_id']\n \n q_temp = duplets.loc[index, 'street_images']\n q_img = os.path.join(Path, q_temp + '.jpeg')\n \n p_temp = duplets.loc[index, 'shop_images']\n p_img = os.path.join(Path, p_temp + '.jpeg')\n\n while True:\n idx = random.choice(all_shop_index_list)\n prod_idx = catalog_images.loc[idx, 'product_id']\n\n if prod_idx != product_id:\n temp = random.choice(catalog_images.loc[idx, 'shop_images'])\n n_img = os.path.join(Path, temp + '.jpeg')\n\n q_img = os.path.join(Path, q_index + '.jpeg')\n p_img = os.path.join(Path, p_index + '.jpeg')\n n_img = os.path.join(Path, n_index + '.jpeg')\n\n res = bbox_mappings[q_index]\n\n left = res['left']\n top = res['top']\n right = left + res['width']\n bottom = top + res['height']\n\n\n query_img = Image.open(q_img)\n query_crop = query_img.crop((left, top, right, bottom))\n positive_img = Image.open(p_img)\n negative_img = Image.open(n_img)\n\n \n query = np.array(query_crop.resize((300,300), Image.NEAREST))\n positive = np.array(positive_img.resize((300,300), Image.NEAREST))\n negative = np.array(negative_img.resize((300,300), Image.NEAREST))\n \n \n q_list.append(query_array)\n p_list.append(positive_array)\n n_list.append(negative_array)\n dummy_list.append(dummy)\n\n \n 
index_list.remove(index)\n\n if len(q_list) == batch_size or (len(index_list) == 0 and len(q_list) > 0):\n yield convert_data(q_list, p_list, n_list, dummy_list)\n q_list = list()\n p_list = list()\n n_list = list()\n dummy_list = list()", "def __init__(self, dataset, width=512, height=512, pictures=10, generate_classes=True, generate_objects=True):\n super().__init__(dataset)\n\n cropper = Cropper(width=width, height=height)\n\n dir_name = \"tmp-data-{}x{}-from-{}-pictures\".format(width, height, pictures)\n origins = os.path.join(dir_name, \"origins\")\n classes = os.path.join(dir_name, \"classes\")\n origins_classes_v_join = os.path.join(dir_name, \"origin-classes-v-join\")\n objects = os.path.join(dir_name, \"objects\")\n origins_objects_v_join = os.path.join(dir_name, \"origin-objects-v-join\")\n\n if not os.path.exists(origins):\n os.makedirs(origins)\n\n trains = self.get_iterable_trains()\n vals = self.get_iterable_evals()\n\n selection_set = []\n for _, val in enumerate(trains):\n origin, class_v, object_v = self.get_train_triple(val)\n selection_set.append((origin, class_v, object_v))\n for _, val in enumerate(vals):\n origin, class_v, object_v = self.get_val_triple(val)\n selection_set.append((origin, class_v, object_v))\n\n final_set = random.sample(selection_set, pictures)\n\n if generate_classes:\n if not os.path.exists(classes):\n os.makedirs(classes)\n if not os.path.exists(origins_classes_v_join):\n os.makedirs(origins_classes_v_join)\n\n if generate_objects:\n if not os.path.exists(objects):\n os.makedirs(objects)\n if not os.path.exists(origins_objects_v_join):\n os.makedirs(origins_objects_v_join)\n\n for _, (origin, class_v, object_v) in enumerate(final_set):\n print(\"Processing {}, {}, {}\".format(origin, class_v, object_v))\n cropper.set_imgs(origin, class_v, object_v, add_randomly=5)\n counter = 1\n while not cropper.is_finished:\n origin_i, class_i, object_i = cropper.next_crop()\n # Check that classes are involved\n finded = False\n for l in class_i:\n for pix in l:\n for c in pix:\n if c != 0:\n finded = True\n break\n if finded:\n break\n if finded:\n break\n if not finded:\n continue\n path = \"{}-{}.png\".format(get_origin_name(origin), counter)\n # print(\"Writing: {}\".format(os.path.join(origins, path)))\n cv2.imwrite(os.path.join(origins, path), origin_i)\n if generate_classes:\n cv2.imwrite(os.path.join(classes, path), class_i)\n cv2.imwrite(os.path.join(origins_classes_v_join, path), cv2.hconcat([origin_i, class_i]))\n if generate_objects:\n cv2.imwrite(os.path.join(objects, path), object_i)\n cv2.imwrite(os.path.join(origins_objects_v_join, path), cv2.hconcat([origin_i, object_i]))\n counter += 1\n\n print(\"Generating of {}-pictures-subset done. 
You find it in: {}\".format(pictures, dir_name))", "def get_generators(patch_size, batch_size, preprocess_func, output_reshape_func, num_validation, train_processes,\n train_cache, train_data_dir='data/train/'):\n\n dirs = util.get_data_list(train_data_dir)\n labels = util.parse_labels_months()\n train_paths, validation_paths = util.train_validation_split(dirs, labels)\n # generate train batch loader\n train_data_loader = CTBatchLoader(train_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func)\n\n train_transforms = get_train_transform(patch_size)\n train_data_generator = MultiThreadedAugmenter(train_data_loader, train_transforms, num_processes=train_processes,\n num_cached_per_queue=train_cache, seeds=None, pin_memory=False)\n\n # wrapper to be compatible with keras\n train_generator_keras = KerasGenerator(train_data_generator, output_reshapefunc=output_reshape_func)\n\n # generate validation batch loader\n valid_data_loader = CTBatchLoader(validation_paths, num_validation, patch_size,\n num_threads_in_multithreaded=1, preprocess_func=preprocess_func)\n valid_transforms = get_valid_transform(patch_size)\n valid_data_generator = MultiThreadedAugmenter(valid_data_loader, valid_transforms, num_processes=1,\n num_cached_per_queue=1, seeds=None, pin_memory=False)\n # wrapper to be compatible with keras\n valid_generator_keras = KerasGenerator(valid_data_generator, output_reshape_func, 1)\n\n return train_generator_keras, valid_generator_keras", "def image_generator(df,batch_size,plab,augment=True):\n rng = np.random.RandomState(290615)\n if_train = 1 if plab < 1. else 0\n bi,b_list = 0,df.groupby('business_id').apply(get_biz_id,if_train,batch_size)\n b_list = b_list[b_list!=0]\n b_order = rng.permutation(b_list.index)\n pi,p_list = 0, df[df.iloc[:,-1]==0]['photo_id']\n p_order = rng.permutation(p_list.index)\n while True:\n if rng.rand(1)[0] < plab:\n # aggregate biz_id with outdoor-seating\n biz_id_i = b_list.ix[b_order[bi]]\n photo_train = df[df['business_id']==biz_id_i]['photo_id']\n y_batch = np.asarray(df[df['business_id']==biz_id_i].iloc[:,-1])\n # increase/loop indices for next iteration\n if bi < len(b_list)-1:\n bi += 1\n else:\n bi,b_order = 0,rng.permutation(b_list.index)\n else:\n # pic 32 random non-outdoor-seating pictures\n photo_train = p_list[p_order[pi:(pi+batch_size)]]\n y_batch = np.repeat(0, repeats=len(photo_train), axis=0)\n # increase/loop indices for next iteration\n if pi < len(p_list)-1-batch_size:\n pi += batch_size\n else:\n pi,p_order = 0,rng.permutation(p_list.index)\n batch_size_i = len(photo_train)\n # read and augment photos\n X_batch = np.empty((batch_size_i,h,w,ch))\n for i_ in range(batch_size_i):\n f_ = 'data/train_photos/' + str(photo_train.iloc[i_]) + '.jpg'\n im = Image.open(os.path.realpath(f_))\n im_sml = im.resize((w,h))\n # scale inputs [-1,+1]\n xi = np.asarray(im_sml)/128.-1\n if augment:\n # flip coords horizontally (but not vertically)\n if rng.rand(1)[0] > 0.5:\n xi = np.fliplr(xi)\n # rescale slightly within a random range\n jit = w*0.2\n if rng.rand(1)[0] > 0.1:\n xl,xr = rng.uniform(0,jit,1),rng.uniform(w-jit,w,1)\n yu,yd = rng.uniform(0,jit,1),rng.uniform(h-jit,h,1)\n pts1 = np.float32([[xl,yu],[xr,yu],[xl,yd],[xr,yd]])\n pts2 = np.float32([[0,0],[w,0],[0,h],[w,h]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n xi = cv2.warpPerspective(xi,M,(w,h))\n # save individual image to X_batch\n X_batch[i_,:,:,:] = xi\n# plt.imsave('data/aug_%i' % i_,(xi+1)/2);plt.close()\n yield([X_batch],y_batch)", "def 
reconstructions_sample(self, n=()):\n self.assert_sampled()\n return [[j.sample(sample_shape=n, seed=self.randint).numpy()\n for j in i]\n for i in self._reconstructions]", "def data_generator(dataset, config, shuffle=True, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False,\n diverse=0, no_augmentation_sources=None):\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources: augmentation = None\n image, image_meta, gt_class_ids, gt_class_ids2, gt_boxes, gt_rboxes, gt_global_mask, \\\n gt_masks, gt_mask_score, gt_text_embeds, gt_embed_lengths = load_image_gt(dataset, config, image_id,\n augmentation=augmentation)\n\n \n # Skip images that have no instances. This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # Use only positive class_ids\n categories = np.unique(gt_class_ids)\n _idx = categories > 0\n categories = categories[_idx]\n \n if config.MODEL == \"smrcnn\":\n # Use only active classes\n active_categories = []\n for c in categories:\n if any(c == dataset.ACTIVE_CLASSES):\n active_categories.append(c)\n \n # Skiop image if it contains no instance of any active class \n if not np.any(np.array(active_categories) > 0):\n continue\n # Randomly select category\n category = np.random.choice(active_categories)\n \n # NOTE for siamese\n # Generate siamese target crop\n targets = []\n for i in range(config.NUM_TARGETS):\n targets.append(get_one_target(category, dataset, config, augmentation=augmentation))\n # target = np.stack(target, axis=0)\n \n # print(target_class_id)\n target_class_id = category\n target_class_ids = np.array([target_class_id])\n \n idx = gt_class_ids == target_class_id\n siamese_class_ids = idx.astype('int8')\n # print(idx)\n # print(gt_boxes.shape, gt_masks.shape)\n siamese_class_ids = siamese_class_ids[idx]\n gt_class_ids = gt_class_ids[idx]\n gt_boxes = gt_boxes[idx,:]\n gt_masks = gt_masks[:,:,idx]\n image_meta = image_meta[:15] # TODO\n # --------------------------------------------------------------\n\n # RPN Targets\n # if rpn have muiltple label, rewrite here\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_class_ids2, mrcnn_bbox, mrcnn_rbbox, mrcnn_mask,\\\n mrcnn_text_embeds, mrcnn_embed_lengths = build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_rboxes, gt_masks, gt_mask_score, 
gt_class_ids2, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros((batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros([batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros([batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros((batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros((batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_rboxes = np.zeros((batch_size, config.MAX_GT_INSTANCES, 5), dtype=np.float32)\n if config.MODEL == \"smrcnn\":\n batch_targets = np.zeros((batch_size, config.NUM_TARGETS) + targets[0].shape, dtype=np.float32)\n batch_gt_masks = np.zeros((batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n batch_gt_class_ids2 = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_text_embeds = np.zeros((batch_size, config.MAX_GT_INSTANCES, config.MAX_LABEL_LENGTH), dtype=np.int32)\n batch_gt_embed_lengths = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n if random_rois:\n batch_rpn_rois = np.zeros((batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros((batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros((batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n \n # ************************* NOTE for 2 label dataset\n if config.NUM_CLASSES2 > 2:\n batch_mrcnn_class_ids2 = np.zeros(\n (batch_size,) + mrcnn_class_ids2.shape, dtype=mrcnn_class_ids.dtype)\n # ************************* NOTE for ocr\n if config.READ:\n batch_mrcnn_text_embeds = np.zeros(\n (batch_size,) + mrcnn_text_embeds.shape, dtype=mrcnn_text_embeds.dtype)\n batch_mrcnn_embed_lengths = np.zeros(\n (batch_size,) + mrcnn_embed_lengths.shape, dtype=mrcnn_text_embeds.dtype)\n batch_mrcnn_bbox = np.zeros((batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_rbbox = np.zeros((batch_size,) + mrcnn_rbbox.shape, dtype=mrcnn_rbbox.dtype)\n batch_mrcnn_mask = np.zeros((batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n siamese_class_ids = siamese_class_ids[ids] # NOTE\n gt_boxes = gt_boxes[ids]\n gt_rboxes = gt_rboxes[ids]\n gt_masks = gt_masks[:, :, ids]\n gt_class_ids2 = gt_class_ids2[ids]\n gt_text_embeds = gt_text_embeds[ids]\n gt_embed_lengths = gt_embed_lengths[ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n # NOTE for siamese\n if config.MODEL == \"smrcnn\":\n batch_targets[b] = np.stack([mold_image(target.astype(np.float32), config) for target in targets], axis=0)\n batch_gt_class_ids[b, :siamese_class_ids.shape[0]] = siamese_class_ids\n else:\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_rboxes[b, :gt_rboxes.shape[0]] = gt_rboxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n batch_gt_class_ids2[b, :gt_class_ids2.shape[0]] = gt_class_ids2\n batch_gt_text_embeds[b, 
:gt_text_embeds.shape[0], :gt_text_embeds.shape[1]] = gt_text_embeds\n batch_gt_embed_lengths[b, :gt_embed_lengths.shape[0]] = gt_embed_lengths\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_rbbox[b] = mrcnn_rbbox\n batch_mrcnn_mask[b] = mrcnn_mask\n batch_mrcnn_class_ids2[b] = mrcnn_class_ids2\n batch_mrcnn_text_embeds[b] = mrcnn_text_embeds\n batch_mrcnn_embed_lengths[b] = mrcnn_embed_lengths\n b += 1\n # Batch full?\n if b >= batch_size:\n \n\n # NOTE for siamese\n if config.MODEL == \"smrcnn\":\n inputs = [batch_images, batch_image_meta, batch_targets, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_class_ids2, batch_gt_boxes, batch_gt_rboxes, batch_gt_masks,\n batch_gt_text_embeds, batch_gt_embed_lengths]\n else:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_class_ids2, batch_gt_boxes, batch_gt_rboxes, batch_gt_masks,\n batch_gt_text_embeds, batch_gt_embed_lengths]\n outputs = []\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(batch_mrcnn_class_ids, -1) \n \n # ************************* NOTE for 2 label dataset\n # ************************* NOTE for ocr\n if config.RBOX and config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1)\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_rbbox, batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif config.RBOX and config.READ and not config.HAVE_LABEL2:\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_rbbox, batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif config.RBOX and not config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1) \n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_rbbox, batch_mrcnn_mask])\n elif config.RBOX and not config.READ and not config.HAVE_LABEL2:\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_rbbox, batch_mrcnn_mask])\n elif not config.RBOX and config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1)\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif not config.RBOX and config.READ and not config.HAVE_LABEL2:\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif not config.RBOX and not 
config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1) \n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_mask])\n elif not config.RBOX and not config.READ and not config.HAVE_LABEL2:\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(f\"Error processing image {dataset.image_info[image_id]}\")\n error_count += 1\n if error_count > 5:\n raise", "def generate_batch_1(batch_size, img_shape, itm_size, n_itms, prob_type = 'SD'):\n # np.random.seed(100)\n # Change `problem_type` to SR for spatial relation labels\n data_parameters = {'problem_type': prob_type,\n 'item_size': [itm_size[0],itm_size[1]],\n 'box_extent': [img_shape[0], img_shape[1]],\n 'num_items': n_itms,\n 'num_item_pixel_values': 1,\n 'SD_portion': 0,\n 'SR_portion': 1,\n 'full_size': 2, #whether ground label should be a full-size masked image or concatenated patches from the original image. 0 = merged patches, 1 = masked image, 2 = both\n 'mask': True, #This parameter is introduced in order to generate label of the image as 1 where patch is and 0 else where.\n 'display': False}\n\n\n data_generator = psvrt.psvrt(raw_input_size=img_shape, batch_size=batch_size) # Load generator\n data_generator.initialize_vars(**data_parameters) # Initialize it with image parameters\n\n data = data_generator.single_batch() # Fetch a single batch\n\n stimuli = np.array(data[0]) # Images\n labels = np.array(data[1]) # Labels\n label_positions = np.array(data[2])\n all_items = np.asarray(data[3])\n\n labels_temp = np.array(np.squeeze(labels[:, 0, 0, 0]), np.int64)\n train_data = np.reshape(stimuli, [stimuli.shape[0], stimuli.shape[1] * stimuli.shape[2] * stimuli.shape[3]])\n mask_labels = rgb2grey(np.array([items[0] for items in all_items]))\n\n ret_lbls = np.zeros([batch_size, 2])\n indices_1 = np.where(labels_temp == 1)\n indices_0 = np.where(labels_temp == 0)\n\n ret_lbls[indices_0] = [1,0]\n ret_lbls[indices_1] = [0,1]\n mask_labels = np.reshape(mask_labels, [mask_labels.shape[0], mask_labels.shape[1] * mask_labels.shape[2]])\n # left_mask, right_mask = sep_boxes(img_shape, itm_size, n_itms, mask_labels, labels_temp)\n # left_mask = np.reshape(left_mask, [batch_size,img_shape[0] * img_shape[1]])\n # right_mask = np.reshape(right_mask, [batch_size, img_shape[0] * img_shape[1]])\n\n\n return train_data, mask_labels, ret_lbls", "def resnet_6blocks(pretrained=False, **kwargs):\n return ResnetGenerator(\n 6, **kwargs\n )", "def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6,\n padding_type='reflect'):\n assert (n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n for i in range(n_blocks): # add ResNet blocks\n\n model += [ResnetBlock(ngf 
* mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout,\n use_bias=use_bias)]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n # model += [nn.Tanh()]\n model += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*model)", "def generate_batch(batch_size, img_shape, itm_size, n_itms, prob_type = 'SD'):\n\t# Change `problem_type` to SR for spatial relation labels\n\tdata_parameters = {'problem_type': prob_type,\n\t\t\t 'item_size': [itm_size[0],itm_size[1]],\n\t\t\t 'box_extent': [img_shape[0], img_shape[1]],\n\t\t\t 'num_items': n_itms,\n\t\t\t 'num_item_pixel_values': 1,\n\t\t\t 'SD_portion': 0,\n\t\t\t 'SR_portion': 1,\n\t\t\t\t'full_size': 2, #whether ground label should be a full-size masked image or concatenated patches from the original image. 0 = merged patches, 1 = masked image, 2 = both\n\t\t\t\t'mask': True, #This parameter is introduced in order to generate label of the image as 1 where patch is and 0 else where.\n\t\t\t 'display': False}\n\n\n\tdata_generator = psvrt.psvrt(raw_input_size=img_shape, batch_size=batch_size) # Load generator\n\tdata_generator.initialize_vars(**data_parameters) # Initialize it with image parameters\n\n\tdata = data_generator.single_batch() # Fetch a single batch\n\n\tstimuli = np.array(data[0]) # Images\n\tlabels = np.array(data[1]) # Labels\n\tlabel_positions = np.array(data[2])\n\tall_items = np.asarray(data[3])\n\n\tlabels_temp = np.array(np.squeeze(labels[:, 0, 0, 0]), np.int64)\n\ttrain_data = np.reshape(stimuli, [stimuli.shape[0], stimuli.shape[1] * stimuli.shape[2] * stimuli.shape[3]])\n\tmask_labels = rgb2grey(np.array([items[0] for items in all_items]))\n\n\tret_lbls = np.zeros([batch_size, 2])\n\tindices_1 = np.where(labels_temp == 1)\n\tindices_0 = np.where(labels_temp == 0)\n\n\tret_lbls[indices_0] = [1,0]\n\tret_lbls[indices_1] = [0,1]\n\n\tleft_mask, right_mask = sep_boxes(img_shape, itm_size, n_itms, mask_labels, labels_temp)\n\n\t\"\"\"This part of the code is written to add 2 dimensions for every pixel, whether that pixel is 0 or 1.\"\"\"\n\t# ret_lmask = np.zeros([batch_size, img_shape[0] * img_shape[1], 2])\n\t# ret_rmask = np.zeros([batch_size, img_shape[0] * img_shape[1], 2])\n\n\tleft_mask = np.reshape(left_mask, [batch_size,img_shape[0] * img_shape[1]])\n\t# indices_lmask_1 = np.where(left_mask == 1)\n\t# indices_lmask_0 = np.where(left_mask == 0)\n\t#\n\t#\n\t# ret_lmask[indices_lmask_0] = [1.,0.]\n\t# ret_lmask[indices_lmask_1] = [0.,1.]\n\n\t# print(ret_lmask)\n\t# left_mask[0, indices_lmask_1] = [0.,1.]\n\n\n\tright_mask = np.reshape(right_mask, [batch_size, img_shape[0] * img_shape[1]])\n\n\t# indices_rmask_1 = np.where(right_mask == 1)\n\t# indices_rmask_0 = np.where(right_mask == 0)\n\t#\n\t# ret_rmask[indices_rmask_0] = [1.,0.]\n\t# ret_rmask[indices_rmask_1] = [0.,1.]\n\n\t# print(left_mask)\n\t# print(right_mask)\n\n\treturn train_data, mask_labels, left_mask, right_mask, ret_lbls", "def generate_images_pred(self, inputs, outputs):\n for scale in self.scales:\n disp = outputs[(\"disp\", scale)]\n disp = F.interpolate(\n disp, [self.height, self.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.min_depth, 
self.max_depth)\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.frame_ids[1:]):\n\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n # mean-normalized inverse depth from [62] to discourage shrinking of the estimated depth\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\")\n\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def _yield_testing(self, batch_index):\n samples_start = batch_index % self.num_samples\n samples_end = (batch_index+1) % self.num_samples\n if samples_start < samples_end:\n batch_samples = self.test_data[samples_start:samples_end]\n else:\n batch_samples = self.test_data[samples_start:]\n batch_samples.extend(self.test_data[:samples_end])\n images = []\n rois = []\n for sample in batch_samples:\n # 'sample' has this structure:\n # {path: {\n # 'roi_origin_x': test_sample[1]['roi_origin_x'],\n # 'roi_origin_y': test_sample[1]['roi_origin_y'],\n # 'roi_width': test_sample[1]['roi_width'],\n # 'roi_height': test_sample[1]['roi_height'] \n # } \n # }\n img_path = os.path.join(self.dataset_root_path, list(sample.keys())[0])\n img = cv2.imread(img_path) # watch out for slashes (/)\n # if the path does not exist or there are problems while reading the image\n if img is None:\n print('[DATA LOADER ERROR] cannot find image at path: ', img_path)\n continue\n roi_data = list(sample.values())[0]\n roi = {\n 'upper_left_x': roi_data['roi_origin_x'],\n 'upper_left_y': roi_data['roi_origin_y'],\n 'width': roi_data['roi_width'],\n 'height': roi_data['roi_height']\n }\n img = img.astype('float32')\n images.append(img)\n rois.append(roi)\n return images, rois", "def preprocess_scans(self, scan_ids, width, height, depth, clipping=True, loop=False,\n shuffle=False):\n while True:\n if shuffle:\n random.shuffle(scan_ids)\n for scan_id in scan_ids:\n ct_scan, origin, spacing = self.get_scan(scan_id, resample=True)\n if clipping:\n scan, origin, spacing = self.rescale_scan(\n ct_scan, origin, spacing, width, height, depth, normalize=True\n )\n yield self.clip_scan(scan), origin, spacing\n else:\n yield self.rescale_scan(\n ct_scan, origin, spacing, width, height, depth, normalize=True\n )\n if not loop:\n break", "def gen_batches_functions(data_folder, image_paths, image_shape, out_shape,\n label_folder):\n\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n id_road = 7\n id_lane = 6\n id_car = 10\n\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n # Get corresponding label img path\n gt_image_file = image_file.replace('CameraRGB', 'CameraSeg')\n # Read rgb and label images\n img_in 
= scipy.misc.imread(image_file, mode='RGB')\n gt_in = scipy.misc.imread(gt_image_file)\n # Crop sky part of the image\n image = img_in[-out_shape[0]:, :]\n gt_image = gt_in[-out_shape[0]:, :, 0]\n # Obtain labels\n gt_road = ((gt_image == id_road) | (gt_image == id_lane))\n gt_car = (gt_image == id_car)\n gt_car[-105:, :] = False\n gt_bg = np.invert(gt_car | gt_road)\n # Augmentation\n if bool(random.getrandbits(1)):\n image, gt_bg, gt_car, gt_road = flip_img(\n image, gt_bg, gt_car, gt_road)\n\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_car = gt_car.reshape(*gt_car.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n\n gt_image = np.concatenate((gt_bg, gt_car, gt_road), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn", "def _train_aug(self, results):\n img = results['img']\n h, w, c = img.shape\n boxes = results['gt_bboxes']\n while True:\n scale = random.choice(self.ratios)\n new_h = int(self.crop_size[0] * scale)\n new_w = int(self.crop_size[1] * scale)\n h_border = self._get_border(self.border, h)\n w_border = self._get_border(self.border, w)\n\n for i in range(50):\n center_x = random.randint(low=w_border, high=w - w_border)\n center_y = random.randint(low=h_border, high=h - h_border)\n\n cropped_img, border, patch = self._crop_image_and_paste(\n img, [center_y, center_x], [new_h, new_w])\n\n mask = self._filter_boxes(patch, boxes)\n # if image do not have valid bbox, any crop patch is valid.\n if not mask.any() and len(boxes) > 0:\n continue\n\n results['img'] = cropped_img\n results['img_shape'] = cropped_img.shape\n results['pad_shape'] = cropped_img.shape\n\n x0, y0, x1, y1 = patch\n\n left_w, top_h = center_x - x0, center_y - y0\n cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n mask = self._filter_boxes(patch, results[key])\n bboxes = results[key][mask]\n bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n if self.bbox_clip_border:\n bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)\n bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n bboxes = bboxes[keep]\n results[key] = bboxes\n if key in ['gt_bboxes']:\n if 'gt_labels' in results:\n labels = results['gt_labels'][mask]\n labels = labels[keep]\n results['gt_labels'] = labels\n if 'gt_masks' in results:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n return results", "def batch_generator(labels_df, set_kind):\n # Generate training batches\n if set_kind == \"train\" and (labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 64):\n while 1:\n\n for i in range(labels_df.shape[0]//8):\n x_train = np.load('data/train-npy/npdatasetX{}.npy'.format(i))\n y_train = np.load('data/train-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1):\n x_trainj = x_train[j*8:j*8-1,:]\n y_trainj = y_train[j*8:j*8-1,:]\n\n yield (x_trainj, y_trainj)\n\n\n # Generate validation batches\n if set_kind == \"valid\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//4): \n x_valid = 
np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n y_valid = np.load('data/valid-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1): \n x_validj = x_valid[j*4:j*4-1,:]\n y_validj = y_valid[j*4:j*4-1,:]\n\n yield (x_validj, y_validj)\n\n\n # Generate test batches\n if set_kind == \"test\" and labels_df.shape[0] == 40669:\n while 1:\n\n for i in range(labels_df.shape[0]//4): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(1): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n \n yield (x_validj, y_validj)\n\n if set_kind == \"test\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//8): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(2): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n\n yield x_validj" ]
[ "0.58771247", "0.57928264", "0.57725567", "0.5747757", "0.5570905", "0.5488688", "0.5483506", "0.5471397", "0.5459297", "0.5440349", "0.54253316", "0.54131067", "0.5412875", "0.54017425", "0.5401398", "0.5400203", "0.53945994", "0.5369897", "0.5358676", "0.53234917", "0.53224057", "0.53115916", "0.5304172", "0.53022957", "0.52911717", "0.5286568", "0.5269146", "0.52498746", "0.52493995", "0.52396595" ]
0.6750257
0
generate upstream cherrypick patch files
def generate_patch_files(sha_list: List[str], start_version: int) -> PatchList: upstream_dir = paths.TOOLCHAIN_LLVM_PATH fetch_upstream_once() result = PatchList() for sha in sha_list: if len(sha) < 40: sha = get_full_sha(upstream_dir, sha) file_path = paths.SCRIPTS_DIR / 'patches' / 'cherry' / f'{sha}.patch' assert not file_path.exists(), f'{file_path} already exists' with open(file_path, 'w') as fh: check_call(f'git format-patch -1 {sha} --stdout', stdout=fh, shell=True, cwd=upstream_dir) commit_subject = check_output( f'git log -n1 --format=%s {sha}', shell=True, cwd=upstream_dir) comment = '[UPSTREAM] ' + commit_subject.strip() rel_patch_path = f'cherry/{sha}.patch' end_version = sha_to_revision(sha) result.append(PatchItem(comment, rel_patch_path, start_version, end_version)) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_genpatch(self, argv):\n #TODO:\n # - Would an optional [<files> ...] argument be useful or is\n # that overkill? E.g. 'p4 genpatch ./...' (I think that that\n # would be very useful.\n # - Could add '-f' option to only warn on 'out of sync'.\n # - Could add '-d<flag>' option to control to diff format.\n # Context and unified allowed.\n # - Handling binary files that cannot be diff'd\n # - Option to be able to control the base dir so the patch -p#\n # number can be controlled. Dunno what form that should\n # take.\n\n # Process options.\n diffFormat = 'u'\n if diffFormat == 'u':\n prefixes = ('---', '+++')\n elif diffFormat == 'c':\n prefixes = ('***', '---')\n\n # Process args.\n if not argv[1:]:\n change = 'default'\n elif len(argv[1:]) == 1:\n change = argv[1]\n try:\n change = int(change)\n except ValueError: \n # Stupidly, p4win's new Tool %c interpolation will use\n # \"Default\", on which the normal p4.exe client will die.\n change = change.lower()\n if change != 'default':\n sys.stderr.write(\"Invalid changelist number '%s'.\\n\"\\\n % change)\n return 1\n else:\n sys.stderr.write(\"Usage: genpatch [<changelist#>]\\n\")\n sys.stderr.write(\"Missing/wrong number of arguments.\\n\")\n return 1\n\n # Validate the given change number.\n p4 = p4lib.P4( **p4lib.parseOptv(self.__p4optv) )\n submitted = [c['change'] for c in p4.changes(status='submitted')]\n pending = [c['change'] for c in p4.changes(status='pending')]\n if change in submitted:\n status = 'submitted'\n elif change in pending+['default']:\n status = 'pending'\n else:\n sys.stderr.write(\"Change %s unknown.\" % change)\n return 1\n\n # Get list of files to include in patch.\n if status == 'submitted':\n d = p4.describe(change, diffFormat='u')\n desc = d['description']\n files = d['files']\n diffs = d['diff']\n elif status == 'pending':\n files = p4.opened(change=change)\n if change == 'default':\n desc = None\n else:\n desc = p4.change(change=change)['description']\n if files:\n diffs = p4.diff([f['depotFile'] for f in files],\n diffFormat='u')\n else:\n diffs = []\n\n # Make a single string from 'diffs' with appropriate delimiters\n # for the \"patch\" program.\n diffstr = ''\n timestamp = time.asctime()\n for diff in diffs:\n # Perforce std header, e.g.:\n # ==== //depot/apps/px/ReadMe.txt#5 (text) ====\n # or\n # ==== //depot/foo.doc#42 - c:\\trentm\\foo.doc ==== (binary)\n if diff.has_key('localFile'):\n diffstr += \"==== %(depotFile)s#%(rev)s - %(localFile)s ====\"\\\n % diff\n if diff['binary']:\n diffstr += \" (binary)\"\n diffstr += \"\\n\"\n else:\n diffstr += \"==== %(depotFile)s#%(rev)s (%(type)s) ====\\n\"\\\n % diff\n # Patch header, e.g. 
for unified diffs:\n # Index: apps/px/test/ToDo.txt\n # --- apps/px/test/ToDo.txt.~1~ Fri May 31 21:17:17 2002\n # +++ apps/px/test/ToDo.txt Fri May 31 21:17:17 2002\n # or for context diffs:\n # Index: apps/px/test/ToDo.txt\n # *** apps/px/test/ToDo.txt.~1~ Fri May 31 21:26:47 2002\n # --- apps/px/test/ToDo.txt Fri May 31 21:26:47 2002\n fname = diff['depotFile'][len('//depot/'):]\n\n if diff.has_key('text'):\n diffstr += \"Index: %s\\n\" % fname\n diffstr += \"%s %s.~1~\\t%s\\n\" % (prefixes[0], fname, timestamp)\n diffstr += \"%s %s\\t%s\\n\" % (prefixes[1], fname, timestamp)\n # The diff text.\n diffstr += ''.join(diff['text'])\n if diffstr[-1] != '\\n':\n diffstr += \"\\n\\\\ No newline at end of file\\n\"\n\n # Inline added files into the diff.\n addedfiles = [f for f in files if f['action'] in ('add', 'branch')]\n for f in addedfiles:\n # May have to get file type from 'p4 files'.\n if status == 'submitted':\n f['type'] = p4.files(f['depotFile'])[0]['type']\n # Skip file if it is binary.\n if f['type'].startswith('binary'):\n log.warn(\"Cannot inline '%s' because it is binary.\"\\\n % f['depotFile'])\n continue\n # Get the file contents.\n if status == \"pending\":\n # Read the file contents from disk.\n localFile = p4.where(f['depotFile'])[0]['localFile']\n if not os.path.exists(localFile):\n continue\n lines = open(localFile, 'r').readlines()\n else:\n # Get the file contents via 'p4 print'.\n fnameRev = \"%s#%s\" % (f['depotFile'], f['rev'])\n lines = p4.print_(fnameRev)[0]['text'].split('\\n')\n if not lines[-1]: lines = lines[:-1] # drop empty last line\n lines = [line+'\\n' for line in lines]\n # Inline the file.\n diffstr += \"\\n==== %(depotFile)s#%(rev)s (%(type)s) ====\\n\" % f\n if len(lines) < 2:\n ln = \"\"\n else:\n ln = \",\" + str(len(lines))\n fname = f['depotFile'][len('//depot/'):]\n diffstr += \"Index: %s\\n\" % fname\n diffstr += \"%s %s.~1~\\t%s\\n\" % (prefixes[0], fname, timestamp)\n diffstr += \"%s %s\\t%s\\n\" % (prefixes[1], fname, timestamp)\n diffstr += \"@@ -0,0 +1%s @@\\n\" % ln\n diffstr += '+' + '+'.join(lines)\n if diffstr[-1] != '\\n':\n diffstr += \"\\n\\\\ No newline at end of file\\n\"\n \n if diffstr: # std patch terminator\n diffstr += \"End of Patch.\"\n\n patch = p4lib.makeForm(description=desc, files=files,\n differences=diffstr)\n if patch: # ViM-specific hack to have it colorize patches as diffs.\n patch = \"diff\\n\" + patch\n\n sys.stdout.write(patch)", "def git_upgraded_pkgs(self):\n\n self.extract_from_cachedir()\n self.etc_commits.added.commit()\n\n cherry_pick_sha = None\n if self.etc_commits.cherry_pick.rpaths:\n self.etc_commits.cherry_pick.commit()\n cherry_pick_sha = self.repo.git_cmd('rev-list -1 HEAD --')\n\n # Clean the working area of the files that are not under version\n # control.\n self.repo.git_cmd('clean -d -x -f')\n\n # Update the master-tmp branch with new files.\n if self.master_commits.added.rpaths:\n self.repo.checkout('master-tmp')\n for rpath in self.master_commits.added.rpaths:\n repo_file = os.path.join(self.repodir, rpath)\n if os.path.lexists(repo_file):\n warn('adding %s to the master-tmp branch but this file'\n ' already exists' % rpath)\n copy_file(rpath, self.root_dir, self.repodir,\n repo_file=repo_file)\n self.master_commits.added.commit()\n\n return cherry_pick_sha", "def cherryPickChange(self):\n rev = io.getNumber('Revision number to cherry-pick:')\n bug = io.getNumber('Issue fixed by this change:')\n\n diff = subversion.diff(self.upstream_repos + '/trunk', rev)\n if not diff.strip():\n raise 
error.ExpectationFailed(\n 'Retrieved diff is empty. '\n 'Did you accidentally cherry-pick a branch change?')\n util.run(['patch', '-p0'], cwd=self.wc.path(self.branch_dir), stdin=diff)\n self.wc.addRemove(self.branch_dir)\n\n yaml_path = self.wc.path(self._branchPath('app/app.yaml'))\n out = []\n updated_patchlevel = False\n for line in io.fileToLines(yaml_path):\n if line.strip().startswith('version: '):\n version = line.strip().split()[-1]\n base, patch = line.rsplit('g', 1)\n new_version = '%sg%d' % (base, int(patch) + 1)\n message = ('Cherry-picked r%d from /p/soc/ to fix issue %d' %\n (rev, bug))\n out.append('version: ' + new_version)\n out.append('# * ' + message)\n updated_patchlevel = True\n else:\n out.append(line)\n\n if not updated_patchlevel:\n log.error('Failed to update Google patch revision')\n log.error('Cherry-picking failed')\n\n io.linesToFile(yaml_path, out)\n\n log.info('Check the diff about to be committed with:')\n log.info('svn diff ' + self.wc.path(self.branch_dir))\n if not io.confirm('Commit this change?'):\n raise error.AbortedByUser('Cherry-pick aborted')\n self.wc.commit(message)\n log.info('Cherry-picked r%d from the Melange trunk.' % rev)", "def cmd_apply_patch(patchfile):\n return ['git', 'apply', patchfile]", "def write_tmp_patches(diffs):\n files = []\n for idx, diff in enumerate(diffs):\n prefix = 'cugit-%s-' % str(idx).zfill(5)\n suffix = '-patch'\n filename = mkstemp(suffix, prefix)[1]\n write_tmp_patch(diff, filename)\n files.append(filename)\n return files", "def copy_patches(root_directory, dist_directory, sdk_directory,\n cpus, families, boards):\n\n def _process(when, contexts):\n for context in contexts:\n for patch in configuration.PATCHES:\n if patch[\"when\"] == when:\n context.update({\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n })\n\n source = templates.from_string(patch[\"source\"], context)\n target = templates.from_string(patch[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Patching '%s'\\n\" % source)\n\n if patch[\"type\"] == \"file\":\n with open(source, \"r\") as fp:\n content = fp.read()\n\n for method in patch[\"methods\"]:\n content = method(source, content)\n\n with open(target, \"w\") as fp:\n fp.write(content)\n elif patch[\"type\"] == \"glob\":\n for source_file in glob.glob(source):\n if os.path.isfile(source_file):\n target_file = os.path.join(\n target, os.path.basename(source_file))\n\n with open(source_file, \"r\") as fp:\n content = fp.read()\n\n for method in patch[\"methods\"]:\n content = method(source, content)\n\n with open(target_file, \"w\") as fp:\n fp.write(content)\n else:\n raise Exception(\"Not supported\")\n\n _process(\"per_family\", families)\n _process(\"per_cpu\", cpus)\n _process(\"per_board\", boards)\n _process(\"per_once\", [{\n \"families\": [family[\"family\"] for family in families],\n \"cpus\": [cpu[\"cpu\"] for cpu in cpus],\n \"boards\": [board[\"board\"] for board in boards]\n }])", "def bump_upstream_sources(**kwargs):\n\n # Find out current tracking branch to bump\n # the services matching the branch:\n oa_folder = kwargs['workdir'] + '/openstack-ansible'\n try:\n remote_branch = tracking_branch_name(oa_folder)\n except ValueError as verr:\n raise SystemExit(verr)\n\n LOGGER.info(\"Each file can take a while to update.\")\n prevline = {}\n reporegex = re.compile('(?P<project>.*)_git_repo: (?P<remote>.*)')\n branchregex = re.compile(('(?P<project>.*)_git_install_branch: '\n 
'(?P<sha>[0-9a-f]{40}) '\n '# HEAD of \"(?P<branch>.*)\" '\n 'as of .*'))\n\n update_files = glob.glob(\n \"{}/playbooks/defaults/repo_packages/*.yml\".format(oa_folder))\n\n stable_branch_skips = [\n \"openstack_testing.yml\",\n \"nova_consoles.yml\",\n ]\n\n for filename in update_files:\n if remote_branch.startswith(\"stable/\") and \\\n os.path.basename(filename) in stable_branch_skips:\n LOGGER.info(\"Skipping {} for stable branch\".format(filename))\n continue\n LOGGER.info(\"Updating {}\".format(filename))\n for line in fileinput.input(filename, inplace=True):\n rrm = reporegex.match(line)\n if rrm:\n # Extract info of repo line (previous line)\n # for branch line (current line)\n prevline['project'] = rrm.group('project')\n prevline['remote'] = rrm.group('remote')\n print(branchregex.sub(\n lambda x: bump_project_sha_with_comments(x, prevline), line)),\n\n LOGGER.info(\"All files patched !\")\n msg = (\"Update all SHAs for {next_release}\\n\\n\"\n \"This patch updates all the roles to the latest available stable \\n\"\n \"SHA's, copies the release notes from the updated roles into the \\n\"\n \"integrated repo, updates all the OpenStack Service SHA's, and \\n\"\n \"updates the appropriate python requirements pins. \\n\\n\"\n \"Depends-On: {release_changeid}\").format(\n next_release=os.environ.get('next_release', '<NEW VERSION>'),\n release_changeid=os.environ.get('release_changeid', '<TODO>'),)\n if kwargs['commit']:\n repo = Repo(oa_folder)\n repo.git.add('.')\n repo.index.commit(msg)\n click.echo(\"Commit done. Please verify before review.\")\n else:\n click.echo(\"Here is a commit message you could use:\\n\")\n click.echo(msg)", "def patch_files():\n args = parser.parse_args()\n doc = json.load(args.ORIGINAL)\n patch = json.load(args.PATCH)\n result = jsonpatch.apply_patch(doc, patch)\n print(json.dumps(result, indent=args.indent))", "def createPatch(self):\n\n if not CraftCore.cache.findApplication(\"diff\"):\n CraftCore.log.critical(\"could not find diff tool, please run 'craft diffutils'\")\n return False\n\n # get the file paths of the tarballs\n filenames = self.localFileNames()\n\n destdir = self.workDir()\n\n # it makes no sense to make a diff against nothing\n if not os.path.exists(self.sourceDir()):\n CraftCore.log.error(\"source directory doesn't exist, please run unpack first\")\n return False\n\n CraftCore.log.debug(\"unpacking files into work root %s\" % destdir)\n\n # make a temporary directory so the original packages don't overwrite the already existing ones\n with tempfile.TemporaryDirectory() as tmpdir:\n _patchName = f\"{self.package.name}-{self.buildTarget}-{str(datetime.date.today()).replace('-', '')}.diff\"\n\n # unpack all packages\n for filename in filenames:\n CraftCore.log.debug(f\"unpacking this file: {filename}\")\n if not utils.unpackFile(self.__downloadDir, filename, tmpdir):\n return False\n\n patches = self.subinfo.patchesToApply()\n if not isinstance(patches, list):\n patches = [patches]\n for fileName, patchdepth in patches:\n if os.path.basename(fileName) == _patchName:\n CraftCore.log.info(f\"skipping patch {fileName} with patchlevel: {patchdepth}\")\n continue\n CraftCore.log.info(f\"applying patch {fileName} with patchlevel: {patchdepth}\")\n if not self.applyPatch(\n fileName,\n patchdepth,\n os.path.join(tmpdir, os.path.relpath(self.sourceDir(), self.workDir())),\n ):\n return False\n\n srcSubDir = os.path.relpath(self.sourceDir(), self.workDir())\n tmpSourceDir = os.path.join(tmpdir, srcSubDir)\n with io.BytesIO() as out:\n ignores = 
[]\n for x in [\n \"*~\",\n r\"*\\.rej\",\n r\"*\\.orig\",\n r\"*\\.o\",\n r\"*\\.pyc\",\n \"CMakeLists.txt.user\",\n ]:\n ignores += [\"-x\", x]\n\n # TODO: actually we should not accept code 2\n if not utils.system(\n [\"diff\", \"-Nrub\"] + ignores + [tmpSourceDir, self.sourceDir()],\n stdout=out,\n acceptableExitCodes=[0, 1, 2],\n cwd=destdir,\n ):\n return False\n patchContent = out.getvalue()\n # make the patch a -p1 patch\n patchContent = patchContent.replace(tmpSourceDir.encode(), f\"{srcSubDir}.orig\".encode())\n patchContent = patchContent.replace(str(self.sourceDir()).encode(), srcSubDir.encode())\n patchPath = os.path.join(self.packageDir(), _patchName)\n with open(patchPath, \"wb\") as out:\n out.write(patchContent)\n\n CraftCore.log.info(f'Patch created {patchPath} self.patchToApply[\"{self.buildTarget}\"] = [(\"{_patchName}\", 1)]')\n return True", "def push_mockups():\n local('cd ../../cts-ui && grunt')\n local('cp ../../cts-ui/mockups/css/*.css ../../mockups/cts-ui/css/.')\n local('cp -R ../../cts-ui/mockups/css/bootstrap ../../mockups/cts-ui/css/bootstrap')\n local('cp -R ../../cts-ui/mockups/img ../../mockups/cts-ui/img')\n local('cp ../../cts-ui/mockups/*.html ../../mockups/cts-ui/.')\n local('cd ../../mockups/cts-ui && git add *.html')\n local('cd ../../mockups/cts-ui/css && git add *.css')\n local('cd ../../mockups/cts-ui/css && git add bootstrap/*')\n local('cd ../../mockups/cts-ui && git add img/*')\n local('cd ../../mockups && git commit -am \"New cts-ui mockups [fabfile]\"')\n local('cd ../../mockups && git push origin master')", "def apply_patch(self, patch):\n # Remove Chromium WPT directory prefix.\n patch = patch.replace(RELATIVE_WPT_TESTS, '')\n try:\n self.run(['git', 'apply', '-'], input=patch)\n self.run(['git', 'add', '.'])\n except ScriptError as error:\n return error.message\n return ''", "def createPatch(self):\n CraftCore.debug.trace(\"GitSource createPatch\")\n patchFileName = os.path.join(\n self.packageDir(),\n \"%s-%s.patch\" % (self.package.name, str(datetime.date.today()).replace(\"-\", \"\")),\n )\n CraftCore.log.debug(\"git diff %s\" % patchFileName)\n with open(patchFileName, \"wt+\") as patchFile:\n return self.__git(\"diff\", stdout=patchFile)", "def patch_repos(self):", "def platformify(self):\n actions = ['rsync -aP {0} {1}'.format(\n os.path.join(TEMPLATEDIR, self.name, 'files/'), self.builddir\n )]\n patches = glob(os.path.join(TEMPLATEDIR, self.name, \"*.patch\"))\n for patch in patches:\n actions.append('cd {0}/web && patch -p1 < {1}'.format(\n self.builddir, patch)\n )\n\n # In some cases the package updater needs to be run after we've platform-ified the\n # template, so run it a second time. 
Worst case it's a bit slower to build but doesn't\n # hurt anything.\n actions.extend(self.package_update_actions())\n\n return actions", "def clpatch(ui, repo, clname, **opts):\n\tcl, patch, err = DownloadCL(ui, repo, clname)\n\targv = [\"hgpatch\"]\n\tif opts[\"no_incoming\"]:\n\t\targv += [\"--checksync=false\"]\n\tif err != \"\":\n\t\treturn err\n\ttry:\n\t\tcmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=True)\n\texcept:\n\t\treturn \"hgpatch: \" + ExceptionDetail()\n\tif os.fork() == 0:\n\t\tcmd.stdin.write(patch)\n\t\tos._exit(0)\n\tcmd.stdin.close()\n\tout = cmd.stdout.read()\n\tif cmd.wait() != 0:\n\t\treturn \"hgpatch failed\"\n\tcl.local = True\n\tcl.files = out.strip().split()\n\tfiles = ChangedFiles(ui, repo, [], opts)\n\textra = Sub(cl.files, files)\n\tif extra:\n\t\tui.warn(\"warning: these files were listed in the patch but not changed:\\n\\t\" + \"\\n\\t\".join(extra) + \"\\n\")\n\tcl.Flush(ui, repo)\n\tui.write(cl.PendingText() + \"\\n\")", "def maybe_apply_patches(input_dir, patch_files=None):\n if not patch_files:\n return\n old_dir = os.getcwd()\n os.chdir(input_dir)\n for patch_file in patch_files:\n with open(patch_file) as patch_in:\n patch_proc = subprocess.Popen(['git', 'am', '--3way'],\n stdin=patch_in)\n status = patch_proc.wait()\n if status != 0:\n raise RuntimeError('Patch failed; git output: %s' % status)\n logging.info(\"Finished applying patch %s\", patch_file)\n os.chdir(old_dir)", "def port_patches(\n default_prior_dir,\n default_post_dir,\n resource_prior_patches_dir,\n resource_post_patches_dir,\n resource_post_dir,\n resource_prior_dir=None,\n default_post_patches_dir=None,\n action=None,\n all_patch=False):\n\n default_prior_dir = os.path.expanduser(default_prior_dir)\n default_post_dir = os.path.expanduser(default_post_dir)\n resource_prior_patches_dir = os.path.expanduser(resource_prior_patches_dir)\n resource_post_patches_dir = os.path.expanduser(resource_post_patches_dir)\n resource_post_dir = os.path.expanduser(resource_post_dir)\n if resource_prior_dir:\n resource_prior_dir = os.path.expanduser(resource_prior_dir)\n if default_post_patches_dir:\n default_post_patches_dir = os.path.expanduser(default_post_patches_dir)\n\n os.makedirs(resource_post_patches_dir, exist_ok=True)\n os.makedirs(resource_post_dir, exist_ok=True)\n\n resource_prior_patch_map = {}\n resource_prior_patch_map_path = os.path.join(resource_prior_dir, \"patch_map.json\")\n if os.path.isfile(resource_prior_patch_map_path):\n with open(resource_prior_patch_map_path, \"r\") as resource_prior_patch_map_file:\n resource_prior_patch_map = json.load(resource_prior_patch_map_file)\n\n resource_post_patch_map = {}\n resource_post_patch_map_path = os.path.join(resource_post_dir, \"patch_map.json\")\n if os.path.isfile(resource_post_patch_map_path):\n with open(resource_post_patch_map_path, \"r\") as resource_post_patch_map_file:\n resource_post_patch_map = json.load(resource_post_patch_map_file)\n\n default_post_patch_map = {}\n default_post_patch_map_path = os.path.join(default_post_dir, \"patch_map.json\")\n if os.path.isfile(default_post_patch_map_path):\n with open(default_post_patch_map_path, \"r\") as default_post_patch_map_file:\n default_post_patch_map = json.load(default_post_patch_map_file)\n\n image_hashes = hash_images(default_prior_dir, resource_prior_patches_dir)\n\n # used for printing completion state during long-running task\n file_count = 0\n file_checkpoint = 0\n file_total = sum(len(files) for _, _, 
files in os.walk(default_post_dir))\n\n # attempt to texture everything in the merged output space\n for file_dir, _, file_names in os.walk(default_post_dir):\n for file_name in file_names:\n file_count += 1\n if file_count / file_total > file_checkpoint:\n print(f\"Remapping status: {file_checkpoint:.0%}\")\n file_checkpoint += .05\n\n # .mcmeta files are also ported when the associated .png is ported\n if not file_name.endswith(\".png\"):\n continue\n\n file_path = os.path.join(file_dir, file_name)\n relative_path = file_path.replace(default_post_dir, \"\")\n\n # full path to the file in the output merged space\n merged_post_resource_path = os.path.join(resource_post_dir, *relative_path.split(os.sep))\n\n # skip if already textured\n if action != 'copy-overwrite' and os.path.exists(merged_post_resource_path):\n continue\n\n # retrieve paths to all resource pack textures for this target image\n with open(file_path, 'rb') as image_file:\n image_hash = hashlib.md5(image_file.read()).hexdigest()\n matches = image_hashes.get(image_hash)\n\n if matches is None:\n continue\n # TODO: evaluate fitness of each match, and choose the best? seems very situational\n best_match = matches[0]\n\n if all_patch:\n default_patch_name, patch_name = 'All', 'All'\n else:\n default_patch_name, patch_name = infer_resource_patch_name(\n default_post_patches_dir,\n resource_post_patches_dir,\n default_post_patch_map,\n resource_prior_patch_map,\n resource_post_patch_map,\n relative_path,\n matches)\n\n if patch_name is None:\n continue\n\n # full path to the file in the output patch space\n post_resource_path = os.path.join(resource_post_patches_dir, patch_name, *relative_path.split(os.sep))\n\n # if source is same as target, then this is a no-op\n prior_resource_path = os.path.join(resource_prior_patches_dir, best_match)\n if prior_resource_path == post_resource_path:\n continue\n\n if os.path.exists(post_resource_path) and action != 'copy-overwrite':\n continue\n\n print()\n if len(matches) > 1:\n print(f\"Multiple matches found ({len(matches)}). 
Mapping from first:\")\n\n print(f'{action}: {best_match}\\n'\n f' -> {os.path.join(patch_name, *relative_path.split(os.sep))}')\n\n os.makedirs(os.path.dirname(post_resource_path), exist_ok=True)\n\n # move or copy the file\n perform_action(prior_resource_path, post_resource_path, action)\n\n prior_resource_meta_path = prior_resource_path + '.mcmeta'\n if os.path.exists(prior_resource_meta_path):\n print(f'{action}: {prior_resource_meta_path.replace(resource_prior_patches_dir, \"\")}\\n'\n f' -> {os.path.join(*post_resource_path.replace(resource_post_patches_dir, \"\").split(os.sep))}.mcmeta')\n\n perform_action(prior_resource_meta_path, post_resource_path + '.mcmeta', action)\n\n if not all_patch:\n update_mod_json(\n default_post_patches_dir,\n resource_prior_patches_dir,\n resource_post_patches_dir,\n relative_path,\n default_patch_name,\n patch_name)\n\n if os.path.exists(os.path.join(resource_post_patches_dir, UNKNOWN_PATCH_NAME)):\n print(\"Check the _UNKNOWN folder for textures ported into domains that did not belong to a previous patch.\")", "def mspatchc_CreatePatchFile(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"OldFileName\", \"NewFileName\", \"PatchFileName\", \"OptionFlags\", \"OptionData\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def cherry_pick_change(branch, revision, parent_repo, dry_run, verbose=False):\n drover = _Drover(branch, revision, parent_repo, dry_run, verbose)\n drover.run()", "def test_resetcherrypick_removes_added_files(\n repository: Repository, paths: Iterator[Path]\n) -> None:\n main = repository.head\n update, _ = createbranches(repository, \"update\", \"latest\")\n path1, path2 = next(paths), next(paths)\n\n repository.checkout(update)\n updatefiles({path1: \"a\", path2: \"\"})\n\n repository.checkout(main)\n updatefile(path1, \"b\")\n\n with pytest.raises(MergeConflictError, match=path1.name):\n repository.cherrypick(update.commit)\n\n repository.resetcherrypick()\n\n assert not path2.exists()", "def update(filepath, github_account):\n repo = _git.clone_from_github(\n _REPO_PATH, join(filepath, _REPO_NAME), github_account=github_account)\n _install_dependencies(repo)\n added, deleted, updated = _generate_all_clients(repo)\n if not any([added, deleted, updated]):\n return\n _run_tests(repo)\n commitmsg = _commit_message.build(added, deleted, updated)\n repo.add(['api_names_out.yaml', 'generated'])\n repo.commit(commitmsg, github_account.name, github_account.email)\n repo.push()", "def prep( c ):\n\n copytree( \"vendor\", \"dist/please_respond/timezonefinder\" )", "def test_resetcherrypick_keeps_unrelated_additions(\n repository: Repository, paths: Iterator[Path]\n) -> None:\n main = repository.head\n update, _ = createbranches(repository, \"update\", \"latest\")\n path1, path2 = next(paths), next(paths)\n\n repository.checkout(update)\n updatefile(path1, \"a\")\n\n repository.checkout(main)\n updatefile(path1, \"b\")\n\n path2.touch()\n\n with pytest.raises(MergeConflictError, match=path1.name):\n repository.cherrypick(update.commit)\n\n repository.resetcherrypick()\n\n assert path2.exists()", "def package():\n \n hou.hipFile.save()\n currentHip = hou.expandString(hou.hipFile.name())\n\n # create a temp directory we are going to fill with crap\n tempFilePath = tempfile.mkdtemp()\n \n otls = os.path.join(tempFilePath, \"otls\")\n os.mkdir(otls)\n files = os.path.join(tempFilePath, \"files\")\n os.mkdir(files)\n \n # Get all the external references to the hipfile\n fileOnDisk 
= hou.fileReferences()\n\n # loop and do what comes natural.\n for _file in fileOnDisk:\n\n parm = _file[0]\n filepath = _file[1]\n \n # if its a otl we need to store it.\n if filepath.endswith(\".otl\"):\n \n shutil.copy(hou.expandString(filepath), otls)\n \n else:\n \n if not os.path.isfile(hou.expandString(filepath)): \n \n continue\n \n # create a directory in files and save 1 file to that location\n tmpFileName = os.path.basename(hou.expandString(filepath))\n tmpFileDir = os.path.basename(os.path.dirname(hou.expandString(filepath)))\n path = os.path.join(files, tmpFileDir)\n \n if not os.path.isdir(path):\n \n os.mkdir(path)\n\n shutil.copy(hou.expandString(filepath), os.path.join(path, os.path.basename(hou.expandString(filepath))))\n\n try:\n \n if not parm.node().isLocked():\n \n parm.set(os.path.join(path.replace(tempFilePath, \"$HIP\"), tmpFileName))\n \n except hou.PermissionError: \n \n logging.warning(\"Error hardening parm :\" + str(parm.name()) + \"on node \" +parm.node().path())\n\n hou.hipFile.save(os.path.join(tempFilePath, os.path.basename(hou.expandString(hou.hipFile.name()))))\n # Load the source hipfile\n hou.hipFile.load(currentHip)\n \n # create a zipfile and package everything. then copy it to the home.\n zipfileLoc = zipdir(tempFilePath)\n shutil.move(zipfileLoc, os.path.join(hou.expandString(\"~\"), \"package.zip\"))\n shutil.rmtree(tempFilePath)", "def _applyGooglePatches(self):\n # Edit the base template to point users to the Google fork\n # of the Melange codebase instead of the vanilla release.\n tmpl_file = self.wc.path(\n self._branchPath('app/soc/templates/soc/base.html'))\n tmpl = io.fileToLines(tmpl_file)\n for i, line in enumerate(tmpl):\n if 'http://code.google.com/p/soc/source/browse/tags/' in line:\n tmpl[i] = line.replace('/p/soc/', '/p/soc-google/')\n break\n else:\n raise error.ExpectationFailed(\n 'No source code link found in base.html')\n io.linesToFile(tmpl_file, tmpl)\n\n self.wc.commit(\n 'Customize the Melange release link in the sidebar menu')", "def sync_from_upstream(self):\n if not self.missing_branches:\n self.log(f\"All branches are synced, nothing to do here.\")\n return\n\n with tempfile.TemporaryDirectory() as tmpdir:\n src_path = Path(tmpdir) / self.deb_model.src\n self.deb_model.base.clone(cwd=tmpdir)\n for branch in self.missing_branches:\n self.log(f\"Processing branch {branch}\")\n self.deb_model.base.checkout(branch, new_branch=True, cwd=str(src_path))\n\n changelog_fn = src_path / \"debian/changelog\"\n changelog_fn_tpl = src_path / \"debian/changelog.in\"\n\n k8s_major_minor = semver.VersionInfo.parse(branch.lstrip(\"v\"))\n\n changelog_context = {\n \"deb_version\": f\"{str(k8s_major_minor)}-0\",\n }\n\n self.log(f\"Writing template vars {changelog_context}\")\n changelog_out = changelog_fn_tpl.read_text()\n changelog_out = self.render(changelog_fn_tpl, changelog_context)\n changelog_fn.write_text(changelog_out)\n\n self.log(f\"Committing {branch}\")\n self.deb_model.base.add([str(changelog_fn)], cwd=str(src_path))\n self.deb_model.base.commit(\n f\"Creating branch {branch}\", cwd=str(src_path)\n )\n self.deb_model.base.push(ref=branch, cwd=str(src_path))", "def regenerate():\n local('pelican -o {} -r -s pelicanconf.py'.format(env.deploy_path))", "def createPatch(self):\n craftDebug.trace(\"HgSource.createPatch called\")\n ret = False\n if self.enableHg:\n os.chdir(self.sourceDir())\n patchFile = os.path.join(self.packageDir(),\n \"%s-%s.patch\" % (self.package, str(datetime.date.today()).replace('-', '')))\n ret = 
self.system(self.sourceDir(), \"%s diff > %s\" % (self.hgExecutable, patchFile))\n return ret", "def mspatchc_CreatePatchFileByHandles(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"OldFileHandle\", \"NewFileHandle\", \"PatchFileHandle\", \"OptionFlags\", \"OptionData\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def test_resetcherrypick_keeps_unrelated_changes(\n repository: Repository, paths: Iterator[Path]\n) -> None:\n main = repository.head\n update, _ = createbranches(repository, \"update\", \"latest\")\n path1, path2 = next(paths), next(paths)\n\n repository.checkout(update)\n updatefile(path1, \"a\")\n\n repository.checkout(main)\n updatefile(path1, \"b\")\n updatefile(path2)\n\n path2.write_text(\"c\")\n\n with pytest.raises(MergeConflictError, match=path1.name):\n repository.cherrypick(update.commit)\n\n repository.resetcherrypick()\n\n assert path2.read_text() == \"c\"" ]
[ "0.6193804", "0.61185837", "0.60817057", "0.587971", "0.5735855", "0.57174575", "0.5645524", "0.55588645", "0.5523853", "0.5493776", "0.54873055", "0.5407881", "0.53941137", "0.5388479", "0.5378879", "0.53761584", "0.5310182", "0.52620775", "0.52522963", "0.52342397", "0.52270097", "0.5216815", "0.5192205", "0.5188725", "0.5174338", "0.51693624", "0.515936", "0.51499003", "0.5139899", "0.5139076" ]
0.6612873
0
Print every package as "ignored".
def _print_ignored(packages): if not packages: print("## No Rez package was set to be ignored") print("No data found") return print("## Every package in this list was explicitly set to ignored by the user") for package, pattern in sorted(packages, key=_get_package_name): print( 'Package: {package.name} - Pattern: "{pattern}"'.format( package=package, pattern=pattern ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_missing(packages, verbose):\n if not packages:\n print(\"## No Rez packages were found.\")\n print(\"No data found\")\n\n return\n\n print(\"## Your command affects these Rez packages.\")\n\n template = \"{package.name}\"\n\n if verbose:\n template = \"{package.name}: {path}\"\n\n for line in sorted(\n template.format(package=package, path=finder.get_package_root(package))\n for package in packages\n ):\n print(line)", "def _print_skips(skips, verbose):\n if not skips:\n print(\"## No packages were skipped\")\n print(\"Every found Rez package can be processed by the command.\")\n\n return\n\n print(\"## Packages were skipped from running a command. Here's the full list:\")\n\n template = \"{issue.package.name}: {issue.reason}\"\n\n if verbose:\n template = \"{issue.package.name}: {issue.path}: {issue.reason}\"\n\n for issue in skips:\n print(template.format(issue=issue))", "def list_packages(self):\n for tag, pkg in PACKAGES.iteritems():\n print \"{tag} - {label}\".format(tag=tag, label=pkg['label'])", "def print_test_deps_not_in_package_deps(self):\n extras = []\n for key, rec_deps in self.recursive_pkg_deps.items():\n any = self.test_imports.get(key, set()).difference(rec_deps, set([key]))\n if any:\n extras.append((key, any))\n\n if extras:\n print(\"Packages whose tests have extra dependencies not listed in `go list -f {{.Deps}}`:\")\n for pkg, deps in extras:\n print(\"\\t{0}: {1}\".format(pkg, \", \".join(deps)))\n print(\"\\n\")", "def ignore_from_package(self, project, package, arch, ignore):\n for binary in binary_list(self.apiurl, project, 'standard', arch, package):\n ignore.add(binary.name)\n\n return ignore", "def output_ignored(self) -> List[str]:\n output = list()\n for ignored in sorted(self.ignored):\n if len(ignored) == 2:\n line = f\"{ignored[0]} - Ignored {ignored[1]}!\"\n else:\n line = f\"{ignored[0]} - {ignored[1]}: Ignored {ignored[2]}!\"\n logger.info(line)\n output.append(line)\n return output", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def show_missing():\n if missing_modules:\n info(\"The following modules are currently not installed and would enable additional tasks:\")\n for pkg_name in missing_modules:\n info(' ' + pkg_name)", "def _filter_pkgs(self, pkgs):\n pkgs = [pkg.strip() for pkg in pkgs]\n return [\n pkg for pkg in pkgs\n if pkg.startswith(self.base_pkg) and not pkg.startswith(os.path.join(self.base_pkg, \"vendor/\"))\n ]", "def ignores(self):\n pass # make ignore_tags unaccessible", "def isIgnoredPackage(self, *args):\n return _libsbml.SBMLDocument_isIgnoredPackage(self, *args)", "def remove_packages(self, packages):", "def disableUnusedPackages(self, *args):\n return _libsbml.SBMLExtensionRegistry_disableUnusedPackages(self, *args)", "def isIgnoredPkg(self, *args):\n return _libsbml.SBMLDocument_isIgnoredPkg(self, *args)", "def suppress(self):\n pass", "def ignore_pyc(root,names):\n return [name for name in names if name.endswith('pyc')]", "def printImported():\r\n ll = sorted([mod for mod in sys.modules if mod.find('pubsub') >= 0])\r\n\r\n py2and3.print_('\\n'.join(ll))", "def gitIgnoreContent( self, pars, directory ):\n\n name = pars['name']\n\n return f\"\"\"\\\n{name}-*.pkg*\nsrc/\npkg/\n\"\"\"", "def cleanup_import(package_name):\n\n for k in list(sys.modules):\n if not isinstance(k, str):\n # Some things will actually do this =_=\n 
continue\n elif k.startswith('astropy_helpers.tests'):\n # Don't delete imported test modules or else the tests will break,\n # badly\n continue\n if k == package_name or k.startswith(package_name + '.'):\n del sys.modules[k]", "def report_unused_cycle_suppressions(self, reporter):\n for module in self.get_modules():\n for dep in module.get_dependencies():\n if not dep.suppression_used:\n reporter.cyclic_issue(\"unused cycle suppression: {0} -> {1}\".format(module.get_name()[7:], dep.get_other_module().get_name()[7:]))", "def ignore_all(self):\n # type: () -> bool\n return False", "def print_all_separation_algorithms():\n from ..separation import all_separation_algorithms\n print('\\n'.join([a.__name__ for a in all_separation_algorithms]))", "def ignored(*exceptions):\n import logging\n import pprint\n try:\n yield\n except exceptions:\n logging.warning(pprint.pformat(exceptions[0]))\n pass", "def have_package_lists():\n return 'Filename:' in execute('apt-cache', 'show', 'python', check=False, capture=True)", "def ignore_from_repo(self, directory, ignore):\n for filename in os.listdir(directory):\n if not filename.endswith('.rpm'):\n continue\n _, basename = filename.split('-', 1)\n ignore.add(basename[:-4])", "def packages(self):\n return []", "def no_additional_complaints() -> None:\n logging.getLogger(\"asyncio\").setLevel(\"CRITICAL\")\n warnings.simplefilter(\"ignore\")", "def clearWarnings():\n for name, mod in list(sys.modules.items()):\n try:\n reg = getattr(mod, \"__warningregistry__\", None)\n except ImportError:\n continue\n if reg:\n reg.clear()", "def strip_blocks(self):\n print_content = False\n if self.exclude_blocks and len(self.exclude_blocks) > 0:\n for package_name, package in self.packages.items():\n for c, fd in package.components.iteritems():\n for block in self.exclude_blocks:\n JS_BLOCKS = re.compile(r'/[/*]\\s*<' + block + '>.+?<\\/' + block + '>(?:\\s*\\*/)?(?s)', re.MULTILINE)\n fd.content = JS_BLOCKS.sub('/* compat block ' + block + ' removed */', fd.content)\n print_content = True\n if print_content:\n print fd.content", "def show_packagelist(user, repo, packages, distro=False, version=False,\n name=False, match=False, pkgtype=False):\n\n print('Currently {}/{} contains these matching packages:'.format(\n user, repo))\n\n numpkgs = 0\n for package in packages:\n if (distro and not package['distro_version'] == distro) or \\\n (version and not package['version'] == version) or \\\n (name and not package['name'] == name) or \\\n (pkgtype and not package['type'] == pkgtype) or \\\n (match and match not in package['filename']):\n continue\n\n print(fmt_pkg(user, repo, package))\n numpkgs += 1\n\n print(\"Repo contains {} matching packages.\".format(numpkgs))" ]
[ "0.6896404", "0.65300035", "0.64506", "0.6380679", "0.6243997", "0.6215631", "0.6034081", "0.60240585", "0.58716226", "0.5729593", "0.57106125", "0.5704774", "0.56812304", "0.5655815", "0.565425", "0.5575963", "0.5534667", "0.550217", "0.5492941", "0.54724574", "0.547046", "0.5437451", "0.5379577", "0.5352382", "0.5352162", "0.534097", "0.5330411", "0.5326467", "0.5326147", "0.5318716" ]
0.83495414
0
Print all Rez packages that the user's command should be run on.
def _print_missing(packages, verbose):
    if not packages:
        print("## No Rez packages were found.")
        print("No data found")

        return

    print("## Your command affects these Rez packages.")

    template = "{package.name}"

    if verbose:
        template = "{package.name}: {path}"

    for line in sorted(
        template.format(package=package, path=finder.get_package_root(package))
        for package in packages
    ):
        print(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_packages(self):\n for tag, pkg in PACKAGES.iteritems():\n print \"{tag} - {label}\".format(tag=tag, label=pkg['label'])", "def _print_ignored(packages):\n if not packages:\n print(\"## No Rez package was set to be ignored\")\n print(\"No data found\")\n\n return\n\n print(\"## Every package in this list was explicitly set to ignored by the user\")\n\n for package, pattern in sorted(packages, key=_get_package_name):\n print(\n 'Package: {package.name} - Pattern: \"{pattern}\"'.format(\n package=package, pattern=pattern\n )\n )", "def packages():", "def print_dependencies(pkg):\n\n for deptype in (\"build\", \"link\", \"run\"):\n color.cprint(\"\")\n color.cprint(section_title(\"%s Dependencies:\" % deptype.capitalize()))\n deps = sorted(pkg.dependencies_of_type(deptype))\n if deps:\n colify(deps, indent=4)\n else:\n color.cprint(\" None\")", "def print_dependencies(_run):\n print('Sources:')\n for source, digest in _run.experiment_info['sources']:\n print(' {:<43} {}'.format(source, digest))\n\n print('\\nDependencies:')\n for pack, version in _run.experiment_info['dependencies']:\n print(' {:<20} >= {}'.format(pack, version))", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def print_deps(self):\n\t\tprint self.deps, '\\n'", "def printall():\n print listAll()", "def show_versions():\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n\n print(\"\\nSystem:\")\n for k, stat in sys_info.items():\n print(\"{k:>10}: {stat}\".format(k=k, stat=stat))\n\n print(\"\\nPython dependencies:\")\n for k, stat in deps_info.items():\n print(\"{k:>13}: {stat}\".format(k=k, stat=stat))", "def print(self):\n print(\"Repository list: \")\n for repo in self.list:\n print(\"- \" + repo.name)", "def report(*packages):\n accepted_commands = ['python','conda']\n for package in packages:\n loc = \"not installed in this environment\"\n ver = \"unknown\"\n\n try:\n module = importlib.import_module(package)\n loc = os.path.dirname(module.__file__)\n\n try:\n ver = str(module.__version__)\n except Exception:\n pass\n \n except (ImportError, ModuleNotFoundError):\n if package in accepted_commands:\n try:\n # See if there is a command by that name and check its --version if so\n try:\n loc = subprocess.check_output(['command','-v', package]).decode().splitlines()[0].strip()\n except:\n # .exe in case powershell (otherwise wouldn't need it)\n loc = subprocess.check_output(['where.exe', package]).decode().splitlines()[0].strip() \n out = \"\"\n try:\n out = subprocess.check_output([package, '--version'], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n out = e.output\n\n # Assume first word in output with a period and digits is the version\n for s in out.decode().split():\n if '.' 
in s and str.isdigit(s[0]) and sum(str.isdigit(c) for c in s)>=2:\n ver=s.strip()\n break\n except:\n pass\n elif package == 'system':\n try:\n ver = platform.platform(terse=True)\n loc = \"OS: \" + platform.platform()\n except Exception:\n pass\n else:\n pass\n \n print(\"{0:30} # {1}\".format(package + \"=\" + ver,loc))", "def show ( self, **show_kw ):\n for package in self._subdirs.values():\n package.show ( **show_kw )", "def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()", "def show_packagelist(user, repo, packages, distro=False, version=False,\n name=False, match=False, pkgtype=False):\n\n print('Currently {}/{} contains these matching packages:'.format(\n user, repo))\n\n numpkgs = 0\n for package in packages:\n if (distro and not package['distro_version'] == distro) or \\\n (version and not package['version'] == version) or \\\n (name and not package['name'] == name) or \\\n (pkgtype and not package['type'] == pkgtype) or \\\n (match and match not in package['filename']):\n continue\n\n print(fmt_pkg(user, repo, package))\n numpkgs += 1\n\n print(\"Repo contains {} matching packages.\".format(numpkgs))", "def main():\n for dev in Discover.discover().values():\n print(dev)", "def show_versions():\n sys_info = _get_sys_info()\n versions = _get_autogluon_versions()\n sorted_keys = sorted(versions.keys(), key=lambda x: x.lower())\n\n maxlen = 0 if len(versions) == 0 else max(len(x) for x in versions)\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n for k, v in sys_info.items():\n print(f\"{k:<{maxlen}}: {v}\")\n print(\"\")\n for k in sorted_keys:\n print(f\"{k:<{maxlen}}: {versions[k]}\")", "def _print_skips(skips, verbose):\n if not skips:\n print(\"## No packages were skipped\")\n print(\"Every found Rez package can be processed by the command.\")\n\n return\n\n print(\"## Packages were skipped from running a command. 
Here's the full list:\")\n\n template = \"{issue.package.name}: {issue.reason}\"\n\n if verbose:\n template = \"{issue.package.name}: {issue.path}: {issue.reason}\"\n\n for issue in skips:\n print(template.format(issue=issue))", "def print_drivers():\n driver_info_dict = get_all_driver_infos()\n keys = sorted(driver_info_dict)\n print \"%-25s%-15s%-9s%-25s\" % (\n \"Module name\", \"Driver name\", \"Version\", \"Status\")\n for d in keys:\n print \" %(module_name)-25s%(driver_name)-15s%(version)-9s%(status)-25s\" % driver_info_dict[d]", "def print_package_info(package):\r\n print(\"******************\")\r\n print(\"Product: %s\" % package[0]['description'])\r\n print(\"Price: %s$ monthly\" % package[0]['prices'][0]['recurringFee'])\r\n print(\"******************\")\r\n return", "def list_packages(self):\n\n # First extract loaded module names from sys.modules\n sys_modules = sys.modules.keys()\n\n packages = {}\n\n # First add moduels in sys.modules (built-ins,\n # preloads and already loaded ones)\n for name in sys_modules:\n d = self.find_package(name)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] }\n\n #import site\n # Loop through all directories in sys.path and check for modules\n # Dont iterate through <prefix>/lib directory\n libdir = os.path.join(sys.prefix, 'lib')\n\n walked = []\n for top_level in self.paths:\n if not os.path.isdir(top_level):\n continue\n\n # Dont iterate through libdir\n if os.path.abspath(top_level) == os.path.abspath(libdir):\n continue\n\n walked.append(top_level)\n for item in os.listdir(top_level):\n\n fullpath = os.path.join(top_level, item)\n if fullpath in walked: continue\n\n walked.append(fullpath)\n # Remove the extension\n idx = item.find('.')\n if idx != -1: item = item[:idx]\n d = self.find_package(item)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] } \n\n for key,item in packages.items():\n print\n print self.pkgTypeInfo(key)\n print\n\n # Print sorted\n listofitems = item.keys()\n listofitems.sort()\n\n for key2 in listofitems:\n print key2,':',item[key2]", "def list_package(all: bool = False) -> List[List[str]]:\n if not all:\n pkgs_info = read_installation_records()\n else:\n pkgs_info = []\n for pkg in pkg_resources.working_set:\n pkgs_info.append([pkg.project_name, pkg.version])\n\n return pkgs_info", "def print_paths():\n print(f\"~: {HOME}\")\n print(f\"PYTHON: {PYTHON}\")\n print(f\"IDEAS: {IDEAS}\")\n print(f\"SITE-PACKAGES {SITE_PACKAGES}\")\n if os.path.exists(TESTS):\n print(f\"TESTS: {TESTS}\")", "def troubleshoot():\n libraries = (sys, pd, openpyxl, matplotlib, pip)\n for i in libraries:\n try:\n print(str(i), 'version:', i.__version__)\n except AttributeError:\n pass\n except ModuleNotFoundError:\n print('You do not have', str(i), 'installed.')\n print('You can do so via your interpreter or:')\n print('py -m pip install', '-' + str(i))\n print('in command prompt')", "def printImported():\r\n ll = sorted([mod for mod in sys.modules if mod.find('pubsub') >= 0])\r\n\r\n py2and3.print_('\\n'.join(ll))", "def main():\n\n local_pkgs = set(os.listdir(GIT_FOLDER))\n local_pkgs = set([it.replace('.git', '') for it in local_pkgs])\n\n pkgdb_info = pkgdb_pkg_branch()\n\n pkgdb_pkgs = set(pkgdb_info.keys())\n\n ## Commented out as we keep the git of retired packages while they won't\n ## show up in the 
information retrieved from pkgdb.\n\n #if (local_pkgs - pkgdb_pkgs):\n #print 'Some packages are present locally but not on pkgdb:'\n #print ', '.join(sorted(local_pkgs - pkgdb_pkgs))\n\n if (pkgdb_pkgs - local_pkgs):\n print 'Some packages are present in pkgdb but not locally:'\n print ', '.join(sorted(pkgdb_pkgs - local_pkgs))\n\n tofix = set()\n for pkg in sorted(pkgdb_info):\n pkgdb_branches = pkgdb_info[pkg]\n git_branches = get_git_branch(pkg)\n diff = (pkgdb_branches - git_branches)\n if diff:\n print '%s missing: %s' % (pkg, ','.join(sorted(diff)))\n tofix.add(pkg)\n branch_package(pkg, diff)\n\n if tofix:\n print 'Packages fixed (%s): %s' % (\n len(tofix), ', '.join(sorted(tofix)))", "def list():\n\n click.secho('List of libraries in SJSU-Dev2\\n', fg='white', bold=True)\n package_registry = GetListOfSJSUDev2Repos()\n library_list = [f'{x : <20}: {package_registry[x]}'\n for x in package_registry if x.startswith('lib')]\n print('\\n'.join(library_list))", "def print_versions():\n\n print('-=' * 38)\n print(\"PyTables version: %s\" % tb.__version__)\n print(\"HDF5 version: %s\" % tb.which_lib_version(\"hdf5\")[1])\n print(\"NumPy version: %s\" % np.__version__)\n tinfo = tb.which_lib_version(\"zlib\")\n if ne.use_vml:\n # Get only the main version number and strip out all the rest\n vml_version = ne.get_vml_version()\n vml_version = re.findall(\"[0-9.]+\", vml_version)[0]\n vml_avail = \"using VML/MKL %s\" % vml_version\n else:\n vml_avail = \"not using Intel's VML/MKL\"\n print(f\"Numexpr version: {ne.__version__} ({vml_avail})\")\n if tinfo is not None:\n print(f\"Zlib version: {tinfo[1]} (in Python interpreter)\")\n tinfo = tb.which_lib_version(\"lzo\")\n if tinfo is not None:\n print(\"LZO version: {} ({})\".format(tinfo[1], tinfo[2]))\n tinfo = tb.which_lib_version(\"bzip2\")\n if tinfo is not None:\n print(\"BZIP2 version: {} ({})\".format(tinfo[1], tinfo[2]))\n tinfo = tb.which_lib_version(\"blosc\")\n if tinfo is not None:\n blosc_date = tinfo[2].split()[1]\n print(\"Blosc version: {} ({})\".format(tinfo[1], blosc_date))\n blosc_cinfo = tb.blosc_get_complib_info()\n blosc_cinfo = [\n \"{} ({})\".format(k, v[1]) for k, v in sorted(blosc_cinfo.items())\n ]\n print(\"Blosc compressors: %s\" % ', '.join(blosc_cinfo))\n blosc_finfo = ['shuffle', 'bitshuffle']\n print(\"Blosc filters: %s\" % ', '.join(blosc_finfo))\n tinfo = tb.which_lib_version(\"blosc2\")\n if tinfo is not None:\n blosc2_date = tinfo[2].split()[1]\n print(\"Blosc2 version: {} ({})\".format(tinfo[1], blosc2_date))\n blosc2_cinfo = tb.blosc2_get_complib_info()\n blosc2_cinfo = [\n \"{} ({})\".format(k, v[1]) for k, v in sorted(blosc2_cinfo.items())\n ]\n print(\"Blosc2 compressors: %s\" % ', '.join(blosc2_cinfo))\n blosc2_finfo = ['shuffle', 'bitshuffle']\n print(\"Blosc2 filters: %s\" % ', '.join(blosc2_finfo))\n try:\n from Cython import __version__ as cython_version\n print('Cython version: %s' % cython_version)\n except Exception:\n pass\n print('Python version: %s' % sys.version)\n print('Platform: %s' % platform.platform())\n # if os.name == 'posix':\n # (sysname, nodename, release, version, machine) = os.uname()\n # print('Platform: %s-%s' % (sys.platform, machine))\n print('Byte-ordering: %s' % sys.byteorder)\n print('Detected cores: %s' % tb.utils.detect_number_of_cores())\n print('Default encoding: %s' % sys.getdefaultencoding())\n print('Default FS encoding: %s' % sys.getfilesystemencoding())\n print('Default locale: (%s, %s)' % locale.getdefaultlocale())\n print('-=' * 38)\n\n # This should improve 
readability whan tests are run by CI tools\n sys.stdout.flush()", "def dump(self):\n\n\t\treturn [\n\t\t\tpkg.dump()\n\t\t\tfor pkg in self._packages.values()\n\t\t]", "def init():\n print(\"Installed everything under {0} \"\n \"virtual environment\".format(package_name()))", "def packages(self):\n return []" ]
[ "0.6903446", "0.6681349", "0.64860743", "0.6483986", "0.64436895", "0.63718176", "0.6362368", "0.62700814", "0.62351704", "0.6204767", "0.6189284", "0.61830366", "0.61695254", "0.6160334", "0.6057463", "0.6053403", "0.6039349", "0.5968796", "0.5965892", "0.5961952", "0.59532803", "0.5946831", "0.59426504", "0.59075403", "0.58807504", "0.58419687", "0.5835895", "0.5812998", "0.5797266", "0.579549" ]
0.7438773
0
Print the Rez packages that were skipped automatically by this tool. Skipped packages differ from "invalid" packages in that they are valid Rez packages that simply do not need the command run on them. Ignored packages are Rez packages that the user explicitly said not to process. Skipped packages are packages that the user may have meant to process but that this tool could not process (for one reason or another).
def _print_skips(skips, verbose):
    if not skips:
        print("## No packages were skipped")
        print("Every found Rez package can be processed by the command.")

        return

    print("## Packages were skipped from running a command. Here's the full list:")

    template = "{issue.package.name}: {issue.reason}"

    if verbose:
        template = "{issue.package.name}: {issue.path}: {issue.reason}"

    for issue in skips:
        print(template.format(issue=issue))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_ignored(packages):\n if not packages:\n print(\"## No Rez package was set to be ignored\")\n print(\"No data found\")\n\n return\n\n print(\"## Every package in this list was explicitly set to ignored by the user\")\n\n for package, pattern in sorted(packages, key=_get_package_name):\n print(\n 'Package: {package.name} - Pattern: \"{pattern}\"'.format(\n package=package, pattern=pattern\n )\n )", "def _print_missing(packages, verbose):\n if not packages:\n print(\"## No Rez packages were found.\")\n print(\"No data found\")\n\n return\n\n print(\"## Your command affects these Rez packages.\")\n\n template = \"{package.name}\"\n\n if verbose:\n template = \"{package.name}: {path}\"\n\n for line in sorted(\n template.format(package=package, path=finder.get_package_root(package))\n for package in packages\n ):\n print(line)", "def print_test_deps_not_in_package_deps(self):\n extras = []\n for key, rec_deps in self.recursive_pkg_deps.items():\n any = self.test_imports.get(key, set()).difference(rec_deps, set([key]))\n if any:\n extras.append((key, any))\n\n if extras:\n print(\"Packages whose tests have extra dependencies not listed in `go list -f {{.Deps}}`:\")\n for pkg, deps in extras:\n print(\"\\t{0}: {1}\".format(pkg, \", \".join(deps)))\n print(\"\\n\")", "def show_missing():\n if missing_modules:\n info(\"The following modules are currently not installed and would enable additional tasks:\")\n for pkg_name in missing_modules:\n info(' ' + pkg_name)", "def sort_packages(self) -> None:\n self.recommended_packages = []\n self.required_packages = []\n for package in self.repository_packages:\n try:\n output = self.guest.execute(Command('rpm', '-q', package), silent=True)\n assert output.stdout\n self.debug(f\"Package '{output.stdout.strip()}' already installed.\")\n except tmt.utils.RunError:\n if self.skip_missing:\n self.recommended_packages.append(package)\n else:\n self.required_packages.append(package)", "def missing_requirements_command(args, packages=None, exit_on_failure=True):\n requirements_files = (\n args.requirements or Config.requirements_files or []\n )\n ignore_list = (\n args.ignore or Config.ignore_list or []\n )\n\n printer = Printer()\n if not validate_files(\n files=requirements_files,\n printer=printer,\n exit_on_failure=exit_on_failure):\n return False\n\n requirements = RequirementCollection()\n for requirements_file in requirements_files:\n requirements.extend(\n RequirementCollection.from_file(filepath=requirements_file)\n )\n\n packages = (\n packages or dependency_list(ignore_list=ignore_list)\n )\n\n missing = [\n (package, required_by)\n for package, required_by\n in missing_requirements(\n packages=packages,\n requirements=requirements,\n ignore_list=ignore_list\n )\n ]\n\n headers = [\n messages.PACKAGE,\n messages.REQUIRED,\n messages.REQUIRED_BY,\n ]\n\n tabular_data = []\n for package, requirers in missing:\n if requirers:\n for required_by, required_version in requirers:\n tabular_data.append([\n printer.colored_message(\n message=package.key,\n message_color=printer.color_package\n ),\n required_version,\n required_by.key,\n ])\n else:\n tabular_data.append([\n printer.colored_message(\n message=package.key,\n message_color=printer.color_package\n ),\n package.version.specifier,\n \"Requirements\",\n ])\n\n if tabular_data:\n printer.error(messages.MISSING_FOUND)\n printer.table(headers=headers, tabular_data=tabular_data)\n if exit_on_failure:\n sys.exit(1)\n return False\n\n printer.success(messages.MISSING_OK)\n return True", "def 
test_print_version_repositories_SKIP(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/%s/Packages.gz' % (MAJOR, MINOR, MAJOR, MINOR, 0, 'all'): DATA,\n '%d.%d/maintained/%d.%d-%d/%s/Packages.gz' % (MAJOR, MINOR, MAJOR, MINOR, 0, ARCH): DATA,\n })\n tmp = self.u.print_version_repositories()\n self.assertEqual(set((\n 'deb file:///mock/%d.%d/maintained/ %d.%d-%d/%s/' % (MAJOR, MINOR, MAJOR, MINOR, 0, 'all'),\n 'deb file:///mock/%d.%d/maintained/ %d.%d-%d/%s/' % (MAJOR, MINOR, MAJOR, MINOR, 0, ARCH),\n )), set(tmp.splitlines()))", "def test_collect_playbooks_dependencies_skip_unavailable(self, module_repo):\n expected_result = {\n # playbooks:\n (\"Slack\", False),\n (\"Indeni\", True),\n # integrations:\n (\"FeedAlienVault\", False),\n (\"ipinfo\", True),\n (\"FeedAutofocus\", True),\n # scripts:\n (\"GetServerURL\", False),\n (\"HelloWorld\", True),\n }\n test_input = [\n {\n \"Dummy Playbook\": {\n \"name\": \"Dummy Playbook\",\n \"file_path\": \"dummy_path\",\n \"fromversion\": \"dummy_version\",\n \"implementing_scripts\": [\n \"GetServerURL\",\n \"HelloWorldScript\",\n ],\n \"implementing_playbooks\": [\n \"Failed Login Playbook - Slack v2\",\n \"Indeni Demo\",\n ],\n \"command_to_integration\": {\n \"alienvault-get-indicators\": \"\",\n \"ip\": \"ipinfo\",\n \"autofocus-get-indicators\": \"\",\n },\n \"tests\": [\"dummy_playbook\"],\n \"pack\": \"dummy_pack\",\n \"incident_fields\": [],\n \"skippable_tasks\": [\n \"Print\",\n \"Failed Login Playbook - Slack v2\",\n \"alienvault-get-indicators\",\n \"GetServerURL\",\n ],\n }\n },\n ]\n\n found_result = PackDependencies._collect_playbooks_dependencies(\n pack_playbooks=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)", "def show_packagelist(user, repo, packages, distro=False, version=False,\n name=False, match=False, pkgtype=False):\n\n print('Currently {}/{} contains these matching packages:'.format(\n user, repo))\n\n numpkgs = 0\n for package in packages:\n if (distro and not package['distro_version'] == distro) or \\\n (version and not package['version'] == version) or \\\n (name and not package['name'] == name) or \\\n (pkgtype and not package['type'] == pkgtype) or \\\n (match and match not in package['filename']):\n continue\n\n print(fmt_pkg(user, repo, package))\n numpkgs += 1\n\n print(\"Repo contains {} matching packages.\".format(numpkgs))", "def get_not_installed_rpm_packages():\n def is_installed(elem):\n return elem in PMDK_TOOLS and elem in listdir('/usr/bin/') or\\\n elem == \"pmdk\" or elem + '.so' in listdir('/usr/lib64/')\n\n elements = get_libraries_names()\n not_installed_packages = []\n for elem in elements:\n if not is_installed(elem):\n not_installed_packages.append(elem)\n return not_installed_packages", "def __report(arguments, _):\n ignored_packages, other_packages, invalid_packages, skips = __gather_package_data(\n arguments\n )\n\n packages, invalids = worker.report(\n other_packages,\n maximum_repositories=arguments.maximum_repositories,\n maximum_rez_packages=arguments.maximum_rez_packages,\n )\n\n invalids.extend(invalid_packages)\n\n _print_ignored(ignored_packages)\n print(\"\\n\")\n _print_skips(skips, arguments.verbose)\n print(\"\\n\")\n _print_invalids(invalids, arguments.verbose)\n print(\"\\n\")\n _print_missing(packages, arguments.verbose)\n\n sys.exit(0)", "def _print_invalids(invalids, verbose):\n if not invalids:\n print(\"## No Rez package was set as invalid.\")\n print(\"Nothing is invalid. 
Which is a good thing!\")\n\n return\n\n print(\"## Some packages were marked as invalid. Here's why:\")\n\n template = \"{package.name}: {message}\"\n\n if verbose:\n template = \"{package.name}: {path} {message}: {full}\"\n\n for message in sorted(\n (\n template.format(\n package=error.get_package(),\n path=error.get_path(),\n message=str(error),\n full=error.get_full_message(),\n )\n for error in invalids\n )\n ):\n print(message)", "def test_packages(self):\n for pkg in self.expected_packages:\n status, output = commands.getstatusoutput('pkg_info -qx %s' % pkg)\n assert status == 0", "def getMissingPackages(self, language=None, all=False, packages=None, showInstalled=False):\n if self._cache.broken_count > 0:\n raise SoftwareIndexBroken\n \n self.langpack_locales = {}\n self.pkg_translations = {}\n self.pkg_writing = {}\n filter_list = {}\n blacklist = []\n show = []\n self.missing = set()\n self.installed = set()\n self.system_pkgcode = ''\n \n for l in open(self.BLACKLIST):\n l = l.strip()\n if not l.startswith('#'):\n blacklist.append(l)\n \n for l in open(self.LANGCODE_TO_LOCALE):\n try:\n l = l.rstrip()\n if ':' in l:\n (pkgcode, locale) = l.split(':')\n else:\n pkgcode = l\n locale = l\n except ValueError:\n continue\n self.langpack_locales[locale] = pkgcode\n \n for l in open(self.PACKAGE_DEPENDS):\n if l.startswith('#'):\n continue\n try:\n l = l.rstrip()\n # sort out comments\n if l.find('#') >= 0:\n continue\n (c, lc, k, v) = l.split(':')\n except ValueError:\n continue\n if (c == 'tr' and lc == ''):\n filter_list[v] = k\n elif (c == 'wa' and lc != ''):\n if '|' in lc:\n for l in lc.split('|'):\n if not l in self.pkg_writing:\n self.pkg_writing[l] = []\n self.pkg_writing[l].append((\"%s\" % k, \"%s\" % v))\n else:\n if not lc in self.pkg_writing:\n self.pkg_writing[lc] = []\n self.pkg_writing[lc].append((\"%s\" % k, \"%s\" % v))\n\n # get list of all packages available on the system and filter them\n for item in self._cache.keys():\n if item in blacklist: \n continue\n for x in filter_list.keys():\n if item.startswith(x) and not item.endswith('-base'):\n # parse language code\n langcode = item.replace(x, '')\n #print \"%s\\t%s\" % (item, langcode)\n if langcode == 'zh':\n # special case: zh langpack split\n for langcode in ['zh-hans', 'zh-hant']:\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n elif langcode in self.langpack_locales.values():\n # langcode == pkgcode\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n #print self.pkg_translations[langcode]\n else:\n # need to scan for LL-CC and LL-VARIANT codes\n for locale in self.langpack_locales.keys():\n if '_' in locale or '@' in locale:\n if '@' in locale:\n (locale, variant) = locale.split('@')\n else:\n variant = ''\n (lcode, ccode) = locale.split('_')\n if langcode in [\"%s-%s\" % (lcode, ccode.lower()),\n \"%s%s\" % (lcode, ccode.lower()),\n \"%s-%s\" % (lcode, variant),\n \"%s%s\" % (lcode, variant),\n \"%s-latn\" % lcode,\n \"%slatn\" % lcode,\n \"%s-%s-%s\" % (lcode, ccode.lower(), variant),\n \"%s%s%s\" % (lcode, ccode.lower(), variant)]:\n # match found, get matching pkgcode\n langcode = self.langpack_locales[locale]\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % 
item))\n #print self.pkg_translations[langcode]\n break\n\n if language:\n pkgcode = ''\n if language == 'zh-hans' or language == 'zh-hant':\n self.system_pkgcode = language\n elif language in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[language]\n else:\n # pkgcode = ll\n if '_' in language:\n (self.system_pkgcode) = language.split('_')[0]\n elif '@' in language:\n (self.system_pkgcode) = language.split('@')[0]\n else:\n self.system_pkgcode = language\n\n if packages:\n self.findPackages(self.system_pkgcode, packages)\n else:\n self.findPackages(self.system_pkgcode)\n \n elif all:\n # try all available languages\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist:\n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base'):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n\n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n\n else:\n # get a list of language-packs we have already installed or are going to install\n # 1. system locale\n system_langcode = self._localeinfo.getSystemDefaultLanguage()[0]\n if system_langcode == None:\n system_langcode = 'en_US'\n if system_langcode in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[system_langcode]\n # 2. installed language-packs\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist: \n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base') and \\\n (self._cache[item].is_installed or \\\n self._cache[item].marked_install):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n if self.system_pkgcode and \\\n not self.system_pkgcode in pkgcodes:\n pkgcodes.append(self.system_pkgcode)\n \n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n \n if showInstalled:\n show = self.missing | self.installed\n else:\n show = self.missing\n\n return show", "def have_package_lists():\n return 'Filename:' in execute('apt-cache', 'show', 'python', check=False, capture=True)", "def required_packages(cls) -> List[Text]:\n return []", "def _list_dependencies_info(\n out: Callable, ljust: int, package: str, dependencies: List[Requirement]\n):\n unicode = sys.stdout.encoding.lower().startswith(\"utf\")\n if unicode:\n ljust += 1\n\n not_found: List[Requirement] = list()\n for dep in dependencies:\n if dep.name == package:\n continue\n try:\n version_ = version(dep.name)\n except Exception:\n not_found.append(dep)\n continue\n\n # build the output string step by step\n output = f\"✔︎ {dep.name}\" if unicode else dep.name\n # handle version specifiers\n if len(dep.specifier) != 0:\n output += f\" ({str(dep.specifier)})\"\n output += \":\"\n output = output.ljust(ljust) + version_\n\n # handle special dependencies with backends, C dep, ..\n if dep.name in (\"matplotlib\", \"seaborn\") and version_ != \"Not found.\":\n try:\n from matplotlib import pyplot as plt\n\n backend = plt.get_backend()\n except Exception:\n backend = \"Not found\"\n\n output += f\" (backend: {backend})\"\n out(output + \"\\n\")\n\n if len(not_found) != 0:\n not_found = [\n f\"{dep.name} ({str(dep.specifier)})\"\n if len(dep.specifier) != 0\n else dep.name\n for dep in 
not_found\n ]\n if unicode:\n out(f\"✘ Not installed: {', '.join(not_found)}\\n\")\n else:\n out(f\"Not installed: {', '.join(not_found)}\\n\")", "def test_correctness_of_installed_rpm_packages(self):\n not_installed_packages = get_not_installed_rpm_packages()\n error_msg = linesep + 'List of not installed packages: '\n for package in not_installed_packages:\n error_msg += linesep + package\n self.assertFalse(not_installed_packages, error_msg)", "def unsatisfied_requirements(buildout, package, working_set):\n\n # read all lines from \"requirements.txt\"\n specs = [k.strip() for k in package_readlines(package, 'requirements.txt')]\n\n # discard empty lines and comments\n specs = [k for k in specs if k and k[0] not in ('#', '-')]\n\n # do not consider packages which are already installed, with a reasonable\n # version matching the user specification, either on the current working\n # set, the installed eggs or the system paths\n newest = bool_option(buildout, 'newest', 'true')\n\n left_over = []\n for k in specs:\n if requirement_is_satisfied(k, working_set, newest):\n dist = working_set.require(k)[0]\n logger.info(\"taking requirement `%s' (%s) from `%s'\", dist.key,\n dist.version, dist.location)\n else:\n left_over.append(k)\n specs = left_over\n\n return left_over", "def check_missing_dep():\n global MISSING_PACKAGES, INSTALLED_PACKAGES, ENABLE_CUDA\n if ENABLE_CUDA and IS_MACOS:\n REQUIRED_PACKAGES.extend(MACOS_REQUIRED_PACKAGES)\n MISSING_PACKAGES = []\n for pkg in REQUIRED_PACKAGES:\n key = pkg.split(\"==\")[0]\n if key not in INSTALLED_PACKAGES:\n MISSING_PACKAGES.append(pkg)\n continue\n else:\n if len(pkg.split(\"==\")) > 1:\n if pkg.split(\"==\")[1] != INSTALLED_PACKAGES.get(key):\n MISSING_PACKAGES.append(pkg)\n continue", "def do_seeSkipList(self, args):\n sl.showList()", "def show_keep(self, command=\"show-keep\"):\n # execute pkgin\n popen = Popen([self.pkgin_bin, \"-P\", command], stdout=PIPE, stderr=PIPE)\n # retrieve output streams\n (stdoutdata, stderrdata) = popen.communicate()\n # if pkgin error\n if(stderrdata):\n # remove the line feed\n error = stderrdata[0:-1]\n raise PykginError(error)\n # retrieve output\n output = stdoutdata\n # create a list which contain each packages\n output_whole_list = output.split('\\n')\n # remove last element (due to the last \\n)\n output_whole_list.pop()\n # create a new list in which package informations are separate\n output_list = []\n for pkg in output_whole_list:\n current = pkg.split(' ', 1)\n output_list.append(self.__extract_package_version(current[0]))\n\n return output_list", "def main():\n\n local_pkgs = set(os.listdir(GIT_FOLDER))\n local_pkgs = set([it.replace('.git', '') for it in local_pkgs])\n\n pkgdb_info = pkgdb_pkg_branch()\n\n pkgdb_pkgs = set(pkgdb_info.keys())\n\n ## Commented out as we keep the git of retired packages while they won't\n ## show up in the information retrieved from pkgdb.\n\n #if (local_pkgs - pkgdb_pkgs):\n #print 'Some packages are present locally but not on pkgdb:'\n #print ', '.join(sorted(local_pkgs - pkgdb_pkgs))\n\n if (pkgdb_pkgs - local_pkgs):\n print 'Some packages are present in pkgdb but not locally:'\n print ', '.join(sorted(pkgdb_pkgs - local_pkgs))\n\n tofix = set()\n for pkg in sorted(pkgdb_info):\n pkgdb_branches = pkgdb_info[pkg]\n git_branches = get_git_branch(pkg)\n diff = (pkgdb_branches - git_branches)\n if diff:\n print '%s missing: %s' % (pkg, ','.join(sorted(diff)))\n tofix.add(pkg)\n branch_package(pkg, diff)\n\n if tofix:\n print 'Packages fixed (%s): %s' % (\n 
len(tofix), ', '.join(sorted(tofix)))", "def skip_check():\n if os.getenv('LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE'):\n reporting.create_report([\n reporting.Title('Skipped OS release check'),\n reporting.Summary('Source RHEL release check skipped via LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE env var.'),\n reporting.Severity(reporting.Severity.HIGH),\n reporting.Groups(COMMON_REPORT_TAGS)\n ] + related)\n\n return True\n return False", "def print_dependencies(_run):\n print('Sources:')\n for source, digest in _run.experiment_info['sources']:\n print(' {:<43} {}'.format(source, digest))\n\n print('\\nDependencies:')\n for pack, version in _run.experiment_info['dependencies']:\n print(' {:<20} >= {}'.format(pack, version))", "def print_dependencies(pkg):\n\n for deptype in (\"build\", \"link\", \"run\"):\n color.cprint(\"\")\n color.cprint(section_title(\"%s Dependencies:\" % deptype.capitalize()))\n deps = sorted(pkg.dependencies_of_type(deptype))\n if deps:\n colify(deps, indent=4)\n else:\n color.cprint(\" None\")", "def run_skip(self):\n pass", "def missing_tests(session):\n print('The following samples do not have tests:')\n for sample in set(ALL_SAMPLE_DIRECTORIES) - set(ALL_TESTED_SAMPLES):\n print('* {}'.format(sample))", "def CheckForUnknownFiles(self):\n unknown_files = self.GetUnknownFiles()\n if unknown_files:\n print \"The following files are not added to version control:\"\n for line in unknown_files:\n print line\n prompt = \"Are you sure to continue?(y/N) \"\n answer = raw_input(prompt).strip()\n if answer != \"y\":\n ErrorExit(\"User aborted\")", "def FindMissingBinaries(needed_tools):\n return [binary for binary in needed_tools if Which(binary) is None]" ]
[ "0.7548334", "0.75025696", "0.6658978", "0.64952445", "0.59089476", "0.58754075", "0.5860346", "0.5699727", "0.5639692", "0.5637996", "0.5636697", "0.5571671", "0.5533277", "0.55121475", "0.54966754", "0.5486724", "0.5446966", "0.54336095", "0.53756714", "0.53676933", "0.53107494", "0.5305312", "0.52908194", "0.52864367", "0.526763", "0.52527773", "0.5222709", "0.5217873", "0.52129894", "0.5205187" ]
0.80562556
0
Check which help message the user actually wants to print out to the shell. The concept behind this function is a bit weird. Imagine you have 3 calls to ``rez_batch_process``: ``python -m rez_batch_process --help``, ``python -m rez_batch_process run --help``, and ``python -m rez_batch_process run shell --help``. The first should print the choices "report" and "run". The second should print the arguments for "run". The third should print the arguments for the dynamic plugin for "shell". Unfortunately, that's not how it works. If the "--help" flag is listed anywhere after the ``python -m rez_batch_process run`` part, argparse prints the help message for "run". The help message for "shell" is never shown. This function fixes this problem by detecting the user's intent and slightly modifying the `text` input so that argparse stays happy and the right help message is printed.
def _process_help(text):
    text = copy.copy(text)
    found_index = -1
    found_text = ""

    if "--help" in text:
        found_index = text.index("--help")
        found_text = "--help"
    elif "-h" in text:
        found_index = text.index("-h")
        found_text = "-h"

    if not found_text:
        return text, False

    subparser_index = -1

    for key in ("report", "run", "make-git-users"):
        try:
            subparser_index = text.index(key)
        except ValueError:
            pass

    if found_index != 0 and subparser_index == -1:
        raise RuntimeError(
            'Text "{text}" is not a registered command.'.format(text=text)
        )

    if text.index(found_text) - 1 > subparser_index:
        text.remove(found_text)

        return text, True

    return text, False
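A quick illustrative call of the helper above (the argument list is invented for the example; it assumes ``copy`` is imported at module level):

text, needs_plugin_help = _process_help(["run", "shell", "--help"])
# "--help" comes after the "shell" plugin, so it is stripped from the list and the
# caller is told to print the plugin's help itself:
# text == ["run", "shell"], needs_plugin_help == True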
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_generate_help_text(self):\n self.shell.completer = None\n description, example = self.shell.generate_help_text('')\n self.assertEqual(description, '')\n self.assertEqual(example, '')\n\n self.shell.completer = TestCompleter()\n description, example = self.shell.generate_help_text('friendship --calls')\n self.assertEqual(description, '--calls:\\n' + 'call the friends')\n self.assertEqual(example, space_examples('use with care', 25, 1))", "def check_if_help_message(message):\n return \"The commands are\" in message", "def help_command(update, context):\n update.message.reply_text('Let me help you. \\r\\n /help print this help \\r\\n /safety prints safety instructions \\r\\n /play start the game\\r\\n /joingroup Join CTF tg group')", "def displayHelpMessage(self):\n if self.dialogBox == None:\n if len(self.help) > 0:\n message = self.help.pop()\n if 'SCANNING RESEARCH' in message:\n color = ['cyan']\n elif 'SCANNING INDUSTRY' in message:\n color = ['orange']\n elif 'SCANNING MILITARY' in message:\n color = ['red']\n self.createDialogBox(x=-0.1,y=0.7,texts=[message],textColors=color)", "def test_cli_help(self):\n output = self.update_command('-h')", "def isHelp():\n return (True)", "async def do_help():\n\n if len(message.content.split()) > 1:\n for i in cmd_dict:\n if message.content.split()[1] == i:\n await bot.send_message(c, f'Help for {i}: {cmd_dict[i].__doc__}')\n return\n\n cmdstr = 'Commands: '\n for i in cmd_dict:\n cmdstr += '{}, '.format(i)\n await bot.send_message(c, cmdstr)", "def bot_help(bot, update, args):\n if not args:\n update.message.reply_text(helpmessages.mainHelp())\n elif args[0] == \"alert\":\n bot.sendMessage(update.message.chat_id, helpmessages.alertHelp())\n elif args[0] == \"fortune\":\n bot.sendMessage(update.message.chat_id, fortune.fortuneHelp())\n elif args[0] == \"quote\":\n bot.sendMessage(update.message.chat_id, helpmessages.quoteHelp())\n elif args[0] == \"bash\":\n bot.sendMessage(update.message.chat_id, helpmessages.bashHelp())\n else:\n update.message.reply_text(helpmessages.mainHelp())", "def test_help(self):\n rc, stdout, _, msg = OIM().request('--help')\n self.assertEqual(rc, 0, \"Bad return code when requesting help\\n%s\" % msg)\n self.assert_(re.search(r'[Uu]sage:', stdout), msg)", "def run_help(s, remainder):\n return SlackResponseText(\"help functionality is not currently implemented\")", "async def do_help(self, arg):\n if arg:\n if arg in self._commands:\n _, doc = self._commands[arg]\n print(doc)\n else:\n self.error('No command {}'.format(arg))\n else:\n print('Valid commands:')\n for c in self._commands:\n print(' ' + c)", "def test_handle_help(self):\r\n ret, code = self.testcommand.handle(\"project help\", user)\r\n self.assertEqual(ret, self.testcommand.get_help())\r\n self.assertEqual(code, 200)", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def cmd_help(args):", "def help():\n return statement(help_text)", "def test_handle_help(self):\n ret, code = self.testcommand.handle(\"team help\", user)\n self.assertEqual(ret, 
self.testcommand.get_help())\n self.assertEqual(code, 200)", "def parse_help_command(args):\r\n if len(args) < 2:\r\n print_generic_help()\r\n elif len(args) == 2:\r\n print_specific_help(args[1])\r\n else:\r\n print 'Error! Found too many arguments for --help! Use --help with ' \\\r\n '0 arguments to print generic help or type --help <tool_name> ' \\\r\n 'to get tool specific help!'", "def help():\n print(UI.HELP)", "def _get_help_string(self, action):\n helptext = action.help\n if '%(default)' not in action.help:\n if action.default != '==SUPPRESS==' and action.default:\n # defaulting_nargs = ['?', '*']\n # if action.nargs in defaulting_nargs:\n helptext += colored(' [default: %(default)s]', 'cyan')\n return helptext", "def help(self, dummy):\r\n help = self.doc + \"\\n\"\r\n if help.find(\"%s\") > 0:\r\n help = help.replace(\"%s\", self.progname)\r\n print_function(help, end='', file=self.stdout)\r\n self.exit(0)", "def help_args():\n pass", "def test_createExplicitHelp(self):\n self.assertSuccessStatus(self._makeConfig(None), [\"create\", \"--help\"])\n self.assertSpacelessEqual(self._createHelpText, sys.stdout.getvalue())", "def test_help(self):\n bin_path = \"tools/drake_visualizer\"\n self.assertTrue(isfile(bin_path), bin_path)\n text = subprocess.check_output([bin_path, \"--help\"], encoding=\"utf8\")\n\n # N.B. This should be kept in sync with\n # `drake_visualizer_installed_help_test`.\n print(text)\n # Test for nominal help string.\n self.assertIn(\"usage: drake-visualizer \", text)\n self.assertNotIn(\n \"drake-visualizer: error: unrecognized arguments\", text)\n # Test for modifications in help text.\n self.assertIn(\"--use_builtin_scripts\", text)\n self.assertIn(\"Options: all,\", text)", "def get_help(self) -> None: \n print(messages.get_help())", "def test_main_first_arg_help(capsys):\n with pytest.raises(SystemExit):\n uflash.main(argv=['--help'])\n\n stdout, _ = capsys.readouterr()\n # argparse manipulates the help text (e.g. changes line wrap)\n # so it isn't trivial to compare the output to uflash._HELP_TEXT.\n expected = 'Flash Python onto the BBC micro:bit'\n assert expected in stdout", "def _handle_help_argument(self, arguments):\n for help_option in ['help', '--help', '-h']:\n if help_option in arguments:\n LOGGER.info(USAGE_INFORMATION)\n return exit(0)", "def _is_help(argv):\n if len(argv) == 0:\n return True\n return _HELP_RE.search(argv[0])", "def _display_help():\n if parameters[\"Command flavour\"] in (\"posix\", \"linux\"):\n print(\"usage: what [--debug] [--help|-?] [--version]\", file=sys.stderr)\n print(\" [-s] [--] file [...]\", file=sys.stderr)\n print(\n \" --------- ------------------------------------------\",\n file=sys.stderr\n )\n print(\" -s Quit after finding the first occurrence\", file=sys.stderr)\n print(\" of the pattern in each file\", file=sys.stderr)\n print(\" --debug Enable debug mode\", file=sys.stderr)\n print(\" --help|-? Print usage and this help message and exit\", file=sys.stderr)\n print(\" --version Print version and exit\", file=sys.stderr)\n print(\" -- Options processing terminator\", file=sys.stderr)\n else: # if parameters[\"Command flavour\"] in (\"PNU\", \"bsd\", \"bsd:freebsd\"):\n print(\"usage: what [--debug] [--help|-?] 
[--version]\", file=sys.stderr)\n print(\" [-qs] [--] [file ...]\", file=sys.stderr)\n print(\n \" --------- -----------------------------------------------------\",\n file=sys.stderr\n )\n print(\" -q Only output the match text, rather than formatting it\", file=sys.stderr)\n print(\" -s Stop searching each file after the first match\", file=sys.stderr)\n print(\" --debug Enable debug mode\", file=sys.stderr)\n print(\" --help|-? Print usage and this help message and exit\", file=sys.stderr)\n print(\" --version Print version and exit\", file=sys.stderr)\n print(\" -- Options processing terminator\", file=sys.stderr)\n print(file=sys.stderr)", "def help_option(args, run):\n pass", "def help(self, msg, status, desc):\n\n msg.Chat.SendMessage(HELP_TEXT)" ]
[ "0.65590817", "0.6279931", "0.6265416", "0.6258547", "0.6257743", "0.6243471", "0.61520076", "0.61436427", "0.61064446", "0.60910314", "0.60761446", "0.60735446", "0.6021295", "0.60164595", "0.60079336", "0.59912646", "0.5986868", "0.5982642", "0.59823585", "0.59813535", "0.5979993", "0.597318", "0.5972406", "0.59661627", "0.5965017", "0.5947603", "0.5931674", "0.59222865", "0.5917516", "0.5915732" ]
0.734195
0
Does any sequence in sequences have a reflection? A reflection is a four-character sequence that is the same backward as forward and consists of two different characters.
def has_reflection(sequences):
    for sequence in sequences:
        for i in range(len(sequence) - 3):
            subseq = sequence[i:i + 4]
            if (len(Counter(subseq)) == 2) and (subseq == subseq[-1::-1]):
                return True
    return False
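A few sanity checks for the reflection rule above (sample sequences only; assumes ``Counter`` is imported from ``collections`` at module level):

assert has_reflection(["abba", "qrst"])      # "abba" reads the same backward and uses exactly two characters
assert not has_reflection(["aaaa", "qrst"])  # "aaaa" is a palindrome but uses only one character
assert not has_reflection(["abcd"])          # no four-character palindrome at all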
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def issequence(obj) -> bool:\n return hasattr(type(obj), '__iter__') and hasattr(type(obj), '__len__')", "def is_reflective(self):\n return self._reflective", "def is_sequence(arg):\n return (not hasattr(arg, \"strip\") and\n hasattr(arg, \"__getitem__\") or\n hasattr(arg, \"__iter__\"))", "def _is_sequence(obj):\n return hasattr(obj, \"__iter__\") and not isinstance(obj, str)", "def _is_sequence_like(self, data):\n return hasattr(data, \"__iter__\") and hasattr(data, \"__getitem__\")", "def is_sequence(x):\n return (not hasattr(x, 'strip') and\n hasattr(x, '__getitem__') or\n hasattr(x, '__iter__'))", "def is_refseq(val):\n return refseq_regexp.match(val)", "def check_reflection(target_player, non_target_player):\n if not check_counter_spell(non_target_player) and not check_dispel_magic(non_target_player):\n if \"Magic Mirror\" in target_player.spell_to_cast:\n # magic mirror effective, reflection happened\n return non_target_player, target_player\n # no reflection happened\n return target_player, non_target_player", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def is_sequence(item):\n return (not hasattr(item, \"strip\") and\n (hasattr(item, \"__getitem__\") or hasattr(item, \"__iter__\")))", "def isSequence(obj):\n # type: (Any) -> bool\n return isinstance(obj, Sequence)", "def is_sequence(value):\n return (hasattr(value, \"__iter__\") and not\n isinstance(value, (six.string_types, six.binary_type)))", "def is_sequence(arg):\n\n # np.float{16,32,64} and np.int types have __getitem__ defined\n # this is a long-standing bug in NumPy and unlikely to be fixed\n # todo: backport to qmmlpack, write tests\n if isinstance(arg, (str, bytes, np.number, dict, set)):\n return False\n\n return hasattr(arg, \"__getitem__\") or hasattr(arg, \"__iter__\")", "def simple_reflections(self):\n res = self.alpha().zip(self.reflection, self.alphacheck())\n # Should we use rename to set a nice name for this family?\n res.rename(\"simple reflections\")\n return res", "def test_reflection_vector(self):\n\n # A ray approaching at 45 degrees\n v = vectors.Vector(1, -1, 0)\n n = vectors.Vector(0, 1, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 1, 0))\n\n # Ray along an axis hits a surface at an angle\n v = vectors.Vector(0, -1, 0)\n n = vectors.Vector(math.sqrt(2)/2, math.sqrt(2)/2, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 0, 0))", "def pred(self):\n return [ self.simple_reflection(i) for i in self.descents() ]", "def isSeq(act, resources):\n ok = True \n count = 0\n while (count < len(resources) and ok):\n ok = act.resources[count] <= resources[count]\n count += 1\n return ok", "def _try_match_reflection_enabler(self, node):\n\n found_crtp_template = False\n\n if node.kind == CursorKind.CXX_BASE_SPECIFIER:\n for c in node.get_children():\n GlobalLogger.warning().step('base spec child: {} [{}]'.format(c.spelling, c.kind))\n\n if c.kind == CursorKind.TEMPLATE_REF and c.spelling == 'Reflectable':\n found_crtp_template = True\n self.logger.info('Found Reflectable<T> base specifier in class ' + self.full_qualified_ref)\n\n if found_crtp_template:\n self.logger.info('Looking at {} (\\'{}\\' {})'.format(c.spelling, c.displayname, c.kind))\n\n if c.kind == CursorKind.TYPE_REF and c.referenced == self.cursor:\n return True;\n\n return False;", "def ISREF(value):\n return isinstance(value, Record)", "def is_record(buf, offset):\n\n if len(buf) < offset + 8:\n return False\n\n magic, size = 
struct.unpack_from(\"<II\", buf, offset)\n if magic != 0x00002a2a:\n return False\n\n if not (0x30 <= size <= 0x10000):\n return False\n\n if len(buf) < offset + size:\n return False\n\n size2 = struct.unpack_from(\"<I\", buf, offset + size - 4)[0]\n if size != size2:\n return False\n\n return True", "def is_legit_DNA_sequence(record_seq: str) -> bool:\n nts = {\"A\", \"G\", \"T\", \"C\", \"N\"}\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(nts)", "def isdependent(self, t):\n deptypes = set([k[0] for k in self.refined_types \\\n if not isinstance(k, basestring)])\n if isinstance(t, basestring):\n return t in deptypes\n if isinstance(t, Sequence):\n return self.isdependent(t[0])\n return False", "def is_assembly(cls, item: \"SeqFileTypes\") -> bool:\n if item in cls.list_assemblies(): return True;\n return False;", "def doesmatch(TheClass):\n import sys \n\n if sys.version_info.major < 3:\n return None\n S = TheClass.__base__\n for meth_name in dir(TheClass):\n if not hasattr(S, meth_name):\n continue\n meth = getattr(TheClass, meth_name)\n if(callable(meth)):\n try:\n match = (inspect.signature(meth) == inspect.signature(getattr(S,meth_name)))\n #assert(match)\n if not match:\n print(meth_name, ' : does not match parent signature', inspect.signature(meth) , inspect.signature(getattr(S,meth_name)))\n except ValueError:\n pass", "def _check_sequence(self) -> PossibleResult[T]:\n if isinstance(self.constructor_origin, type) and issubclass(\n self.constructor_origin, Sequence\n ):\n if not isinstance(self.obj, Sequence):\n raise DeserializeError(\n Sequence, self.obj, self.new_depth, self.key\n )\n if self.constructor_args:\n _arg = self.constructor_args[0]\n else:\n _arg = Any # type: ignore\n return self.constructor_origin(\n Deserialize(\n obj=value,\n constructor=_arg,\n depth=self.new_depth,\n convert_primitives=self.convert_primitives,\n ).run()\n for value in self.obj\n ) # type: ignore\n return NO_RESULT", "def isDisambiguatedByNextVerb(self, word):\n\t\treturn 'verb' in disambig_const.DISAMBIGUATATION_TABLE.get(word, {});", "def is_field(self, proof = True):\n return True", "def is_sequence(self) -> bool:\n return isinstance(self.yaml_node, yaml.SequenceNode)", "def is_seq_of(seq, expected_type, seq_type=None):\n if seq_type is None:\n exp_seq_type = abc.Sequence\n else:\n assert isinstance(seq_type, type)\n exp_seq_type = seq_type\n if not isinstance(seq, exp_seq_type):\n return False\n for item in seq:\n if not isinstance(item, expected_type):\n return False\n return True", "def isProbabilisticVector(vec):\n return sum(vec)==1" ]
[ "0.592695", "0.5832554", "0.5718492", "0.5712738", "0.5561228", "0.55578184", "0.5450076", "0.52724874", "0.5185765", "0.5170214", "0.5164834", "0.51240486", "0.5089455", "0.49864534", "0.498638", "0.4963209", "0.48805374", "0.4781474", "0.47128096", "0.46820468", "0.46809286", "0.46749455", "0.46582797", "0.4647536", "0.46368316", "0.45731738", "0.4572587", "0.4568459", "0.45578307", "0.45235407" ]
0.7524243
0
Return whether the given address is compatible with the specified protocol.
def is_compatible(address, protocol=1):
    bracketed = [word.strip('[]') for word in re.findall(r'\[[^\]]*\]', address)]
    not_bracketed = re.split(r'\[[^\]]*?\]', address)

    if protocol == 1:
        if has_reflection(bracketed):
            return False

        return has_reflection(not_bracketed)
    elif protocol == 2:
        patterns = extract_protocol_patterns(bracketed)

        for pattern in patterns:
            inverse_pattern = pattern[1] + pattern[0] + pattern[1]

            if any([inverse_pattern in word for word in not_bracketed]):
                return True

        return False
    else:
        raise ValueError('unknown protocol')
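An illustrative protocol-1 check with made-up addresses (assumes ``re`` is imported and ``has_reflection`` is defined as above; the protocol-2 branch additionally needs ``extract_protocol_patterns``, which is defined elsewhere):

assert is_compatible("abba[mnop]qrst", protocol=1)      # a reflection outside the brackets
assert not is_compatible("abcd[bddb]xyyx", protocol=1)  # a reflection inside the brackets disqualifies the address
assert not is_compatible("aaaa[qwer]tyui", protocol=1)  # "aaaa" uses only one character, so it is not a reflection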
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_support_address(self, addr: int) -> bool:\n return (self.fpb_rev == 2) or (addr < 0x20000000)", "def isProtocolDefined(self) -> bool:\n ...", "def is_ip(address):\n try:\n socket.inet_pton(socket.AF_INET, address)\n except socket.error:\n try:\n socket.inet_pton(socket.AF_INET6, address)\n except socket.error:\n return False\n return True", "def OSSupportsIPv4(self) -> bool:", "def SupportsIPv4(self) -> bool:", "def is_server_address(\n address: str, additional_schemes: Iterable[str] = ()) -> bool:\n schemes = {\"http\", \"https\"}\n if additional_schemes:\n schemes.update(additional_schemes)\n try:\n pieces = urlparse(address)\n scheme = pieces.scheme.lower()\n return scheme in schemes and pieces.netloc is not None\n except Exception: # pylint: disable=broad-except\n return False", "def supports_protocol(obj, protocol):\n manager = get_global_adaptation_manager()\n return manager.supports_protocol(obj, protocol)", "def SupportsIPv6(self) -> bool:", "def supports_protocol(self, obj, protocol):\n\n return self.adapt(obj, protocol, None) is not None", "def OSSupportsIPv6(self) -> bool:", "def provides_protocol(type_, protocol):\n return issubclass(type_, protocol)", "def is_valid_ipv6_address(address):\n try:\n socket.inet_pton(socket.AF_INET6, address)\n except (socket.error, TypeError):\n return False\n return True", "def is_valid_ipv4_address(address):\n try:\n socket.inet_pton(socket.AF_INET, address)\n except AttributeError: # no inet_pton here, sorry\n try:\n socket.inet_aton(address)\n except socket.error:\n return False\n return address.count('.') == 3\n except socket.error: # not a valid address\n return False\n\n return True", "def is_ipv6(addr):\n try:\n socket.inet_pton(socket.AF_INET6, addr)\n return True\n except socket.error:\n return False", "def is_valid_ipv6_address(address):\n try:\n socket.inet_pton(socket.AF_INET6, address)\n except socket.error: # not a valid address\n return False\n return True", "def is_valid_ip_address(address):\n return Convert.is_valid_ipv6_address(\n address) or Convert.is_valid_ipv4_address(address)", "def is_ip_addr(addr: str, strict: bool = True) -> bool:\n\n try:\n ipaddress.ip_network(addr, strict=strict)\n return True\n except ValueError:\n return False", "def has_internet() -> bool:\n if public_address():\n return True\n else:\n return False", "def _valid_protocol_type(protocol):\n\n if protocol == 'ssh' or protocol == 'https':\n return True\n\n return False", "def is_valid_address(self, address):\n assert isinstance(address, tuple), type(address)\n assert len(address) == 2, len(address)\n assert isinstance(address[0], str), type(address[0])\n assert isinstance(address[1], int), type(address[1])\n\n if address[0] == \"\":\n return False\n\n if address[0] == \"0.0.0.0\":\n return False\n\n if address[1] <= 0:\n return False\n\n try:\n binary = inet_aton(address[0])\n except socket_error:\n return False\n\n # ending with .0\n#Niels: is now allowed, subnet mask magic call actually allow for this\n# if binary[3] == \"\\x00\":\n# return False\n\n # ending with .255\n if binary[3] == \"\\xff\":\n return False\n\n return True", "def protocol_available(self, module_id: str) -> bool:\n return self.protocol_loaded(module_id) or ZeroBot.module.module_available(module_id, \"protocol\")", "def is_valid_address(address) -> bool:\n if not address.startswith('one1'):\n return False\n hrp, _ = bech32_decode(address)\n if not hrp:\n return False\n return True", "def is_supported(self) -> bool:\n\n # TODO logging ?\n # TODO ICMP error if ttl is 
zero\n return self._version == 4 and self._ihl >= 5 and self._ttl != 0", "def has_compatible_scheme(url):\n return url.startswith(('http://', 'https://'))", "def is_valid_ip(address):\n return is_valid_ipv4_address(address) or is_valid_ipv6_address(address)", "def is_valid_ipv4_address(address):\n # inet_aton accepts also 2.2.2\n if address.count('.') != 3:\n return False\n # filter out addresses with unexpected characters, like 1.2x2.2.2\n if any(char not in '0123456789.' for char in address):\n return False\n # inet_pton is available only on some platforms, but\n # inet_aton is less restrictive (this is why we need checks above)\n try:\n socket.inet_aton(address)\n except (socket.error, TypeError):\n return False\n return True", "def is_actual_ip(self, ip_addr):\n try:\n socket.inet_aton(ip_addr)\n return True\n except socket.error:\n return False", "def proto_check(proto):\n # Check for TCP\n if proto == 6:\n return 'tcp'\n # Check for UDP\n elif proto == 17:\n return 'udp'\n else:\n return None", "def is_valid_ipv4_address(address):\n invalid_list = ['0.0.0.0','255.255.255.255']\n try:\n ip = ipaddress.IPv4Address(address)\n if (ip.is_reserved) or (ip.is_multicast) or (ip.is_loopback) or (address in invalid_list):\n return False\n except ipaddress.AddressValueError:\n return False\n\n return True", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP" ]
[ "0.6820964", "0.6668771", "0.66564524", "0.66512966", "0.66328156", "0.64613605", "0.64490527", "0.64391613", "0.64211375", "0.6321145", "0.62663627", "0.6232519", "0.62187594", "0.62100095", "0.6193499", "0.617714", "0.6138549", "0.6138365", "0.6137218", "0.6131304", "0.611822", "0.61088777", "0.6091859", "0.60750717", "0.60510355", "0.60307705", "0.5981736", "0.596916", "0.59204245", "0.5892011" ]
0.7616966
0
Load a list of addresses from a file.
def load_addresses():
    with open('addresses.txt') as f:
        return [address.strip() for address in f.readlines()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_in_address_file(file):\n address_list = list()\n lines = 0\n valid_ips = 0\n with file as f:\n for n in file:\n lines += 1\n if validate_ip(n.strip()):\n address_list.append(n.strip())\n valid_ips += 1\n if valid_ips < lines:\n print(\"Of the {} lines in the file you supplied, only {} were valid. The latter will be used to call the \"\n \"API.\".format(lines, valid_ips))\n if valid_ips == 0:\n print(\"Please supply a valid IP address.\")\n address_list = None\n return address_list", "def load_employees(file_path):\n\temployees = []\n\tfor line in open(file_path):\n\t\temployee = Employee.employee_from_insert_stmnt(line)\n\t\tif employee:\n\t\t\temployees.append(employee)\n\treturn employees", "def __init__(self, filedir='.', filename='.address_list'):\n self.addresses = []\n self.filedir = os.path.expanduser(filedir)\n self.filename = filename\n\n path = os.path.join(os.path.realpath(self.filedir), self.filename)\n if os.path.isdir(path):\n raise IOError(\"Invalid address_list file. File is a directory.\")\n if os.path.exists(path):\n with open(path, 'r') as address_file:\n lines = [line.strip() for line in address_file if line]\n self.addresses = list(map(_addressitem_from_line, lines))", "def load_from_file(self, file):\n\n if (args.replacetopip): #create list of IP addresses and the number of times they occur\n with open(args.dirty) as dirty_file:\n for line in dirty_file:\n ip = self._extract_by_key(line, self._attr_key)\n if (self.ip_dict.has_key(ip)):\n self.ip_dict[ip] += 1\n else:\n self.ip_dict[ip] = 1\n #sort list\n self.top_ip = sorted(self.ip_dict.items(), key=operator.itemgetter(1), reverse=True)\n count = 0\n with open(file) as ip_file:\n for line in ip_file:\n if (args.replacetopip): #replace top IP addresses from the sorted list with new ones from the file\n ip_old = self.top_ip[count][0]\n ip_new = line.strip()\n count += 1\n else:\n ip_old,ip_new = line.split(\",\")\n self._insts[ip_old] = ip_new.strip()", "def LoadListFile(file):\n\tlst = []\n\ttry:\n\t\twith open(file,'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.rstrip()\n\t\t\t\tlst.append(line)\n\texcept:\n\t\treturn []\n\treturn lst", "def readHouseAddresses():\n addressesRead = []\n with open(\"Files/HouseAddresses.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n details = line.split(\",\")\n address = []\n for detail in details:\n address.append(detail.rstrip('\\n').rstrip().lstrip())\n addressesRead.append(address)\n f.close()\n return addressesRead", "def loadList(file_name):\n with open(file_name) as f:\n l = [line.strip() for line in f]\n return l", "def load_people(self, file_path):\n pass", "def loadNetworkFromFile(self, file):\r\n for line in open(file, 'r'):\r\n fromVertex, toVertex, capacity = map(int, line.split())\r\n self.addEdge(fromVertex, toVertex, capacity)", "def load_file(self, file_path):\n with open(file_path, \"r\") as mappings_file:\n for raw_line in mappings_file:\n line = raw_line.split()\n # Add new record to the records dictionary.\n new_record = Record(line[0], line[1], line[2], line[3])\n self.add_record(new_record)", "def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()", "def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n 
self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()", "def load_url_list(url_list_file):\n url_list = []\n with open(url_list_file, 'r') as f:\n for eachline in f:\n eachline = eachline.rstrip('\\n')\n parts = eachline.split('\\t')\n domain, script_url = parts\n url_list.append((domain, script_url))\n\n return url_list", "def load_ports_from_file(self, filename):\n lines = open(filename, \"r\").readlines()\n for line in lines:\n name, port = line.split(\":\")\n self.servers[name] = port.rstrip()", "def readLocations():\n locationsRead = []\n\n # Parallel reading from address_file and locations_file\n with open(\"Files/PublicPlaces.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n details = line.split(\",\")\n address = []\n for detail in details:\n address.append(detail.rstrip('\\n').rstrip().lstrip())\n locationsRead.append(address)\n f.close()\n return locationsRead", "def load(self, filename):\n\n with open(filename) as f:\n\n for val in f:\n\n val = val.strip().split(\"#\", 1)[0]\n\n if val == '':\n continue\n\n val = int(val, 2)\n self.ram[self.address] = val\n self.address += 1\n\n if len(sys.argv) != 2:\n print(\"Expected Usage: ls8.py [filename-to-run]\")\n sys.exit(1)\n\n if ValueError:\n pass", "def read_addresses(fileobj, count):\n if PY3:\n r = array.array(ADDR_CHAR)\n b = fileobj.read(ADDR_SIZE * count)\n r.frombytes(b)\n else:\n r = [struct.unpack(ADDR_CHAR, fileobj.read(ADDR_SIZE))[0] \\\n for i in range(count)]\n return r", "def get_address_from_file():\n with open(address_file_path) as fp:\n new_address = fp.readline()\n return new_address", "def loadMaxIPlist(self, filename):\r\n #I need to put this in a try/catch block later \r\n \r\n maxIPlist=10\r\n linecount=0 \r\n iplist=[]\r\n with open(filename, 'r') as infile:\r\n element = infile.readline()\r\n while element:\r\n \r\n linecount +=1\r\n if linecount < maxIPlist:\r\n iplist.append(element)\r\n element = infile.readline()\r\n \r\n self.objdict['IPADDRESS']=iplist\r\n print(\"Loaded \", linecount, \" ip addresses\")\r\n\r\n return(linecount)", "def loadListFromFile (filename):\n retval = []\n filename = os.path.expanduser (filename)\n if not os.path.exists (filename):\n print(\"Error: file '%s' does not exist.\"%(filename))\n raise RuntimeError(\"Bad filename\")\n source = open (filename, 'r') \n for line in source.readlines():\n line = re.sub (r'#.+$', '', line) # remove comment characters\n line = line.strip()\n if len (line):\n retval.append (line)\n source.close()\n return retval", "def load(self, file_):\n\n program = []\n\n with open(f'ls8/examples/{file_}') as f:\n for line in f:\n line = line.split(\"#\")\n try:\n v = int(line[0], 2)\n program.append(v)\n except ValueError:\n continue\n\n for instruction in program:\n self.ram_write(instruction, self.address)\n self.address += 1\n\n self.address = 0", "def load(filename):\n\n owners = []\n\n address_manager = REGISTRY['address_manager']\n error_manager = REGISTRY['error_manager']\n\n reader = csv.reader(open(filename, 'rU'))\n try:\n headers = reader.next() # Text column headers\n except StopIteration:\n syslog.syslog('merge.py: Empty file %s' % filename)\n return owners\n\n if len(headers) != 11:\n raise InvalidInput('Property Owners file should have ' +\n 'exactly 11 columns. Found %d.' 
% len(headers))\n\n for line in reader:\n property_owner = PropertyOwner(line)\n if address_manager.is_in_strathcona(property_owner.civic):\n owners.append(property_owner)\n else:\n error_manager.add(property_owner, \n 'Not in Strathcona or invalid address')\n\n owners.sort(key=operator.attrgetter('folio'))\n\n return owners", "def loadFromFile(self, filename):\n\t\treturn []", "def load_server_list(filename):\n if not os.path.isfile(filename):\n return #ignore this error for now\n fo=open(filename,\"r\")\n rd=fo.read()\n fo.close()\n __load_server_list(rd)", "def load_map(path):\n file = open(path + '.txt', 'r')\n data = file.read().split('\\n')\n game_map = []\n file.close()\n for row in data:\n game_map.append(list(row))\n return game_map", "def load_file(filename):\n with open(filename, \"r\") as f:\n return f.readlines()", "def load(self):\n\t\tpath = input(\"Please enter the file name, with full path if needed, to load hosts and open ports -> \")\n\t\ttry:\n\t\t\tf = open(path)\n\t\t\tlines = f.read()\n\t\t\tentries = lines.split('\\n\\n')\n\t\t\tfor entry in entries:\n\t\t\t\thost_ports = entry.split('\\n')\n\t\t\t\tif host_ports[0] not in self.host_and_ports.keys():\n\t\t\t\t\tself.host_and_ports[host_ports[0]] = []\n\t\t\t\thost = host_ports[0]\n\t\t\t\t# if blank line read in as a host for some reason, skip it\n\t\t\t\tif host == \"\": continue\n\t\t\t\t# remove host from host ports as we've now accounted for it\n\t\t\t\tports=host_ports[1:]\n\t\t\t\t# add all ports for this host to list, pointed to by host, host acting as a key\n\t\t\t\tfor p in ports:\n\t\t\t\t\tif str(p) not in self.host_and_ports[host]:\n\t\t\t\t\t\tself.host_and_ports[host].append(int(p))\n\t\t\tprint(self.host_and_ports)\n\t\texcept e:\n\t\t\tprint(e)", "def load(self, file):\n reader = Par2FileReader(file)\n self.readers.append(reader)\n logger.info(\"Found {} packets\".format(len(reader)))\n self._read_packets(reader)", "def read_from_file(self, filename):\n with open(filename, 'r') as f:\n for line in f.read().splitlines():\n name, neighbours, r_table = line.split('!')\n\n self.add_new(name)\n if neighbours:\n for neighbour in neighbours.split(';'):\n try:\n self.add_neighbours(name, neighbour)\n except Exception as e:\n\n pass\n if r_table:\n for network in r_table.split(';'):\n net_name, distance = network.split(':')\n\n distance = int(distance)\n self.add_network(name, net_name, distance)", "def read_ip(ip_file):\n with open(ip_file, \"r\") as file:\n address = ordered_load(file)\n \"\"\"address is a dict type object\"\"\"\n # print address\n address = address['node']\n\n available_ip = []\n \"\"\"Get all the ip addresses\"\"\"\n for i in address:\n print i\n for ip in address[i]:\n available_ip.append(ip)\n # print address[i]\n # print address['block1']\n return available_ip" ]
[ "0.68453926", "0.66718036", "0.6595332", "0.65375346", "0.65279084", "0.6232124", "0.6221131", "0.6218062", "0.62075657", "0.6184122", "0.61824334", "0.61824334", "0.61778766", "0.61719495", "0.6160394", "0.6155133", "0.61548156", "0.61479515", "0.609529", "0.6087289", "0.6080476", "0.6072938", "0.6021395", "0.60132694", "0.59996486", "0.599911", "0.59776807", "0.5954593", "0.595203", "0.59176254" ]
0.82936203
0
Method fetches the GPS coordinates for a particular address using the TomTom API
def geo(address): API_PRIVATE = os.environ.get("TOM_TOM_PRIVATE") encoded = urllib.parse.quote(address) query ='https://api.tomtom.com/search/2/geocode/' + str(encoded) + \ '.json?limit=1&countrySet=US&lat=42&lon=-72&topLeft=42.886%2C%20-73.508&btmRight=41.237%2C-69.928&key=' \ + API_PRIVATE response = requests.get(query) while True: try: jsonResponse = response.json() break except: response = requests.get(query) latit = 0 longit = 0 for address in jsonResponse['results']: latit = address['position']['lat'] longit = address['position']['lon'] return latit, longit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_coords(self, address):\n while True:\n try:\n location = self.geolocator.geocode(address) \n break\n except:\n time.sleep(20)\n\n try:\n latitude = location.latitude\n longitude = location.longitude\n except:\n latitude = np.nan\n longitude = np.nan\n\n return((latitude, longitude))", "def __getCoordinates(self,address):\n\n # extremely rudimentary error handling\n\n try:\n xmlout = self.__openURL(address)\n except:\n return\n\n try:\n xmldoc = xml.dom.minidom.parseString(xmlout)\n except:\n return\n\n if self.debug: print xmlout\n\n # <Status><code> of 200 is good\n code = xmldoc.getElementsByTagName('code')\n if self.debug: sys.stderr.write(\"Code = '%s'\" % code[0].childNodes[0].data)\n if code[0].childNodes[0].data != '200':\n return\n\n coord = xmldoc.getElementsByTagName('coordinates')\n try:\n self.long, self.lat, self.ele = \\\n coord[0].childNodes[0].data.split(',')\n except:\n return\n\n if self.debug:\n sys.stderr.write(\"Lat = %s, Long = %s\"%(self.lat,self.long))\n \n return", "def get_coordinates_from_address(address):\n \n geolocator = Nominatim(user_agent=\"NAIP\")\n location = geolocator.geocode(address)\n print('Retrieving location for address:\\n{}'.format(location.address))\n return location.latitude, location.longitude", "def get_lat_long(address):\n url = \"https://maps.googleapis.com/maps/api/geocode/json\"\n params = {'address':address,'key':'AIzaSyBVZhQwm7GZViRzTCuH1VBvMdIpLMwvfT4'}\n req = requests.get(url,params=params)\n stat = req.status_code\n latitude = req.json()['results'][0]['geometry']['location']['lat']\n longitude = req.json()['results'][0]['geometry']['location']['lng']\n return latitude, longitude", "def getCoord(address):\n\n start_time = time.time()\n gmaps = GoogleMaps(os.getenv(\"GOOGLE_KEY\"))\n geocode_result = gmaps.geocode(address)\n\n data = {'long': {}, 'short': {}, 'location': {}}\n\n if geocode_result:\n for geolist in geocode_result:\n for item in geolist['address_components']:\n for category in item['types']:\n data['long'].update([(category, item['long_name'])])\n data['short'].update([(category, item['short_name'])])\n data['location'] = geolist['geometry']['location']\n else:\n e = serializers.ValidationError('GeoCoding Error: Could not parse coordinates')\n capture_message(e)\n raise e\n\n print('--- Tiempo de ejecucion getCoord: {} segundos ---'.format((time.time() - start_time)))\n\n return data", "def get(self, address):\n response = hereService.getLatLongs(address)\n return response", "def get_coord_from_address(code_postal, adresse=None):\n headers = {\"Content-Type\": \"application/json\"}\n if adresse != None:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(adresse) + \"&postcode=\" + str(code_postal)))\n else:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(code_postal)))\n print(url)\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n if code_postal == 75001:\n x = js['features'][1]['geometry']['coordinates']\n else:\n \tx = js['features'][0]['geometry']['coordinates']\n longitude = x[0]\n latitude = x[1]\n pos = []\n pos.append(longitude)\n pos.append(latitude)\n print(pos)\n return pos", "def create_locs(address):\r\n geolocator = Nominatim(user_agent = 'SF_Parking_EDA')\r\n try:\r\n location = geolocator.geocode(address, timeout = 10)\r\n except:\r\n location = None\r\n time.sleep(1)\r\n\r\n if location != None and check_location(location):\r\n return (location.latitude, location.longitude )\r\n else:\r\n return None", "def 
get_location_data(address=None):\n params = {\n 'address': address,\n 'sensor': 'false'\n }\n\n # Do the request and get the response data\n req = requests.get(GOOGLE_MAPS_API_URL, params=params)\n res = req.json()\n\n # Use the first result\n result = res['results'][0]\n\n try:\n geodata = dict()\n geodata['Latitude'] = result['geometry']['location']['lat']\n geodata['Longitude'] = result['geometry']['location']['lng']\n return geodata\n except Exception:\n return None", "def get_geocode(self, address):\n\n try:\n raw_data = self.__get_raw_data(address)\n except (URLError, ValueError):\n return 503, None\n else:\n code, coords = self.__parse_raw_data(raw_data)\n return code, coords", "def geocoding(address):\n AUTH = json.loads(open(\"auth.json\", \"r\").read())\n\n r = requests.get(f\"https://maps.googleapis.com/maps/api/geocode/json\", params={\n \"address\": address,\n \"key\": AUTH[\"GMAP_API\"]\n })\n\n if r.status_code == 200:\n r = r.json()\n results = r[\"results\"]\n if len(results) < 1:\n log.error(\"No result geocoding for %s\", address)\n return (-1, -1)\n\n result = results[0]\n proper_address = result[\"formatted_address\"]\n loc = result[\"geometry\"][\"location\"]\n lat = loc[\"lat\"]\n lng = loc[\"lng\"]\n\n return (proper_address, lat, lng)\n\n else:\n log.error(\"Error in Geocoding %s\", address)\n return (-1, -1)", "def geocode(address):\n geo_data = requests.get(\"https://geocode.xyz/{}?json=1\".format(\n urllib.parse.quote_plus(address)))\n geo_json = json.loads(geo_data.content)\n\n return geo_json['standard']['city'], geo_json['latt'], geo_json['longt']", "def geolocate_address(self):\n self.geolocator = Nominatim(user_agent=\"fundaft\")\n\n # If latitude / longitude are missing, try to geocode them on the basis\n # of the address \n self.coords = [self.get_coords(address) if np.isnan(lat)\n else (lat, lon) for address, lat, lon in\n zip(self.df_ads['property_title'], \n self.df_ads['latitude'], \n self.df_ads['longitude'])]\n \n df = pd.DataFrame(self.coords, columns=['latitude', 'longitude'])\n \n # If new coordinates are not in Dublin, change to na again\n df = self.is_in_dublin(df)\n\n self.df_ads[[\"latitude\",\"longitude\"]] = df", "def get_location(coordinates):\n location_info = gmaps.reverse_geocode(latlng=coordinates)\n location_list = list()\n for location in location_info:\n if \"locality\" in location[\"types\"]:\n return location[\"formatted_address\"]\n # location_list.append(location[\"formatted_address\"])\n # return location_list", "def geocoding(address, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n # define the parameters of the search\n params = {\n 'address': '{}'.format(address),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n\n geodata = parse_response(response)\n return geodata", "def net_xy(street):\r\n\r\n # api-endpoint\r\n URL = \"https://ags.govmap.gov.il/Search/FreeSearch\"\r\n # headers\r\n headers = {\"Content-Type\": \"application/json\", \"charset\": \"utf-8\"}\r\n # location given here\r\n try:\r\n p = \"{\\\"keyword\\\": \\\"\" + street + \"\\\",\\\"LstResult\\\": null}\"\r\n PARAMS = p.encode(\"utf-8\")\r\n\r\n # sending get request and saving the response as response object\r\n r = requests.post(url=URL, data=PARAMS, headers=headers)\r\n\r\n # extracting data in json format\r\n data = r.json()\r\n\r\n # extracting latitude, longitude and formatted address\r\n # of the first matching location\r\n\r\n X = 
data['data']['Result'][0]['X']\r\n Y = data['data']['Result'][0]['Y']\r\n except Exception as e:\r\n print(e)\r\n # print('exception ddamammnnnnn')\r\n print(street)\r\n return 0,0\r\n return X,Y", "def rlis_geocode(addr_str, token):\n\n url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n params = {\n 'token': token,\n 'input': addr_str,\n 'form': 'json'\n }\n rsp = requests.get(url, params=params)\n\n if rsp.status_code != 200:\n return -1, -1, -1\n else:\n json_rsp = rsp.json()\n if json_rsp['error']:\n return -1, -1, -1\n else:\n return json_rsp['data'][0]['lat'], json_rsp['data'][0]['lng'], json_rsp['data'][0]['fullAddress']", "def fetch_address(address):\n geo_location = {\"latitude\": None, \"longitude\": None}\n logger.info(\"Querying for address '%s'\", address)\n\n response = requests.get(GOOGLE_URL, params={'address': address})\n time.sleep(1)\n\n try:\n body = response.json()\n except:\n logger.error(response.status_code)\n logger.error(response.text)\n return geo_location\n\n if body['status'] == 'OK':\n if len(body['results']) == 1:\n if 'geometry' in body['results'][0]:\n location = body['results'][0]['geometry']['location']\n geo_location['latitude'] = location['lat']\n geo_location['longitude'] = location['lng']\n else:\n logger.warning(\"Address is ambiguous!\")\n elif body['status'] == 'ZERO_RESULTS':\n logger.debug(\"Unknown address.\")\n elif body['status'] == 'OVER_QUERY_LIMIT':\n logger.warning(body)\n else:\n logger.error(\"ERROR: status is unknown!\")\n logger.error(body)\n\n return geo_location", "def getLatAndLong(addr):\n\ttry:\n\t\tlocation = geolocator.geocode(addr, timeout = 2)\n\n\t\tprint (location.latitude, location.longitude)\n\t\treturn (location.latitude, location.longitude)\n\texcept Exception, e:\n\t\tprint e\n\t\treturn None", "def geocode(address):\n\n mapsurl = ('http://maps.googleapis.com/maps/api/geocode/xml?address=' +\n address.replace(' ', '+') + '&sensor=false')\n\n coords = urllib.urlopen(mapsurl).read()\n root = etree.fromstring(coords)\n coordstr = (0, 0)\n loc = root.find(\".//location\")\n if not loc is None:\n coordstr = (loc[1].text, loc[0].text)\n return coordstr", "def geocode(addr_str):\n\n\tbase_url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n\turl_template = '{0}?token={1}&input={2}&form=json'\n\turl = url_template.format(base_url, token, addr_str)\n\tresponse = requests.get(url)\n\n\tif response.status_code != 200:\n\t\tprint 'unable to establish connection with rlis api'\n\t\tprint 'status code is: {0}'.format(response.status_code)\n\t\treturn response.status_code\n\t\n\tjson_rsp = response.json()\n\tif json_rsp['error']:\n\t\tprint 'the following address could not be geocoded:'\n\t\tprint '\\'{0}\\''.format(addr_str)\n\t\tprint 'the following error message was returned:'\n\t\tprint '\\'{0}\\''.format(json_rsp['error']), '\\n'\n\telse:\n\t\treturn json_rsp['data'][0]", "def get_lat_lng(apiKey,address):\n \n url = ('https://maps.googleapis.com/maps/api/geocode/json?address={}&key={}'\n .format(address.replace(' ','+'), apiKey))\n try:\n response = requests.get(url)\n resp_json_payload = response.json()\n lat = resp_json_payload['results'][0]['geometry']['location']['lat']\n lng = resp_json_payload['results'][0]['geometry']['location']['lng']\n except:\n print('ERROR: {}'.format(address))\n lat = 0\n lng = 0\n return lat, lng", "def get_location(self):\n # h = b'\\r\\nAT-MSGEO\\r\\r\\n-MSGEO: -3936,3464,-3612,7402d50c\\r\\n\\r\\n'\n # an example of the string returned from the AT-MSGEO used for testing.\n h = 
self.acquire_response(b'AT-MSGEO')\n if isinstance(h, bytes):\n h = h.decode('utf-8')\n h = h.strip()\n h = h.split(':')\n h = h[1].split(',')\n x = int(h[0])*1000 # Convert coordinates to meters.\n y = int(h[1])*1000\n z = int(h[2])*1000\n else:\n print('Location not available')\n\n # 'geocent' refers to the geo-centered frame that the co-ordinates are returned in\n inProj = Proj(proj='geocent', ellps='WGS84', datum='WGS84')\n\n # 'latlong' is the frame to be converted to\n outProj = Proj(proj='latlong', ellps='WGS84', datum='WGS84')\n\n # Convert X, Y, Z to latitude, longitude and altitude\n long, lat, alt = transform(inProj, outProj, x, y, z, radians=False)\n # l = [str(long), str(lat), str(alt)]\n return long, lat, alt", "def address2latlon(addr):\n key = \"AjVyhHv7lq__hT5_XLZ8jU0WbQpUIEUhQ7_nlHDw9NlcID9jRJDYLSSkIQmuQJ82\" # quota de 125 000 requêtes/année\n # b = geocoder.bing([lat, lon], key=key)\n g = geocoder.bing(addr, key=key)\n #g = geocoder.google(addr)\n gjson = g.json\n timeout = time.time() + 7\n while gjson is None: # Redo until we have a response\n g = geocoder.google(addr)\n gjson = g.json\n if time.time() > timeout: # if google can't find the address after a certain amount of time\n sys.exit(\"Google ne trouve pas cette adresse, veuillez réessayer\")\n return g.latlng", "def get_address_and_parking():\n client = MongoClient()\n\n address = request.get_json()['address']\n time = request.get_json()['time']\n\n address_data = []\n space_data = []\n\n try:\n user_point = geo_functions.geocode_address(address)\n closeBlocks = geo_functions.findCloseBlocks(user_point[\"coordinates\"], 200 , client)\n except ValueError as e:\n return jsonify({\"message\": e.message}), 400\n\n address_data.append({\"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": user_point[\"coordinates\"]},\n \"properties\": {\n \"cleanAddress\": user_point[\"address\"]\n }})\n\n blockCoords = geo_functions.findBlockCoordinates(closeBlocks, client)\n space_data.extend(blockCoords)\n\n try:\n space_data = geo_functions.getBlockAvailability(space_data, time, client)\n except ValueError as e:\n return jsonify({\"message\": e.message}), 400\n\n mapping_data = {\"address_data\": address_data,\n \"space_data\" : space_data}\n\n client.close()\n\n return jsonify(mapping_data)", "def reverseGeo(latit, longit):\n API_PRIVATE = os.environ.get(\"TOM_TOM_PRIVATE\")\n query = 'https://api.tomtom.com/search/2/reverseGeocode/'+str(latit)+'%2C%20' +str(longit)+\\\n '.json?returnSpeedLimit=false&heading=0&radius=50&number=0&returnRoadUse=false&key=' + API_PRIVATE\n response = requests.get(query)\n while True:\n try:\n jsonResponse = response.json()\n break\n except:\n response = requests.get(query)\n\n cur_address = ''\n\n for address in jsonResponse['addresses']:\n cur_address = address['address']['freeformAddress']\n return cur_address", "def get_address_from_coord(lat, lon, proxy):\n address = \"\"\n try:\n # build url query to gmap reverse geocoding\n urlquery = u'http://maps.googleapis.com/maps/api/geocode/xml?latlng=%f,%f&sensor=false' % (lat, lon)\n gmap_response = requests.get(urlquery, proxies=proxy)\n gmap_xml = ET.fromstring(gmap_response.text.encode('utf-8'))\n\n # get address (first result)\n first_result = gmap_xml.find(u'result/formatted_address')\n if not (first_result is None):\n address = first_result.text\n except Exception as e:\n print(str(e))\n pass\n finally:\n return address", "def test_query_google(self):\n google_api = LocationData()\n latLng = 
google_api.getLatLong(test_address)\n self.assertEqual(latLng['lat'], 32.625849)", "def get_apartment_latlng(self, soup, apartment_dict):\n import googlemaps\n from datetime import datetime\n\n gmaps = googlemaps.Client(key='AIzaSyBxV4EAXU1aMLGU9bnokygGL92c2BxDzCE')\n\n # Geocoding an address\n geocode_result = gmaps.geocode(apartment_dict['address'])\n\n if len(geocode_result) > 0:\n # Store lat and lng\n apartment_dict['lat'] = geocode_result[0]['geometry']['location']['lat']\n apartment_dict['lng'] = geocode_result[0]['geometry']['location']['lng']\n else:\n print(\"Failed to find lat and lng values\")", "def getLatLng(zipcode=22207) -> (float, float):\n r = requests.get(f\"https://geocode.xyz/{zipcode}?json=1\")\n data = r.json()\n lat = data.get('latt')\n lng = data.get('longt')\n return lat, lng" ]
[ "0.7102267", "0.7014812", "0.6976603", "0.6896242", "0.68889475", "0.68035966", "0.66808176", "0.66728675", "0.6567192", "0.6561787", "0.6561166", "0.65074974", "0.64575374", "0.6441019", "0.6414824", "0.63909835", "0.6352897", "0.634464", "0.6343922", "0.6325966", "0.6294006", "0.627892", "0.6275722", "0.62111694", "0.6162645", "0.612713", "0.60983914", "0.6068852", "0.6012342", "0.6005279" ]
0.78567797
0
Search through a course query set for the given query text.
def search_courses(courses, query): return courses.annotate( course_id=Concat('subject', Value(' '), 'course_number', Value(' '), 'section', output_field=CharField()), ).annotate(rank=Case( When( course_id__istartswith=query, then=1 ), When( title__icontains=query, then=2 ), When( instructor__icontains=query, then=3 ), default=0, output_field=IntegerField() )).filter(rank__gt=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(query_string):", "def search_courses(self,terms):\n\n return self.course_search.search_for(terms)", "def find_matching_course_indexes(self, query):\r\n return self.course_index.find(query)", "def search(self, query_string):\n terms = query_string.lower().split()\n result = set(self.wordDict[terms[0]])\n if len(result) == 0:\n return list()\n else:\n for t in terms[2:]:\n records_containing_t = self.wordDict[t]\n result = result.intersection(records_containing_t)\n return [self.get_record_dict(id).getTuple() for id in result]", "def course_query(self, term, **kwargs):\n data = {\n 'course_number': kwargs.get('course_number', ''),\n 'subject': kwargs.get('subject', ''),\n 'instructor': kwargs.get('instructor', ''),\n 'course_start_eval': 'After', # todo verify vs 'at'\n 'course_start_time': kwargs.get('start', '-'), # todo parse arg into correct time\n 'course_end_eval': 'Before', # todo verify vs 'at'\n 'course_end_time': kwargs.get('end', '-'), # todo parse arg into correct time,\n 'course_level': kwargs.get('level', '-'),\n 'course_units': kwargs.get('units', '-'),\n 'course_status': 'ALL',\n 'sortBy': '',\n 'showMe': '',\n 'runMe': '1',\n 'clearMe': '1',\n 'termCode': term.code,\n 'expandFilters': ''\n }\n try:\n r = self.post(self.COURSE_SEARCH_ENDPOINT, data=data)\n results = json.loads(r.text)['Results'] # {'COLUMNS': [...], 'DATA': [[col1_data, ...], ...}\n except KeyError:\n r = self.post(self.COURSE_SEARCH_ENDPOINT, data=data)\n results = json.loads(r.text)['Results']\n\n nrml_course_responses = self._normalize_course_query_response(results)\n\n courses = [self._course_from_query_response(term, resp) for resp in nrml_course_responses]\n return courses", "def search(self, query_id, query_str):\n pass", "def search(self, query):", "def search(self, query):\n logger.debug('Performing search for: '+query)\n write_textfield('queryString', query+\"\\n\", check=False)\n self.waitForLoaderToDisappear()", "def search(text):\n s = Search()\n result = _search(s, text)\n _print_results(result)\n return result", "def get_course_by_key_words(input):", "def search(self, term):", "def search_courses():\n current_user = view_helpers.get_current_user()\n courses, has_more = m.Course.search(flask.request.values, current_user)\n\n course_dicts, user_course_dicts, _ = (\n m.Course.get_course_and_user_course_dicts(courses, current_user))\n\n return api_util.jsonify({\n 'courses': course_dicts,\n 'user_courses': user_course_dicts,\n 'has_more': has_more,\n })", "def search(self, query_string):\n return models.Thread.objects.filter(\n reduce(\n lambda q, f: q & Q(title__icontains=f),\n query_string.split(),\n Q()))", "def search(self, query):\n return self._search_provider.search(self._normalize_query(query))", "def search_by_string(self):\n print(\"*** String Search ***\\n\")\n print(\"Enter a search string.\\n\")\n print(\"- NAME and NOTE will be searched for all tasks -\")\n print(\"- Searching IS case-sensitive, but partial matches will be returned -\\n\")\n while True:\n try:\n search_string = input(\">>> \")\n results = self.regex_entry_search(search_string)\n except re.error:\n print(\"Couldn't parse search query. 
Please try again.\")\n else:\n clear_screen()\n print(f\"Found {len(results)} matches for string \\\"{search_string}\\\"...\\n\")\n self.print_selected_entries(results)\n break", "def search(self, q):\n self.__query = q\n self.scrape_page()", "def _search(client, search_string):\n if search_string is None:\n logger.info(uxstring.UxString.list_all, fg=\"green\")\n\n current_page = 0\n total_pages = get_search_results(client, search_string, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n next_page = get_next_page(prompt_resp, current_page)\n if next_page == -1:\n model_id = prompt_resp\n display_search_info(client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n elif next_page != current_page:\n get_search_results(client, search_string, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def search(self, query, maxhits=100):", "def search(self, text: str, category: str = None):\n if category:\n # ensuring that it will be in lowercase\n category = category.lower()\n\n if not category or not category in self.search_categories:\n category = \"all\"\n\n search_url = f\"{SITE_URL}/{self.search_categories[category]}/?adb.search={text}\"\n\n # return answer.text\n return self.fetch_url(search_url)", "def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)", "def search(self, query: str) -> \"QuerySet\":\n if not query:\n return self # Ignore the search if it's an empty sting\n try:\n fields: List[\n Union[Tuple[str, str], str]\n ] = self.model.SEARCH_FIELDS # type: ignore\n except AttributeError:\n fields = []\n try:\n combined_fields: Dict[str, Sequence] = self.model.SEARCH_COMBINED_FIELDS # type: ignore\n except AttributeError:\n combined_fields = {}\n conditions: List = []\n queryset: \"QuerySet\" = self\n if combined_fields:\n annotations = {}\n for name, combined_field in combined_fields.items():\n concat = []\n for item in combined_field:\n concat += [item, Value(\" \")]\n print(concat)\n annotations[name] = Concat(*concat, output_field=CharField())\n queryset = self.annotate(**annotations) # type: ignore\n conditions += [\n Q(**{f\"{field}__icontains\": query})\n for field in fields + list(combined_fields.keys())\n ]\n if conditions:\n return queryset.filter(reduce(lambda x, y: x | y, conditions)).distinct()\n return self.none() # type: ignore", "def search(self, **kwargs):\n return keyword_search(self._rq_list, **kwargs)", "async def search(self, ctx: Context, category: str, *, query: str) -> None:\n if category not in config.basic_search_categories:\n await ctx.send(f\"Invalid Category! 
```Available Categories : {', '.join(config.basic_search_categories)}```\")\n return\n await self._basic_search(ctx, query, category)", "def search(self, query=None):\n\n self.visual.log(\"Starting search\")\n if self.search_invoke_counter > 0:\n # step to the starting history to search everything\n self.reset_history()\n search_done = False\n just_began_search = True\n query_supplied = bool(query)\n\n ttr = TimedThreadRunner(self.search_for_entry, \"\")\n # ttr.set_delay(1, self.visual.log, \"delaying search execution...\")\n\n while True:\n # get new search object, if it's a continued search OR no pre-given query\n if not just_began_search or (just_began_search and not query_supplied):\n search_done, new_query = self.visual.receive_search()\n self.visual.log(\"Got: [{}] [{}]\".format(search_done, new_query))\n if search_done is None:\n # pressed ESC\n self.visual.message(\"Aborting search\")\n return\n if new_query == \"\" and search_done:\n # pressed enter\n self.visual.message(\"Concluded search\")\n break\n # got an actual query item\n # if query content is updated, reset the timer\n query = new_query\n\n query = query.lower().strip()\n # ttr.reset_time(query)\n # self.visual.log(\"Got query: {}\".format(query))\n # ttr.update_args(query)\n # ttr.start()\n # ttr.stop()\n # results_ids = ttr.get_result()\n results_ids = self.search_for_entry(query)\n # results_ids = []\n just_began_search = False\n self.search_invoke_counter += 1\n if not self.visual.does_incremental_search:\n break\n\n if not query:\n # no search was performed\n return\n # push the reflist modification to history\n self.change_history(results_ids, \"search:\\\"{}\\\"\".format(query))", "def find_courses_by_search_target(self, field_name, field_value):\n entries = self.find_matching_course_indexes(\n search_targets={field_name: field_value}\n )\n return [\n CourseLocator(entry['org'], entry['course'], entry['run']) # Branch agnostic\n for entry in entries\n ]", "def search(self, search_params):\n if self.db.is_data_set():\n return self.db.search(search_params)\n else:\n self.crawler.initialize()\n # return self.db.search(search_params)", "def search(request):\n if 'q' in request.GET:\n term = request.GET['q']\n story_list = Story.objects.filter(Q(title__contains=term)|Q(markdown_content__contains=term))\n heading = \"Search results\"\n return render_to_response(\"cms/story_list.html\",locals())", "def search_general(abe, q):\n def process(row):\n (name, code3) = row\n return { 'name': name + ' (' + code3 + ')',\n 'uri': 'chain/' + str(name) }\n ret = map(process, abe.store.selectall(\"\"\"\n SELECT chain_name, chain_code3\n FROM chain\n WHERE UPPER(chain_name) LIKE '%' || ? || '%'\n OR UPPER(chain_code3) LIKE '%' || ? 
|| '%'\n \"\"\", (q.upper(), q.upper())))\n return ret", "def get(self, query_text):\n\n with open(FILEPATH, encoding='utf-8') as f:\n lines = f.readlines()\n\n new_search_result = models.SearchResult(query_text=query_text)\n\n occurrence_object_list = []\n\n for line in lines:\n line_index = lines.index(line)\n\n for m in re.finditer(re.escape(query_text), line, re.M|re.I):\n\n text_start = m.start()\n text_end = m.end()\n\n #Initial params for second part of sentence\n second_part = ''\n boundary_index = None\n line_count = 1\n search_line = line[text_start:].replace('\"', \"'\")\n\n #intial params for first part of sentence\n first_part = ''\n boundary_index_rev = None\n line_count_rev = -1\n search_line_rev = line[:text_start].replace('\"', \"'\")\n\n while boundary_index == None or boundary_index_rev == None:\n # Forward Scan of query_text sentence until punctuation or \\n\n if boundary_index == None:\n if (\".\" not in search_line and\n \"?\" not in search_line and\n \"!\" not in search_line):\n\n second_part += search_line\n try:\n search_line = lines[line_index\n + line_count].replace('\"', \"'\")\n except IndexError:\n boundary_index = search_line.index(\n search_line[-1]\n )\n else:\n if search_line == \"\\n\":\n boundary_index = lines[line_index +\n line_count -1].index(\"\\n\")\n\n line_count += 1\n else:\n for punc in (\".\", \"!\", \"?\"):\n try:\n boundary_index = search_line.index(punc)\n except ValueError:\n continue\n try:\n #If last word is in quotes, grab quote after period\n if search_line[boundary_index + 1] == \"'\":\n add_quote_index = 2\n else:\n add_quote_index = 1\n except IndexError:\n add_quote_index = 0\n second_part += search_line[:boundary_index\n + add_quote_index]\n\n # Backwards Scan of query_text sentence until punctuation or \\n\n if boundary_index_rev == None:\n if (\".\" not in search_line_rev and\n \"?\" not in search_line_rev and\n \"!\" not in search_line_rev):\n first_part = search_line_rev + first_part\n\n if search_line_rev == \"\\n\":\n boundary_index_rev = search_line_rev.index(\"\\n\")\n\n elif line_index + line_count_rev >= 0:\n search_line_rev = lines[line_index\n + line_count_rev].replace('\"', \"'\")\n line_count_rev -= 1\n else:\n boundary_index_rev = search_line_rev.index(\n search_line_rev[0]\n )\n else:\n for punc in (\".\", \"!\", \"?\"):\n try:\n boundary_index_rev = search_line_rev.rindex(\n punc)\n except ValueError:\n continue\n first_part = (search_line_rev[boundary_index_rev+1:]\n + first_part)\n\n sentence = (first_part + second_part).replace('\\n', ' ').strip()\n\n occurrence_object_list.append(\n models.Occurrence(\n search_result = new_search_result,\n line = line_index + 1,\n start = text_start + 1,\n end = text_end + 1,\n in_sentence = sentence\n )\n )\n\n #Add occurrences to SearchResult\n setattr(new_search_result, 'occurrences', occurrence_object_list)\n new_search_result.set_num_of_occurrences()\n response = marshal(new_search_result, search_fields)\n return jsonify(response)", "def search(self, q):\n for x in self.strings:\n if q in x:\n return True\n \n return False\n\n\n pass" ]
[ "0.6957833", "0.6722406", "0.65923154", "0.6582587", "0.65008897", "0.64552164", "0.6410639", "0.6144836", "0.61288846", "0.60934883", "0.6073968", "0.60084444", "0.5999161", "0.5854337", "0.5810443", "0.5810396", "0.5810294", "0.5807406", "0.5799478", "0.5748375", "0.5746335", "0.57287854", "0.5716998", "0.5672063", "0.5646376", "0.5630494", "0.56276155", "0.5614247", "0.5598559", "0.55958277" ]
0.68331397
1
Filter a course query set based on known filtering parameters.
def filter_courses(courses, params): if 'notFull' in params and params['notFull'] == 'true': courses = courses.filter( Q(enrollment__lt=F('max_enrollment')) | Q(max_enrollment=0) ) if 'distributions' in params: ds = int(params['distributions']) valid = [] if ds & 0x1: valid.append(0) if ds & 0x2: valid.append(1) if ds & 0x4: valid.append(2) if ds & 0x8: valid.append(3) courses = courses.filter(distribution__in=valid) return courses
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_filter_by_org(self):\n # Create a second course to be filtered out of queries.\n alternate_course = self.create_course(\n org=md5(self.course.org.encode('utf-8')).hexdigest()\n )\n\n assert alternate_course.org != self.course.org\n\n # No filtering.\n unfiltered_courses = self._make_api_call(self.staff_user, self.staff_user)\n for org in [self.course.org, alternate_course.org]:\n assert any((course.org == org) for course in unfiltered_courses)\n\n # With filtering.\n filtered_courses = self._make_api_call(self.staff_user, self.staff_user, org=self.course.org)\n assert all((course.org == self.course.org) for course in filtered_courses)", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter_by_query_params(self, request):\n items = self\n project = request.GET.get('project', None)\n customer = request.GET.get('customer', None)\n company = request.GET.get('company', None)\n engineer = request.GET.get('engineer', None)\n q = request.GET.get('q', None)\n sort_by = request.GET.get('sort_by', None)\n str = request.GET.get('str', None)\n\n if project:\n items = items.filter(project=project).distinct()\n if engineer:\n items = items.filter(lead__sales_engineer=engineer).distinct()\n if customer:\n items = items.filter(lead__customer=customer).distinct()\n if company:\n items = items.filter(company=company).distinct()\n # sort\n if q == 'asc' and sort_by:\n items = items.order_by(sort_by).distinct()\n\n if q == 'des' and sort_by:\n items = items.order_by('-' + sort_by).distinct()\n\n if str:\n # str = str.strip().lower()\n items = items.filter(Q(reference_no__icontains=str) |\n Q(erp_reference__icontains=str)).distinct()\n return items", "def filter(self, *args, **kwargs):\n self._not_support_combined_queries(\"filter\")\n return self._filter_or_exclude(False, args, kwargs)", "def filter(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n from jetengine.query_builder.transform import validate_fields\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QNot, QCombination)):\n if self._filters:\n self._filters = self._filters & arguments[0]\n else:\n self._filters = arguments[0]\n else:\n validate_fields(self.__klass__, kwargs)\n if self._filters:\n self._filters = self._filters & Q(**kwargs)\n else:\n if arguments and len(arguments) == 1 and isinstance(arguments[0], dict):\n self._filters = Q(arguments[0])\n else:\n self._filters = Q(**kwargs)\n\n return self", "def filter_by(cls, **kwargs):\n return cls.query.filter_by(**kwargs)", "def filter_by_query_params(self, request):\n items = self\n company = request.GET.get('company', None)\n main_contractor = request.GET.get('main_contractor', None)\n main_sub_contractor = request.GET.get('main_sub_contractor', None)\n client = request.GET.get('client', None)\n q = request.GET.get('q', None)\n sort_by = request.GET.get('sort_by', None)\n str = request.GET.get('str', None)\n\n # filter\n if main_contractor:\n items = items.filter(main_contractor=main_contractor).distinct()\n if main_sub_contractor:\n items = items.filter(main_sub_contractor=main_sub_contractor).distinct()\n if client:\n items = items.filter(client=client).distinct()\n if company:\n items = 
items.filter(companies_linked__in=[company]).distinct()\n # sort\n if q == 'asc' and sort_by:\n items = items.order_by(sort_by).distinct()\n\n if q == 'des' and sort_by:\n items = items.order_by('-' + sort_by).distinct()\n\n if str:\n # str = str.strip().lower()\n items = items.filter(Q(reference_no__icontains=str) |\n Q(name__icontains=str)).distinct()\n return items", "def get_courses_by_query(self, query: str, *filters: str, \n year=None) -> List[Course]:\n\n url = self._URL + \"search\"\n\n payload = {\n \"view\": \"xml-20200810\",\n \"filter-coursestatus-Active\": \"on\",\n \"q\": query,\n }\n payload.update({f: \"on\" for f in filters})\n if year:\n payload.update({\"academicYear\": year.replace('-', '')})\n\n res = self._session.get(url, params=payload)\n\n root = ET.fromstring(res.content)\n courses = root.findall(\".//course\")\n\n return [Course(course) for course in courses]", "def filter_query(self, query):\n\n if self.state:\n query = query.filter(self.model_class.state == self.state)\n if self.term:\n term = '%{}%'.format(self.term)\n query = query.filter(\n or_(\n *[column.ilike(term) for column in self.term_columns]\n )\n )\n if self.user_ids:\n query = query.filter(self.model_class.user_id.in_(self.user_ids))\n if self.group_ids:\n query = query.filter(self.model_class.group_id.in_(self.group_ids))\n if self.issues:\n query = query.filter(self.model_class._issues.has_any(self.issues))\n if self.categories:\n query = query.filter(\n self.model_class._categories.has_any(self.categories)\n )\n if self.organizations:\n query = query.filter(\n self.model_class._organizations.has_any(self.organizations)\n )\n\n return query", "def filter():\n\n course = request.args['course-filter']\n\n # Get relevant recipes\n get_recipes = mongo.db.recipes.find({'course': {'$regex': course}})\n\n count_recipes = mongo.db.recipes.count_documents({'course':\n {'$regex': course}})\n\n if course == 'All':\n flash('Here are our all of our recipes:', 'success')\n return redirect(url_for('index'))\n # If there are no recipes with the selected course\n elif count_recipes == 0:\n flash('There are currently no ' + course + ' recipes', 'danger')\n return redirect(url_for('index'))\n else:\n flash('Here are our ' + course + ' recipes:', 'success')\n return render_template('filter.html', title=course + ' Recipes',\n recipes=get_recipes)", "def filter_query(self, request, query, view):\n\n if not request.params:\n return query\n\n querystring_params = self.parse_query_string(request.params)\n query, filter_list = self.build_filter_list(querystring_params, query, view)\n\n return self.apply_filter(query, filter_list)", "def filters(self, institute_id, category=\"snv\"):\n filters_res = self.filter_collection.find(\n {\"institute_id\": institute_id, \"category\": category}\n )\n\n return filters_res", "def filter_courses(original_courses_list, year, upper_bound, lower_bound, semester):\n filtered_courses_list = []\n\n for course in original_courses_list:\n if year is not None and course.year != year:\n continue\n if upper_bound is not None and course.grade > upper_bound:\n continue\n if lower_bound is not None and course.grade < lower_bound:\n continue\n if semester is not None and course.semester != semester:\n continue\n filtered_courses_list.append(course)\n\n return filtered_courses_list", "def filter_queryset(self, queryset):\n for backend in list(self.filter_backends):\n queryset = backend().filter_queryset(self.request, queryset, self)\n return queryset", "def filter_by_query_params(self, request):\n 
items = self\n project = request.GET.get('project', None)\n customer = request.GET.get('customer', None)\n quote = request.GET.get('quote', None)\n\n if project:\n items = items.filter(project=project).distinct()\n if customer:\n items = items.filter(customer=customer).distinct()\n if quote:\n items = items.filter(quote=quote).distinct()\n\n return items", "def filter(self, filters):", "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n if applicable_filters:\n queryset = queryset.filter(applicable_filters)\n if applicable_exclusions:\n queryset = queryset.exclude(applicable_exclusions)\n return queryset", "def default_filters(self, query) -> object:\n assignment_id = self.request.matchdict.get('assignment_id')\n if assignment_id:\n query.filter(self.model.assignment_id == assignment_id)\n return query", "def filter(cls, *args, **kwargs) -> models.QuerySet:\n return cls.objects.filter(*args, **kwargs)", "def satisfying_courses(self):\n return (\n Course.objects.all()\n .exclude(id__in=self.overrides.all())\n .filter(\n Q(department__in=self.departments.all(), semester=self.semester)\n | Q(id__in=self.courses.all())\n )\n )", "def filter_query(self, query, request, resource):\n raise NotImplementedError()", "def filter(self, filterstring):\n if filterstring not in self.FILTERS:\n raise ValueError(\"{dataset} must be in {d for d in self.DATASETS}\")\n self.query[\"filter\"] = filterstring\n return self", "def get_filter_subset(cls, params, rel=None):\n # Determine names of filters from query params and remove empty values.\n # param names that traverse relations are translated to just the local\n # filter names. eg, `author__username` => `author`. Empty values are\n # removed, as they indicate an unknown field eg, author__foobar__isnull\n filter_names = {cls.get_param_filter_name(param, rel) for param in params}\n filter_names = {f for f in filter_names if f is not None}\n return OrderedDict(\n (k, v) for k, v in cls.base_filters.items() if k in filter_names\n )", "def filter(self, request, queryset): # NOQA: A003\n pro = request.GET.get(\"production\")\n\n if pro:\n queryset = queryset.filter(production=pro)\n\n queryset = queryset.prefetch_related(\"production\")\n\n return queryset", "def filter_queryset(self, queryset):\n query_params = self.request.query_params\n # validate query parameters\n exception_response = ParamsCheck.validate(\n query_params, APIParams.products_list_params\n )\n if exception_response:\n return exception_response\n\n products_qs = self.get_queryset() # all\n\n category = query_params.get(\"category\", None)\n exclude_ingredients = query_params.get(\"exclude_ingredient\", None)\n exclude_ingredients = self._clean_string(exclude_ingredients)\n include_ingredients = query_params.get(\"include_ingredient\", None)\n include_ingredients = self._clean_string(include_ingredients)\n\n # filtering part\n if category is not None:\n products_qs = products_qs.filter(category=category)\n for each in include_ingredients:\n products_qs = products_qs.filter(ingredients__name=each)\n for each in exclude_ingredients:\n products_qs = products_qs.exclude(ingredients__name=each)\n\n return products_qs", "def filter(self, *args, **kwargs):\n self._expand_pk(kwargs)\n return self._filter_or_exclude(False, *args, **kwargs)", "def step_filter(self, qs):\n return qs", "def filter_queryset(self, queryset):\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n # assert isinstance(queryset, 
models.QuerySet), \\\n # \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n # % (type(self).__name__, name, type(queryset).__name__)\n return queryset", "def filter(self, *args, **kwargs):\n return FilteredQuery(self, F(*args, **kwargs))" ]
[ "0.6403235", "0.6289507", "0.6289507", "0.61878574", "0.6167096", "0.6147818", "0.6120054", "0.6090686", "0.6059228", "0.6019451", "0.5984955", "0.595956", "0.5954526", "0.5939324", "0.5930389", "0.5916537", "0.5906486", "0.58574575", "0.5847235", "0.58464557", "0.5833112", "0.5831829", "0.5830601", "0.58207303", "0.5811344", "0.580825", "0.5803983", "0.5798867", "0.57852703", "0.5771717" ]
0.69457674
0
Get path to freshclam
def get_freshclam_path(module): try: freshclam_binary = module.get_bin_path('freshclam') if freshclam_binary.endswith('freshclam'): return freshclam_binary except AttributeError: module.fail_json(msg='Error: Could not find path to freshclam binary. Make sure freshclam is installed.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flatpath(cam):\n return os.path.join(BASEPATH, cam + \"_flats\")", "def darkpath(cam):\n return os.path.join(BASEPATH, cam + \"_dark\")", "def get_ocio_path():\n bl_path = os.getcwd()\n version = f'{bpy.app.version[0]}' + '.' + f'{bpy.app.version[1]}'\n cs_folder = os.path.join(bl_path, version, 'datafiles', 'colormanagement')\n\n return os.path.join(cs_folder, 'config.ocio')", "def _get_cora_filepath():\n # type: () -> Tuple[str, str]\n cache_root = download.get_dataset_directory(_root)\n feat_cache_path = os.path.join(cache_root, feat_file_name)\n edge_cache_path = os.path.join(cache_root, edge_file_name)\n return feat_cache_path, edge_cache_path", "def update_freshclam(module, freshclam_binary):\n rc_code, out, err = module.run_command(\"%s\" % (freshclam_binary))\n return rc_code, out, err", "def path(self):\r\n return self.chroot", "def path(self):\r\n return self.chroot", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def _get_default_path(self):\n # return os.path.join(datasets.ROOT_DIR, 'data', 'MSRC21')\n # set local path\n return u'/Users/danilonunes/workspace/datasets/msrc21/'", "def get_installation_path():\n file_abs_path = os.path.abspath(__file__)\n real_file_abs_path = os.path.realpath(file_abs_path)\n return real_file_abs_path[:real_file_abs_path.find('/node')]", "def get_cache_path(self):", "def get_cache_path(self):", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def pathtofolder():\n return os.getcwd()", "def get_output_path():\n return os.getcwd() + \"/output/\"", "def cache_path(self):", "def cache_path(self):", "def find_rocm_path():\n if \"ROCM_PATH\" in os.environ:\n return os.environ[\"ROCM_PATH\"]\n cmd = [\"which\", \"hipcc\"]\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n (out, _) = proc.communicate()\n out = out.decode(\"utf-8\").strip()\n if proc.returncode == 0:\n return os.path.realpath(os.path.join(out, \"../..\"))\n rocm_path = \"/opt/rocm\"\n if os.path.exists(os.path.join(rocm_path, \"bin/hipcc\")):\n return rocm_path\n raise RuntimeError(\"Cannot find ROCm path\")", "def get_root():\n\n return 'data/simulators/mg1'", "def fake_full_path(self) -> PurePath:\n if self.category:\n # Giza wrote out yaml file artifacts under a directory. e.g. steps-foo.yaml becomes\n # steps/foo.rst\n return self.source_path.parent.joinpath(\n PurePath(self.category), self.output_filename\n )\n return self.source_path", "def get_fmu_file_path(self):\n return self.fmu_file", "def path(self):\n return self.chroot", "def seed(path):\n return os.path.join(os.path.split(os.path.realpath(__file__))[0], path)", "def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')", "def path_to_calib_dir_default(self):\n if cp.instr_dir .value() is None : return None\n if cp.instr_name.value() is None : return None\n if cp.exp_name .value() is None : return None\n return cp.instr_dir.value() + '/' + cp.instr_name.value() + '/' + cp.exp_name.value() + '/calib'\n #return os.path.join(cp.instr_dir.value(), cp.instr_name.value(), cp.exp_name.value(), 'calib')", "def path_to_calib_dir_custom(self):\n return cp.calib_dir.value()", "def localPath(self):\n return self.home", "def rmfriend_dir():\n return Path.home() / '.rmfriend'", "def masterPath(self):\n\t\treturn fl.File( self._path + '/master.data' )" ]
[ "0.6257656", "0.6125458", "0.59306973", "0.58662236", "0.5794049", "0.5657319", "0.5657319", "0.5646315", "0.5645382", "0.5630027", "0.5627743", "0.5627743", "0.56236285", "0.5618213", "0.5597625", "0.55891645", "0.5585807", "0.5585807", "0.55688757", "0.55400866", "0.55293", "0.552807", "0.55272996", "0.5525762", "0.5525537", "0.550681", "0.549792", "0.54829305", "0.5477329", "0.5476256" ]
0.796606
0
Run freshclam to update ClamAV signatures
def update_freshclam(module, freshclam_binary): rc_code, out, err = module.run_command("%s" % (freshclam_binary)) return rc_code, out, err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n module = AnsibleModule(\n argument_spec=dict(\n update=dict(type='bool', default=True),\n ),\n supports_check_mode=True\n )\n\n update = module.params['update']\n changed = False\n\n # Get path of freshclam\n freshclam = get_freshclam_path(module)\n\n # Update ClamAV signatures via freshclam\n if update:\n rc_code, out, err = update_freshclam(module, freshclam)\n\n # Create Ansible meta output\n if rc_code == 0:\n response = {'update': 'Successfully updated ClamAV signatures via freshclam.'}\n module.exit_json(changed=True, meta=response)\n\n if rc_code == 1:\n response = {'update': 'ClamAV signatures are already up to date.'}\n module.exit_json(changed=False, meta=response)\n\n if rc_code == 2:\n # make sure we catch stdout and stderr\n module.fail_json(msg='Error: ' + str(out) + str(err))", "async def addToFingerPrint(samples, sampleset=, allsampleset=\"all\", workspace=WORKSPACE, sid=, vcf_list=None, \nvcf_list_dir=, working_dir, crosscheck_batch_size, recreate_batch, bamcolname,\ntaiga_dataset, taiga_filename):\n bams = samples[bamcolname]\n bams[sid] = bams.index\n print('adding '+str(len(bams))+' new samples to the fingerprint')\n wm = dm.WorkspaceManager(workspace).disable_hound()\n \n # Create batch files listing all vcfs in fingerprints dir and upload to bucket\n # (NEW VERSION ONLY) will only needed if need to recreate batches\n if recreate_batch:\n if not vcf_list:\n vcf_list = gcp.lsFiles([vcf_list_dir])\n vcf_list = wm.get_samples()[\"fingerprint_vcf\"].tolist()\n batches = []\n for i, l in enumerate(range(0, len(vcf_list), crosscheck_batch_size)):\n f = open(working_dir + \"vcf_batch_\"+str(i), 'w')\n f.write(\"\\n\".join(vcf_list[l:l + crosscheck_batch_size]))\n f.close()\n batches.append(working_dir+\"vcf_batch_\"+str(i))\n gcp.cpFiles(batches, vcf_list_dir)\n\n # Upload sample sheet\n samples_df = pd.DataFrame()\n samples_df[[\"bam_filepath\", \"bai_filepath\", \"sample_id\",\n \"participant_id\"]] = bams[bamcolname + [sid, sid]].values\n samples_df = samples_df.set_index('sample_id')\n wm.upload_samples(samples_df, add_participant_samples=True)\n wm.update_sample_set(sampleset, samples_df.index)\n\n # Submit jobs \n submission_id = wm.create_submission(\"fingerprint_bam_with_liftover\", sampleset, \n 'sample_set', expression='this.samples')\n await terra.waitForSubmission(workspace, submission_id)\n\n #1.2 Crosscheck Fingerprint VCFs\n #Here we use Dalmation to run the crosscheck_vcfs workflow on Terra. 
\n # This workflow calls Picard CrosscheckFingerprints to compare the new \n # fingerprint vcfs to batches of existing fingerprint vcfs in fingerprints_dir\n # Create list with new vcfs and upload to bucket\n f = open(working_dir + sampleset, 'w')\n f.write(('\\n').join(wm.get_samples().loc[samples_df.index, 'fingerprints'].tolist()))\n f.close()\n gcp.cpFiles(working_dir + sampleset, vcf_list_dir)\n os.system('rm '+working_dir + sampleset)\n\n # Upload sample sheet\n if recreate_batch:\n sample_group_df = pd.DataFrame(data={\"entity:sample_group_id\" : batches, \"vcf_group\" : [vcf_list_dir + x for x in batches]}).set_index('entity:sample_group_id')\n else:\n sample_group_df = pd.DataFrame(data={\"entity:sample_group_id\" : [sampleset], \"vcf_group\" : [vcf_list_dir+sampleset]}).set_index('entity:sample_group_id')\n \n print(wm.get_entities('sample_group').index.tolist())\n wm.upload_entities(\"sample_group\", sample_group_df)\n try:\n wm.update_entity_set(\"sample_group\", set_id=allsampleset,\n entity_ids=wm.get_entities('sample_group').index)\n except:\n print(\"still can't update entitis, please upload directly from the file in ../temp.tsv\")\n #in case it does not work\n sample_group_df.to_csv(\"../temp.tsv\", sep='\\t')\n\n # Submit jobs\n conf = wm.get_config(\"crosscheck_vcfs\")\n conf['inputs']['crosscheck.run_crosscheck.vcf_second_input_file'] = '\"'+vcf_list_dir+sampleset+'\"'\n wm.update_config(conf)\n submission_id = wm.create_submission(\"crosscheck_vcfs\", allsampleset, \n 'sample_set',expression='this.samples')\n await terra.waitForSubmission(workspace, submission_id)\n\n #1.3 Update LOD matrix\n #Here we update the fingerprint LOD matrix on taiga with the new fingerprints\n # Generate matrix with LOD score for new fingerprint vcfs\n new_lod_list = []\n samples_df = wm.get_entities(\"sample_group\")['cross_checks_out'].tolist()\n for batch in samples_df:\n # could be pd concat\n df = pd.read_csv(batch, sep='\\t', comment='#')\n lod_mat = df.pivot(index=\"LEFT_SAMPLE\",\n columns=\"RIGHT_SAMPLE\", values=\"LOD_SCORE\")\n new_lod_list.append(lod_mat)\n new_lod_mat = pd.concat(new_lod_list)\n new_lod_mat.index.name = None\n new_lod_mat = new_lod_mat.T\n\n # Update LOD matrix ( have to update (A+a)*(B+b) = (AB)+(aB)+(Ab)+(ab))\n prev_lod_mat = tc.get(name=taiga_dataset,file=taiga_filename)\n new_ids = set(new_lod_mat.index)\n old_ids = set(prev_lod_mat.index) - set(new_ids)\n updated_lod_mat = pd.concat((prev_lod_mat.loc[old_ids,old_ids],\n new_lod_mat.loc[new_ids,old_ids]), axis=0)\n updated_lod_mat = pd.concat((updated_lod_mat.loc[new_ids.union(old_ids), old_ids], \n new_lod_mat.transpose().loc[new_ids.union(old_ids, new_ids)]), axis=1)\n updated_lod_mat.to_csv(working_dir+taiga_filename+'.csv')\n \n # Upload updated LOD matrix to Tiaga\n tc.update_dataset(dataset_permaname=taiga_dataset,\n changes_description=\"New bam fingerprints added for \"+sampleset,\n upload_files=[\n {\n \"path\": working_dir+taiga_filename+'.csv',\n \"name\": taiga_filename,\n \"format\": \"NumericMatrixCSV\",\n \"encoding\": \"utf-8\"\n }\n ],\n add_all_existing_files=True)\n\n # finding issues with the dataset\n v = updated_lod_mat.loc[new_ids]\n ref = tracker.getTracker()\n ref = ref.append(samples)\n should = {}\n print(\"\\n\\nsamples that should match but don't:\")\n for u in set(fbams.arxspan_id):\n res = v.loc[fbams[fbams.arxspan_id == u].index,\n ref[ref.arxspan_id == u].index.tolist()]\n for i, j in [(res.index[x], res.columns[y]) for x, y in np.argwhere(res.values < 100)]:\n 
print('__________________________')\n print(res.loc[i, j])\n print(i, ':', tuple(ref.loc[i, ['arxspan_id', 'version', 'datatype', 'participant_id']].values), j, ':', tuple(\n ref.loc[j, ['arxspan_id', 'version', 'datatype', 'participant_id', 'blacklist']]))\n \n print(\"\\n\\nsamples that shouldn't match but do\")\n previ = ''\n shouldnt = {}\n for i, j in [(v.index[x], v.columns[y]) for x, y in np.argwhere(v.values > 500)]:\n if i == j:\n continue\n if ref.loc[i]['participant_id'] == ref.loc[j]['participant_id']:\n continue\n if i != previ:\n if previ != '':\n shouldnt.update({'_'.join(ref.loc[previ, ['arxspan_id', 'version', 'datatype',\n 'participant_id', \n 'stripped_cell_line_name']].astype(str).values.tolist()): n})\n n = [tuple(ref.loc[j, ['arxspan_id', 'version', 'datatype',\n 'participant_id', 'stripped_cell_line_name']].values)]\n else:\n n.append(tuple(ref.loc[j, ['arxspan_id', 'version', 'datatype',\n 'participant_id', 'stripped_cell_line_name']].values))\n previ = i\n return updated_lod_mat, should, shouldnt", "def patch_sdk():", "def patch_sdk():", "def patch_sdk():", "def digest_final(self): # real signature unknown; restored from __doc__\n pass", "def Generar_Claves():\n salida=Keypp()\n savekey(salida)\n savecomp(salida)", "def init():\n\n return \"Welcome to SIX SIGMA, this api is only available to SIX SIGMA developers\"", "def test_standard_mfcc():\n run_framesync(MFCC)", "def initial_setup():\n\n if os.path.exists(cfg.ca_private_key_path()):\n pkey = _try_load_ca_private_key(cfg.ca_private_key_path())\n else:\n pkey = _generate_ca_private_key(cfg.ca_private_key_path())\n\n if os.path.exists(cfg.ca_cert_path()):\n _try_load_ca_cert(cfg.ca_cert_path())\n else:\n _generate_ca_cert(cfg.ca_cert_path(), pkey)", "def test_vmcp_02(self):\r\n signature = dict(signature='XX')\r\n with patch('os.path.exists', return_value=True):\r\n with patch('pybossa.vmcp.sign', return_value=signature):\r\n res = self.app.get('api/vmcp?cvm_salt=testsalt',\r\n follow_redirects=True)\r\n out = json.loads(res.data)\r\n assert res.status_code == 200, out\r\n assert out['signature'] == signature['signature'], out\r\n\r\n # Now with a post\r\n res = self.app.post('api/vmcp?cvm_salt=testsalt',\r\n follow_redirects=True)\r\n assert res.status_code == 405, res.status_code", "def update_cascs(app, casc_list):\n # To run this package from command line:\n # python -c 'from __init__ import start; start()'\n\n start = time.time()\n\n fy_obj_list = fiscal_years.get_cascs(casc_list)\n\n if __debug__:\n print(\"fy_obj_list:\\n{0}\".format(fy_obj_list))\n fy_obj_list = fiscal_years.parse_fiscal_years(app, fy_obj_list)\n update_casc_total_data(app)\n\n end = time.time()\n\n duration = end - start\n mins = int(duration/60)\n secs = duration % 60\n\n print('Total time: {} minutes and {} seconds'.format(mins, secs))\n\n if not fy_obj_list:\n print(\"\"\"\n\n ===========================================================================\n\n CASC update completed.\\n\"\"\")\n # exit(0)\n # print(\"WHY AM I HERE???\")\n # assert False, \"Should never get here!!!!\"\n # raise Exception(\"Something went wrong in full_hard_search()\")", "def upgrade_savefile(fn):\n\n if signing_keys is None:\n return\n\n atime = os.path.getatime(fn)\n mtime = os.path.getmtime(fn)\n\n with zipfile.ZipFile(fn, \"a\") as zf:\n\n if \"signatures\" in zf.namelist():\n return\n\n log = zf.read(\"log\")\n zf.writestr(\"signatures\", sign_data(log))\n\n os.utime(fn, (atime, mtime))", "async def test_clear_signature_list(\n image_config: ImageConfig, 
image_config_signed: ImageConfig\n):\n image_config_signed.clear_signature_list()\n assert not image_config_signed.get_signature_list()\n\n image_config.clear_signature_list()\n signatures_unsigned = image_config.get_signature_list()\n assert not signatures_unsigned", "def refresh(self):\n\n # The timestamp role does not have signed metadata about it; otherwise we\n # would need an infinite regress of metadata. Therefore, we use some\n # default, sane metadata about it.\n DEFAULT_TIMESTAMP_FILEINFO = {\n 'hashes':None,\n 'length': tuf.conf.DEFAULT_TIMESTAMP_REQUIRED_LENGTH\n }\n\n # Update the top-level metadata. The _update_metadata_if_changed() and\n # _update_metadata() calls below do NOT perform an update if there\n # is insufficient trusted signatures for the specified metadata.\n # Raise 'tuf.NoWorkingMirrorError' if an update fails.\n\n # Use default but sane information for timestamp metadata, and do not\n # require strict checks on its required length.\n self._update_metadata('timestamp', DEFAULT_TIMESTAMP_FILEINFO)\n\n self._update_metadata_if_changed('release', referenced_metadata='timestamp')\n\n self._update_metadata_if_changed('root')\n\n self._update_metadata_if_changed('targets')\n\n # Updated the top-level metadata (which all had valid signatures), however,\n # have they expired? Raise 'tuf.ExpiredMetadataError' if any of the metadata\n # has expired.\n for metadata_role in ['timestamp', 'root', 'release', 'targets']:\n self._ensure_not_expired(metadata_role)", "def prep_mkv(logfile):\n try:\n logging.info(\"Updating MakeMKV key...\")\n update_cmd = \"/bin/bash /opt/arm/scripts/update_key.sh\"\n\n # if MAKEMKV_PERMA_KEY is populated\n if cfg.arm_config['MAKEMKV_PERMA_KEY'] is not None and cfg.arm_config['MAKEMKV_PERMA_KEY'] != \"\":\n logging.debug(\"MAKEMKV_PERMA_KEY populated, using that...\")\n # add MAKEMKV_PERMA_KEY as an argument to the command\n update_cmd = f\"{update_cmd} {cfg.arm_config['MAKEMKV_PERMA_KEY']}\"\n\n subprocess.run(f\"{update_cmd} >> {logfile}\", capture_output=True, shell=True, check=True)\n except subprocess.CalledProcessError as update_err:\n err = f\"Error updating MakeMKV key, return code: {update_err.returncode}\"\n logging.error(err)\n raise RuntimeError(err) from update_err", "def verify(self):\n self.verify_checksums()\n self.verify_apk_signature()\n self.verify_apk_signature_fprs()", "def digest_begin(self): # real signature unknown; restored from __doc__\n pass", "def update_hashes(self, ret=False) -> None:\n self.hash = self.calculate_hash()\n self.stub_hash = self.calculate_hash(include_md=False)", "def appprotect_setup(request, kube_apis, test_namespace) -> None:\n\n print(\"------------------------- Deploy logconf -----------------------------\")\n src_log_yaml = f\"{TEST_DATA}/ap-waf/logconf.yaml\"\n global log_name\n log_name = create_ap_logconf_from_yaml(kube_apis.custom_objects, src_log_yaml, test_namespace)\n\n print(\"------------------------- Create UserSig CRD resource-----------------------------\")\n usersig_name = create_ap_usersig_from_yaml(kube_apis.custom_objects, uds_crd_resource, test_namespace)\n\n print(f\"------------------------- Deploy dataguard-alarm appolicy ---------------------------\")\n src_pol_yaml = f\"{TEST_DATA}/ap-waf/{ap_policy_uds}.yaml\"\n global ap_pol_name\n ap_pol_name = create_ap_policy_from_yaml(kube_apis.custom_objects, src_pol_yaml, test_namespace)\n\n def fin():\n if request.config.getoption(\"--skip-fixture-teardown\") == \"no\":\n print(\"Clean up:\")\n 
delete_ap_policy(kube_apis.custom_objects, ap_pol_name, test_namespace)\n delete_ap_usersig(kube_apis.custom_objects, usersig_name, test_namespace)\n delete_ap_logconf(kube_apis.custom_objects, log_name, test_namespace)\n\n request.addfinalizer(fin)", "async def _autopaired(self, new_user_apc_token: str):\n self._user_apc_token = new_user_apc_token\n self._fconfigure()", "def test_upload_manifest_twice(cidc_api, clean_db, monkeypatch):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n mocks = UploadMocks(\n monkeypatch,\n prismify_extra=PBMC_PATCH,\n )\n\n client = cidc_api.test_client()\n\n grant_upload_permission(user_id, \"pbmc\", cidc_api)\n make_nci_biobank_user(user_id, cidc_api)\n\n res = client.post(\n MANIFEST_UPLOAD,\n data=form_data(\n \"pbmc.xlsx\",\n io.BytesIO(b\"a\"),\n \"pbmc\",\n ),\n )\n assert res.status_code == 200\n\n # Check that we tried to publish a patient/sample update\n mocks.publish_patient_sample_update.assert_called_once()\n\n with cidc_api.app_context():\n assert not DownloadableFiles.list() # manifest is not stored\n\n # uploading second time\n res = client.post(\n MANIFEST_UPLOAD,\n data=form_data(\n \"pbmc.xlsx\",\n io.BytesIO(b\"a\"),\n \"pbmc\",\n ),\n )\n assert res.status_code == 200\n\n assert mocks.upload_xlsx.call_count == 0 # manifest is not stored\n\n with cidc_api.app_context():\n assert not DownloadableFiles.list() # manifest is not stored", "def _provision_package(self):", "def test_bossac_create_with_oldsdk(cc, req, get_cod_par, sup,\n\t\t\t\t runner_config, tmpdir):\n runner_config = adjust_runner_config(runner_config, tmpdir,\n DOTCONFIG_COND1)\n runner = BossacBinaryRunner(runner_config)\n with pytest.raises(RuntimeError) as rinfo:\n with patch('os.path.isfile', side_effect=os_path_isfile_patch):\n runner.run('flash')\n assert str(rinfo.value) == \"This version of BOSSA does not support the\" \\\n \" --offset flag. 
Please upgrade to a newer\" \\\n \" Zephyr SDK version >= 0.12.0.\"", "def update_libscapi(self):\n protocol_name = self.protocol_config['protocol']\n os.system('fab -f Execution/fabfile.py update_libscapi --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def __init__(self, test):\n global manifest_file\n global msl_data_file\n global msl_data_path\n global rsa_key_bin\n rsa_key_bin = 'rsa_manifest_medium.bin'\n msl_data_file = 'msl_data_manifest_medium.json'\n manifest_file = 'manifest_medium.json'\n if os.path.isfile(msl_data_path + rsa_key_bin):\n os.remove(msl_data_path + rsa_key_bin)\n if os.path.isfile(msl_data_path + msl_data_file):\n os.remove(msl_data_path + msl_data_file)\n if os.path.isfile(msl_data_path + manifest_file):\n os.remove(msl_data_path + manifest_file)\n else:\n try:\n os.mkdir(msl_data_path)\n except OSError:\n pass\n\n if self.file_exists(msl_data_path, msl_data_file):\n self._MSL__load_msl_data()\n self.handshake_performed = True\n else:\n if self.file_exists(msl_data_path, rsa_key_bin):\n self._MSL__load_rsa_keys()\n self._MSL__perform_key_handshake()\n else:\n print('Generating Device Keys...')\n self.rsa_key = RSA.generate(2048)\n self._MSL__save_rsa_keys()\n self._MSL__perform_key_handshake()", "def main():\n md5_pass = sys.argv[1]\n print brute_force_rec('f', md5_pass)", "def preCommitFixup(self):\n log_method_call(self, self.name)\n # UEFI firmware/bootloader cannot read 1.1 or 1.2 metadata arrays\n if getattr(self.format, \"mountpoint\", None) == \"/boot/efi\":\n self.metadataVersion = \"1.0\"", "def run_makefakedata(self):\n cl_mfd = self._build_MFD_command_line()\n\n check_ok = self.check_cached_data_okay_to_use(cl_mfd)\n if check_ok is False:\n utils.run_commandline(cl_mfd)\n if not np.all([os.path.isfile(f) for f in self.sftfilenames]):\n raise IOError(\n f\"It seems we successfully ran {self.mfd},\"\n f\" but did not get the expected SFT file path(s): {self.sftfilepath}.\"\n f\" What we have in the output directory '{self.outdir}' is:\"\n f\" {os.listdir(self.outdir)}\"\n )\n logger.info(f\"Successfully wrote SFTs to: {self.sftfilepath}\")\n logger.info(\"Now validating each SFT file...\")\n for sft in self.sftfilenames:\n lalpulsar.ValidateSFTFile(sft)", "def setUp(self):\n self.test_sig = Signature(agreement=self.test_agreement,\n signatory=self.test_user,\n username=self.test_user.username,\n first_name=self.test_user.first_name,\n last_name=self.test_user.last_name,\n email=self.test_user.email,\n department=self.test_department)\n self.test_sig.full_clean()\n self.test_sig.save()" ]
[ "0.6522567", "0.5180732", "0.4842134", "0.4842134", "0.4842134", "0.480723", "0.4806568", "0.46753028", "0.46636984", "0.46499807", "0.4631858", "0.45774838", "0.45397392", "0.4516316", "0.45130098", "0.45083612", "0.4504563", "0.44913578", "0.44790775", "0.44666895", "0.4463028", "0.44615135", "0.44578847", "0.44408107", "0.44253486", "0.44093665", "0.4402698", "0.4394834", "0.4393698", "0.43889922" ]
0.5935258
1
Splits a heightmap into a source and target. For placement, we just need the source heightmap.
def _split_heightmap(self, height): half = height.shape[1] // 2 self._half = half height_s = height[:, half:].copy() return height_s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def regenerate_heightmap(self):\n\n for x in range(16):\n for z in range(16):\n column = x * 16 + z\n for y in range(255, -1, -1):\n if self.get_block((x, y, z)):\n break\n\n self.heightmap[column] = y", "def place_targets():\n\n \n coords = []\n while len(coords)<self.N_targets:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y)\n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords +=[p]\n self.coordinates__targets = coords", "def split_tiles(module_data):\n raise NotImplementedError", "def split_chunks(\n key: core.ChunkKey,\n dataset: xarray.Dataset,\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n # This function splits consolidated arrays into blocks of new sizes, e.g.,\n # ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉\n # X = |x_10 x_11 ...| = ||x_10| |x_11| ...|\n # |x_20 x_21 ...| |⌊x_20⌋ ⌊x_21⌋ ...|\n # ⌊ ... ... ...⌋ ⌊ ... ... ...⌋\n # and emits them as (ChunkKey, xarray.Dataset) pairs.\n all_bounds = []\n for dim, chunk_size in target_chunks.items():\n start = key.get(dim, 0)\n stop = start + dataset.sizes[dim]\n all_bounds.append(_split_chunk_bounds(start, stop, chunk_size))\n\n for bounds in itertools.product(*all_bounds):\n offsets = dict(key)\n slices = {}\n for dim, (start, stop) in zip(target_chunks, bounds):\n base = key.get(dim, 0)\n offsets[dim] = start\n slices[dim] = slice(start - base, stop - base)\n\n new_key = core.ChunkKey(offsets)\n new_chunk = dataset.isel(slices)\n yield new_key, new_chunk", "def __areas_to_pool(self, region_width, region_height, region_width_step, region_height_step):\n \n areas = [[(width_ind * region_width_step, height_ind * region_height_step,\n (width_ind + 1) * region_width_step if (width_ind + 1) < self.width else region_width,\n (height_ind + 1) * region_height_step if (height_ind + 1) < self.height else region_height) for width_ind in range(self.width)] for height_ind in range(self.height)]\n\n return areas", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def rechunking_plan(\n dim_sizes: Mapping[str, int],\n source_chunks: Mapping[str, int],\n target_chunks: Mapping[str, int],\n itemsize: int,\n max_mem: int,\n) -> List[Dict[str, int]]:\n plan_shapes = algorithm.rechunking_plan(\n shape=tuple(dim_sizes.values()),\n source_chunks=tuple(source_chunks[dim] for dim in dim_sizes),\n target_chunks=tuple(target_chunks[dim] for dim in dim_sizes),\n itemsize=itemsize,\n max_mem=max_mem,\n )\n return [dict(zip(dim_sizes.keys(), shapes)) for shapes in plan_shapes]", "def extract_field_blocks(self):\n t_start = time.time()\n\n scale_range = [self.source_range[0], self.source_range[1],\n self.dest_range[0], self.dest_range[1]]\n counter = 0\n\n for source_path, target_path in self.path_pairs:\n\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\n block_id = os.path.split(os.path.dirname(target_path))[-1]\n\n try:\n\n result_warp = gdal.Warp(data_settings.TEMP_PATH,\n source_path,\n srcSRS=self.extract_block_projection(\n source_path),\n dstSRS='EPSG:3067',\n cutlineDSName=data_settings.BLOCK_SHAPEFILE_PATH,\n cutlineWhere=\"LOHKO = '{}'\".format(\n block_id),\n 
cropToCutline=True,\n xRes=self.target_resolution,\n yRes=self.target_resolution,\n srcNodata=self.source_nodata,\n dstNodata=np.nan,\n outputType=gdal.GDT_CFloat32,\n multithread=True)\n\n arr = result_warp.ReadAsArray()\n assert ~np.isnan(arr).all(), \"Warped image contains only NaNs\"\n\n except (RuntimeError, AttributeError, AssertionError) as ex:\n\n print(\"{}\\tError (warp): {}\".format(block_id, ex))\n print(\"\\t\\tFrom\\t{}\".format(source_path))\n print(\"\\t\\tTo\\t{}\".format(data_settings.TEMP_PATH))\n\n continue\n\n finally:\n\n result_warp = None\n\n try:\n\n result_translate = gdal.Translate(target_path,\n settings.TEMP_PATH,\n outputType=gdal.GDT_Float32,\n scaleParams=[scale_range],\n noData=np.nan)\n\n arr = result_translate.ReadAsArray()\n\n assert ~np.isnan(arr).all(\n ), \"Translated image contains only NaNs\"\n assert np.nanmin(arr) >= self.dest_range[0], \"Translated values below lower destination range {}, min={}\".format(\n self.dest_range[0], np.nanmin(arr))\n assert np.nanmax(arr) <= self.dest_range[1], \"Translated values above upper destination range {}, max={}\".format(\n self.dest_range[1], np.nanmax(arr))\n\n print(\"{}\\tFrom\\t{}\".format(block_id, source_path))\n print(\"\\t\\tTo\\t{}\".format(target_path))\n\n plt.rcParams['figure.figsize'] = 1, 1\n\n if len(arr.shape) >= 3:\n\n plt.imshow(arr[:3].transpose(1, 2, 0))\n\n else:\n\n plt.imshow(arr, cmap='gray', vmin=0, vmax=1)\n\n plt.axis('off')\n plt.show()\n\n except (RuntimeError, AttributeError, AssertionError) as ex:\n\n print(\"{}\\tError (translate): {}\".format(block_id, ex))\n print(\"\\t\\tFrom\\t{}\".format(data_settings.TEMP_PATH))\n print(\"\\t\\tTo\\t{}\".format(target_path))\n\n finally:\n\n result_translate = None\n\n counter += 1\n\n t_delta = time.time() - t_start\n print(\"Processed {} field blocks in {:.0f}m {:.0f}s\".format(\n counter, t_delta // 60, t_delta % 60))", "def split_data_set(reddit_path, data_set_name, on, num_splits, target_directories, map_columns=None):\n targets = {}\n for i in range(num_splits):\n targets[i] = os.path.join(target_directories[i], data_set_name)\n mkdir(targets[i])\n\n full_sub_data_path = os.path.join(reddit_path, data_set_name)\n data_files = map(lambda f: os.path.join(full_sub_data_path, f), os.listdir(full_sub_data_path))\n args_list = [(on, table_file, targets, num_splits, map_columns) for table_file in data_files]\n\n pool = mp.Pool(pool_size)\n pool.map(unpack_split_file_with_map, args_list)", "def split_edges(self, maximum_distance):\n \"\"\" Iterate through the vertices of each section. For each vertex v, evaluate edges for which v is a source.\n If an edge of weight greater than maximum_distance, then split it. \"\"\"\n for section_id in self.sections:\n utils.print_progress(len(self.sections), prefix='splitting edges')\n current_section = [] # Need to update the section data after splitting the edges.\n for source in self.sections[section_id]:\n current_section.append(source)\n edges_to_remove = [] # If an edge is split, it will need to be removed.\n for edge in self.graph.get_out_edges(source):\n if self.edge_weights[edge] > maximum_distance:\n target = edge[1] # edge is a numpy array of [source, target, edge]. 
Select target.\n edges_to_remove.append(self.graph.edge(edge[0], edge[\n 1])) # If an edge is split, the original edge should be removed.\n\n new_edge_count = int(math.ceil(self.edge_weights[edge] / maximum_distance))\n new_edge_distance = self.edge_weights[edge] / new_edge_count\n current_point = shapes.Point.from_list(\n list(self.node_locations[source]) + [self.node_heading[target]])\n previous_vertex = source\n for _ in range(new_edge_count):\n current_point = utils.offset_point(current_point, new_edge_distance, current_point.bearing)\n current_vertex = self.graph.add_vertex()\n current_section.append(current_vertex) # The new vertex becomes a part of the section.\n \"\"\" Populate the property map for the new vertex. Inherit values from the target node,\n unless the target node is a junction node. Then inherit values from the source. \"\"\"\n self.node_locations[current_vertex] = current_point.as_list()\n self.node_heading[current_vertex] = current_point.bearing\n property_vertex = source if not self.junctions[target] else target\n self.node_speed_limit[current_vertex] = self.node_speed_limit[property_vertex]\n self.node_width[current_vertex] = self.node_width[property_vertex]\n self.node_id[current_vertex] = self.node_id[property_vertex]\n\n \"\"\" Create an edge between the previous vertex and the newly created vertex, \n and update the edge weight property map. \"\"\"\n current_edge = self.graph.add_edge(previous_vertex, current_vertex)\n self.edge_weights[current_edge] = new_edge_distance\n\n # The current vertex becomes the previous vertex in the next step.\n previous_vertex = current_vertex\n\n \"\"\" Create an edge between the last new vertex that was created and the target of the\n original edge which is being split, and update the property map. 
\"\"\"\n self.edge_weights[self.graph.add_edge(previous_vertex, target)] = new_edge_distance\n list(map(self.graph.remove_edge, edges_to_remove)) # Remove all relevant edges\n self.sections[section_id] = current_section # Update the section with the new vertices", "def get_heightmap(robot,\n heightmap_resolution,\n workspace_limits):\n color_img_set, depth_img_set = robot.get_camera_data()\n depth_img_set = depth_img_set * robot.cam_depth_scale # Apply depth scale from calibration\n color_heightmap, depth_heightmap = utils.get_heightmap(color_img_set, depth_img_set,\n robot.cam_intrinsics,\n robot.cam_pose, workspace_limits,\n heightmap_resolution)\n depth_heightmap[np.isnan(depth_heightmap)] = 0\n kernel = np.ones([3, 3])\n color_heightmap = cv2.dilate(color_heightmap, kernel, iterations=2)\n color_heightmap = cv2.erode(color_heightmap, kernel, iterations=2)\n valid_depth_heightmap = cv2.dilate(depth_heightmap, kernel, iterations=2)\n valid_depth_heightmap = cv2.erode(valid_depth_heightmap, kernel, iterations=2)\n return color_heightmap, valid_depth_heightmap", "def blit(src, src_start, dst, dst_start, width, height, depth):\n # print(\"blit\", src_start, \"to\", dst_start, \"for\", width, height, depth)\n # src_start and dst_start assumed to be (x,y,z) tuples\n src_base = src_start[1]*src.plane_span+src_start[2]*src.row_span+src_start[0]\n dst_base = dst_start[1]*dst.plane_span+dst_start[2]*dst.row_span+dst_start[0]\n\n map = []\n map_ext = [None]*10\n\n for y in range(height):\n src_idx = src_base\n dst_idx = dst_base\n src_base += src.plane_span\n dst_base += dst.plane_span\n for z in range(depth):\n for x in range(width):\n blk = src.blocks[src_idx+x]\n while blk > len(map):\n map.extend(map_ext)\n\n if map[blk] is None:\n map[blk] = block_state_index(dst.palette, **src.palette[blk])\n\n dst.blocks[dst_idx+x] = map[blk]\n\n src_idx += src.row_span\n dst_idx += dst.row_span", "def get_cropped_heightmap(state,heightmap):\r\n map_x, map_y = state[:2]\r\n th = state[2]\r\n windowsize = 100\r\n\r\n #OK, to properly get the heightmap, we need to 1. translate to vehicle origin. 2. Rotate by theta 3. translate by -windowsize/2 to center\r\n HTM_trans = np.array([[1., 0., map_x], [0., 1., map_y], [0., 0., 1.]])\r\n HTM_rot = np.array([[cos(th), -sin(th), 0.], [sin(th), cos(th), 0.], [0., 0., 1.]])\r\n HTM_center = np.array([[1., 0., -windowsize//2], [0., 1., -windowsize//2], [0., 0., 1.]])\r\n HTM = np.matmul(HTM_trans, np.matmul(HTM_rot, HTM_center))\r\n heightmap_tr = skimage.transform.warp(heightmap, ProjectiveTransform(matrix=HTM))\r\n heightmap_out = heightmap_tr[:windowsize, :windowsize]\r\n\r\n return heightmap_out", "def reshape(self,bottom,top):\n pass", "def split_file_with_map(on, file_path, targets, num_splits, map_columns=None):\n file_name = os.path.split(file_path)[1]\n logger.debug(\"Loading: %s\" % file_name)\n df = pd.read_csv(file_path)\n\n def split():\n logger.debug(\"Splitting: %s\" % file_name)\n file_targets = {i: os.path.join(targets[i], file_name) for i in targets}\n split_data_frame(df, on, lambda x: hash(x) % num_splits, file_targets)\n\n def dump():\n if map_columns is not None:\n logger.debug(\"Dumping col. 
map \\\"%s\\\" to Redis: %s\" % (map_columns[0], file_name))\n redis_db = redis.StrictRedis(connection_pool=redis_pool)\n d = dict(zip(df[map_columns[0]], df[map_columns[1]]))\n dump_dict_to_redis(redis_db, d)\n\n # do these two tasks in a random order for load-balancing\n if random.randint(0, 1):\n split()\n dump()\n else:\n dump()\n split()", "def build_map(chunk_start, result, total_chunks, start_id, end_id):\n size = len(chunk_start)\n for i in prange(size):\n beg = chunk_start[i]\n end = chunk_start[i + 1] if i < size - 1 else total_chunks\n if start_id < end and beg < end_id: # [beg, end) intersect [start_id, end_id)\n result[max(beg - start_id, 0) : (end - start_id), 0] = beg\n result[max(beg - start_id, 0) : (end - start_id), 1] = end", "def make_sections(self, split_num=1000):\n self.obstacles.add(self.chairlift.pylons.sprites())\n num = max(1, int(len(self.obstacles) / split_num))\n section_length = int(self.map_size[1] / num)\n self.sections = {}\n for y in range(0, self.map_size[1], section_length):\n rect_info = (0, y, self.map_size[0], section_length)\n rect = pg.Rect(rect_info)\n self.sections[rect_info] = pg.sprite.Group([x for x in self.obstacles if rect.collidepoint(x.rect.midbottom)])", "def split_tileset(self, tileset):\n\n tiles = self.tiles\n firstgid = tileset.firstgid\n tilewidth = self.tilewidth\n tileheight = self.tileheight\n margin = tileset.margin\n\n # carga la imagen del tileset y obtiene sus dimensiones\n image = pygame.image.load(tileset.image_path).convert_alpha()\n image_width, image_height = image.get_size()\n\n # calcula el número de columnas\n cols = image_width // tilewidth\n\n # calcula el espaciamiento entre cada tile en cada eje\n tx = tilewidth + tileset.spacing\n ty = tileheight + tileset.spacing\n\n # calcula la máxima distancia a iterar en cada eje\n max_y = image_height - tileheight + 1\n max_x = image_width - tilewidth + 1\n\n # divide una imagen en tiles\n for row, y in enumerate(xrange(margin, max_y, ty)):\n for col, x in enumerate(xrange(margin, max_x, tx)):\n tile = image.subsurface((x, y, tilewidth, tileheight))\n tiles[firstgid + row * cols + col] = tile", "def overlay(grid, hitmap, structure, x, y):\n\n width, height, bt, hm, portals = structure\n for row_num in range(height):\n for tile in range(width):\n grid[y + row_num][x + tile] = bt[row_num][tile]\n hitmap[y + row_num][x + tile] = hm[row_num][tile]\n\n return grid, hitmap, portals", "def sub_target_mapping(bin_tracking_array, target_bed_array, args, chrom):\n log = Tool_Box.Logger(args, parellel_id=chrom)\n log.debug(\"Traversing chromosome {0}\".format(chrom))\n\n map_list = []\n\n seg_count = bin_tracking_array[bin_tracking_array[:, 1] == chrom.encode()].shape[0]\n target_row_count = target_bed_array[target_bed_array[:, 1] == chrom.encode()].shape[0]\n chrom_slice = bin_tracking_array[bin_tracking_array[:, 1] == chrom.encode()]\n target_slice = target_bed_array[target_bed_array[:, 1] == chrom.encode()]\n\n for i in range(seg_count-1):\n coord_start = int(chrom_slice[i, 2])\n coord_stop = int(chrom_slice[i, 3])\n source_seg_id = int(chrom_slice[i, 0])\n\n target_id_list = []\n seg_match = False\n\n for j in range(target_row_count-1):\n target_start = int(target_slice[j, 2])\n target_stop = int(target_slice[j, 3])\n target_seg_id = int(target_slice[j, 0])\n\n if coord_start <= target_start <= coord_stop:\n seg_match = True\n target_id_list.append(target_seg_id)\n\n elif coord_stop >= target_stop >= coord_start:\n seg_match = True\n target_id_list.append(target_seg_id)\n\n elif 
coord_start >= target_stop >= coord_stop:\n seg_match = True\n target_id_list.append(target_seg_id)\n\n if seg_match:\n map_list.append([source_seg_id, tuple(target_id_list)])\n\n Tool_Box.delete([\"{}{}_{}.log\".format(args.Working_Folder, args.Job_Name, chrom)])\n\n return map_list", "def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map", "def create_hill(map_height, prev_height, max_height, size):\n form = random.choice(['steep', 'concave'])\n\n if form == 'steep':\n if prev_height < map_height/2:\n return [(size/4, min(prev_height * 2, max_height)), (size/2, min(map_height * 4/5, max_height)), (size, min(map_height * 9/10, max_height))]\n else:\n return [(size/4, min(map_height * 4/5, max_height)), (size/2, min(map_height * 9/10, max_height)), (size, min(map_height * 9/10, max_height))]\n elif form == 'concave':\n return [(size/2, map_height/2), (size, map_height * 3/4)]", "def match_chunk_permuted(src, target, indices, match_bounds=False):\n\n ds = src.datashape.copy()\n ds.dim_low = list(ds.dim_low)\n ds.dim_high = list(ds.dim_high)\n ds_target = target.datashape.copy()\n ds_target.dim_low = list(ds_target.dim_low)\n ds_target.dim_high = list(ds_target.dim_high)\n\n hi1 = ds.dim_high\n hi2 = ds_target.dim_high\n\n # lookup array dounds if schema is unbound\n if match_bounds:\n if any(l is None for l in hi1):\n tops = src.unpack('_').max().toarray()\n hi1 = [int(tops['%s_max' % l][0]) for l in src.dim_names]\n if any(l is None for l in hi2):\n tops = target.unpack('_').max().toarray()\n hi2 = [int(tops['%s_max' % l][0]) for l in target.dim_names]\n\n for i, j in indices:\n if not isinstance(i, int):\n i = target.dim_names.index(i)\n if not isinstance(j, int):\n j = src.dim_names.index(j)\n ds.chunk_size[j] = target.datashape.chunk_size[i]\n ds.chunk_overlap[j] = target.datashape.chunk_overlap[i]\n if match_bounds:\n l = min(ds.dim_low[j], ds_target.dim_low[i])\n h = max(hi1[j], hi2[i])\n\n ds.dim_low[j] = l\n ds.dim_high[j] = h\n ds_target.dim_low[i] = l\n ds_target.dim_high[i] = h\n\n if ds.schema != src.datashape.schema:\n src = src.redimension(ds.schema)\n if ds_target.schema != target.datashape.schema:\n target = target.redimension(ds_target.schema)\n\n return src, target", "def _split_generators(self, dl_manager: tfds.download.DownloadManager):\n \n download_server = environ.get('ROAD_OBSTACLE_URL')\n if download_server is None:\n raise RuntimeError('Please specify server URL as ROAD_OBSTACLE_URL env variable.')\n\n download_url = download_server + \"/dataset_RoadObstacle_0.0.3.zip\"\n download_dir = dl_manager.download_and_extract(download_url)\n\n data_dir = Path(download_dir) / 'dataset_RoadObstacle'\n\n splits = json.loads((data_dir / 'splits.json').read_text())\n\n make_split_entry = lambda name, key: SplitGenerator(\n name=name, \n gen_kwargs = dict(data_dir=str(data_dir), split=key)\n )\n\n return [\n make_split_entry(tfds.Split.TEST, 'full')\n ] + [\n make_split_entry(k, k)\n for k in sorted(splits.keys())\n ]", "def _createMap(self):\n width = self.map_size[0] * self.chunk_size\n height = self.map_size[1] * self.chunk_size\n map_array = np.zeros((height, width), dtype=float)\n chunks = {}\n clist = []\n for i in range(0, self.map_size[0]*self.map_size[1]):\n chunks[i+1] = Chunk(self)\n chunk_array = np.asarray(list(chunks.keys()))\n 
chunk_array.resize(self.map_size[0], self.map_size[1])\n return map_array, chunk_array, chunks", "def reshape(self, bottom, top):\n\t\tpass", "def test_split_adds_known_neighbours(mock_amg):\n\n mock_amg.cells[4].split()\n # bl\n assert mock_amg.cells[-4].north is mock_amg.cells[-2]\n assert mock_amg.cells[-4].east is mock_amg.cells[-3]\n\n # br\n assert mock_amg.cells[-3].north is mock_amg.cells[-1]\n assert mock_amg.cells[-3].west is mock_amg.cells[-4]\n\n # tl\n assert mock_amg.cells[-2].south is mock_amg.cells[-4]\n assert mock_amg.cells[-2].east is mock_amg.cells[-1]\n\n # tr\n assert mock_amg.cells[-1].south is mock_amg.cells[-3]\n assert mock_amg.cells[-1].west is mock_amg.cells[-2]", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass" ]
[ "0.5663573", "0.56367624", "0.5591895", "0.55197525", "0.5284474", "0.52686596", "0.5200579", "0.51957935", "0.5177453", "0.51321024", "0.5119517", "0.50274366", "0.49972472", "0.4958767", "0.49391246", "0.49228847", "0.49190336", "0.486945", "0.4841022", "0.48311082", "0.48113534", "0.48052412", "0.47980168", "0.47977474", "0.47914433", "0.4783627", "0.47750366", "0.47708124", "0.47708124", "0.47708124" ]
0.66152096
0
Randomly samples negative pixel indices.
def _sample_negative(self, positives): max_val = self._H * self._W num_pos = len(positives) num_neg = int(num_pos * self._sample_ratio) positives = np.round(positives).astype("int") positives = positives[:, :2] positives = np.ravel_multi_index((positives[:, 0], positives[:, 1]), (self._H, self._W)) if self._sample_ratio < 70: negative_indices = [] while len(negative_indices) < num_neg: negative = np.random.randint(0, max_val) if negative not in positives: negative_indices.append(negative) else: allowed = list(set(np.arange(0, max_val)) - set(positives.ravel())) np.random.shuffle(allowed) negative_indices = allowed[:num_neg] negative_indices = np.unravel_index(negative_indices, (self._H, self._W)) return negative_indices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sample_free_negative(self, kit_mask):\n max_val = self._H * self._W\n num_neg = int(100 * self._sample_ratio)\n negative_indices = []\n while len(negative_indices) < num_neg:\n negative_indices.append(np.random.randint(0, max_val))\n negative_indices = np.vstack(np.unravel_index(negative_indices, (self._H, self._W))).T\n idxs = np.random.choice(np.arange(len(kit_mask)), size=30, replace=False)\n inside = kit_mask[idxs]\n negative_indices = np.vstack([negative_indices, inside])\n return negative_indices", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n return neg_inds\n else:\n return self.random_choice(neg_inds, num_expected)", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n repeat_ = num_expected // neg_inds.numel()\n return torch.cat((neg_inds.repeat(repeat_), self.random_choice(neg_inds, num_expected % neg_inds.numel())))\n else:\n return self.random_choice(neg_inds, num_expected)", "def sampleNegativeImages(images, negativeSample, size=(64, 64), N=200):\n # Initialize internal state of the random number generator.\n random.seed(1)\n\n # Final image resolution.\n w, h = size[0], size[1]\n\n resizedImages = []\n \n for image in images:\n res = cv2.resize(image, dsize=(1728, 1152), interpolation=cv2.INTER_CUBIC)\n resizedImages.append(res)\n\n for image in resizedImages:\n images.append(image)\n\n # Read all images from the negative list.\n\n i = 0\n for image in images:\n\n if i > 4:\n N = 100\n for j in range(N):\n # random.random produced random number in [0,1) range\n y = int(random.random() * (len(image) - h))\n x = int(random.random() * (len(image[0]) - w))\n sample = image[y:y + h, x:x + w].copy()\n negativeSample.append(sample)\n\n # Create Afine transform\n afine_tf = tf.AffineTransform(shear = random.uniform(-0.2,0.2))\n # Apply transform to image data\n shearedImage = tf.warp(sample, inverse_map=afine_tf)\n negativeSample.append(shearedImage)\n i = i + 1\n\n return", "def under_sample(pos_nids, neg_nids, scale=1):\n index = np.arange(neg_nids.shape[0])\n index = np.random.RandomState().permutation(index)\n N = min(int(pos_nids.shape[0] * scale), neg_nids.shape[0])\n index = index[0: N]\n neg_sampled = neg_nids[index]\n sampled_nids = torch.cat((pos_nids, neg_sampled))\n\n return sampled_nids", "def negative_sampling(self):\n \n self.train_arr = []\n sample_list = np.random.choice(list(range(self.item_count)), size = 10 * len(self.interactions) * self.num_ns)\n \n sample_idx = 0\n for user, pos_item, _ in self.interactions:\n ns_count = 0\n \n while True:\n neg_item = sample_list[sample_idx]\n if not is_visited(self.rating_mat, user, neg_item):\n self.train_arr.append((user, pos_item, neg_item))\n sample_idx += 1\n ns_count += 1\n if ns_count == self.num_ns:\n break\n \n sample_idx += 1", "def no_resampling(random_state: np.random.RandomState,\n indices: np.ndarray) -> np.ndarray:\n return indices", "def global_uniform_negative_sampling(\n self, num_samples, exclude_self_loops=True, replace=False, etype=None\n ):\n raise NotImplementedError(\n \"global_uniform_negative_sampling not implemented yet\"\n )", "def generate_negative_sample_list(self, xc_start):\r\n return [self.get_random_Nb_sample(xc_start) for _ in range(self.Nb)]", "def 
sample(self, omit):\n w2id_list = list(self.w2id.values())\n q_list = list(self.q.values())\n negativeIds = np.random.choice(w2id_list, size=self.negativeRate, p=q_list)\n for i in range(len(negativeIds)):\n if negativeIds[i] in omit:\n while negativeIds[i] in omit:\n negativeIds[i] = np.random.choice(w2id_list, p=q_list)\n return negativeIds", "def negative_sampling(data: pd.DataFrame,\n vocab: np.ndarray,\n noise_distribution: list,\n neg_sample_size: int\n ) -> pd.DataFrame:\n \n def samples_generator(word: str\n ) -> List[str]:\n while True:\n samples = np.random.choice(\n vocab, neg_sample_size, p=noise_distribution\n )\n if word not in samples:\n return samples\n \n data['negative_samples'] = data['centre_word'].apply(samples_generator)\n return data", "def add_uniform_random_negatives(\n ds,\n num_nodes,\n num_negs_per_pos,\n):\n negative_sampler = RandomUniformNegativeSampler(num_nodes, num_negs_per_pos)\n return ds.map(\n negative_sampler, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE\n )", "def sample_negative_answers(self, answer_list, batch_size):\n return np.random.choice(answer_list, batch_size)", "def get_negative_sample(context, num, prob, Gn):\n\tnegative_list = []\n\twhile len(negative_list) < Gn:\n\t\tnegative_sample = np.random.choice(num, p=prob.ravel())\n\t\tif negative_sample != context:\n\t\t\tnegative_list.append(negative_sample)\n\t\telse:\n\t\t\tpass\n\treturn np.array([negative_list])", "def random_sample_from_masked_image(img_mask, num_samples):\n idx_tuple = img_mask.nonzero()\n num_nonzero = len(idx_tuple[0])\n if num_nonzero == 0:\n empty_list = []\n return empty_list\n rand_inds = random.sample(range(0,num_nonzero), num_samples)\n\n sampled_idx_list = []\n for i, idx in enumerate(idx_tuple):\n sampled_idx_list.append(idx[rand_inds])\n\n return sampled_idx_list", "def sample_negative(ratings):\r\n ## user_pool = set(ratings['userId'].unique())\r\n item_pool = set(ratings['itemId'].unique())\r\n\r\n interact_status = ratings.groupby('userId')['itemId'].apply(set).reset_index().rename(\r\n columns={'itemId': 'interacted_items'})\r\n interact_status['negative_items'] = interact_status['interacted_items'].apply(lambda x: item_pool - x)\r\n interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(x, 99))\r\n return interact_status[['userId', 'negative_samples']]", "def return_neg_index(self, index, tneg, window_size):\r\n midlow = max(0, index-(tneg//window_size))\r\n midhigh = min(len(self.data)-1, index+(tneg//window_size))\r\n assert (midlow > 0 or midhigh < len(self.data)) # check if it's even possible to return a negative index\r\n trial = np.random.randint(0, len(self.data))\r\n while trial >= midlow and trial <= midhigh:\r\n trial = np.random.randint(0, len(self.data)) # keep trying\r\n return trial", "def return_neg_index(self, index, tneg, window_size):\r\n midlow = max(0, index-(tneg//window_size))\r\n midhigh = min(len(self.data)-1, index+(tneg//window_size))\r\n assert (midlow > 0 or midhigh < len(self.data)) # check if it's even possible to return a negative index\r\n trial = np.random.randint(0, len(self.data))\r\n while trial >= midlow and trial <= midhigh:\r\n trial = np.random.randint(0, len(self.data)) # keep trying\r\n return trial", "def sampleNo(xvar, yvar, N, avoididx):\n\n allidx = np.arange(0, len(xvar)*len(yvar)) # flattened array of all indices in mesh\n noidx = np.setxor1d(allidx, avoididx) #allidx - avoididx\n #noidx = np.array(list(set(allidx) - set(avoididx)))\n nosampleidx = 
np.random.choice(noidx, size=N,replace=False)\n newavoididx = np.sort(np.hstack((avoididx, nosampleidx)))\n rowidx,colidx = np.unravel_index(nosampleidx, (len(yvar), len(xvar)))\n samples = []\n for row,col in zip(rowidx, colidx):\n xp = xvar[col]\n yp = yvar[row]\n samples.append((xp, yp))\n\n return (samples, newavoididx)", "def test_no_duplicates_and_positives_in_negative_sample(self):\n model = PoincareModel(self.data_large, negative=3)\n positive_nodes = model.node_relations[0] # Positive nodes for node 0\n num_samples = 100 # Repeat experiment multiple times\n for i in range(num_samples):\n negatives = model._sample_negatives(0)\n self.assertFalse(positive_nodes & set(negatives))\n self.assertEqual(len(negatives), len(set(negatives)))", "def neg_sampling_transform(data):\n train_neg_edge_index = negative_sampling(\n edge_index=data.train_pos_edge_index, num_nodes=data.num_nodes,\n num_neg_samples=data.train_pos_edge_index.size(1))\n data.train_edge_index = torch.cat(\n [data.train_pos_edge_index, train_neg_edge_index], dim=-1)\n data.train_edge_label = create_link_label(data.train_pos_edge_index,\n train_neg_edge_index)\n\n return data", "def _negative_sample_user(interaction_row: np.array, neg_pos_ratio: int, extra_samples: int) -> Tuple[List[int], int]:\n # Find all items that are not interacted with by the user\n neg_items = np.where(interaction_row == 0)[1]\n available_samples = len(neg_items)\n\n # Randomly sample negative items\n npos = interaction_row.shape[1] - len(neg_items)\n samples_required = npos * neg_pos_ratio + extra_samples\n should_sample = samples_required <= available_samples\n\n neg_items = np.random.choice(neg_items, samples_required, replace=False) if should_sample else neg_items\n\n return neg_items.tolist(), max(0, samples_required - available_samples)", "def get_random_indexes(msk, n_idxs):\n pos_idxs = np.array(np.where(msk > 0))\n neg_idxs = np.array(np.where(msk == 0))\n n_pos = pos_idxs.shape[1] # number of positives found in the mask\n n_neg = neg_idxs.shape[1] # number of negatives found in the mask\n n_min = min(n_neg, min(n_idxs, n_pos))\n rnd_idxs_pos = range(n_pos)\n np.random.shuffle(rnd_idxs_pos)\n rnd_idxs_neg = range(n_neg)\n np.random.shuffle(rnd_idxs_neg)\n return pos_idxs[:, rnd_idxs_pos[:n_min]], neg_idxs[:, rnd_idxs_neg[:n_min]]", "def sample(self):\n u = np.asarray(np.random.uniform())\n return self.invert(u)", "def generate_negative_samples(self, data, sampled_data, zeros=[], validation=False):\n negative_sampled_data = []\n negative_sampled_indices = []\n for sample in sampled_data:\n i = data['pos'].index(sample) ## index of a particular move in a demo\n all_num = 0\n for which, num in enumerate(data['leng_pos']):\n all_num += num\n if all_num > i:\n which_demo = which ## index of a demo the move with index i comes from\n break\n\n sum_neg_lengths = sum(data['leng_neg'][:which_demo])\n\n key = sum_neg_lengths-1 \n value = sum_neg_lengths + data['leng_neg'][which_demo]\n demo_negative_data = data['neg'][key : value]\n state, action = sample\n for demo_state, demo_action in demo_negative_data:\n if demo_state == state:\n negative_sampled_data.extend([(demo_state, demo_action)])\n demo_index = data['neg'].index((demo_state, demo_action))\n negative_sampled_indices.append(demo_index)\n\n if not validation:\n num_pos = sum(self.pipeline_y == 1)\n num_neg = len(negative_sampled_data)\n pos_sample = self.pipeline_X[:num_pos, :]\n neg_sample = self.pipeline_X[num_pos + negative_sampled_indices, :]\n y_vector = [1] * num_pos + [0] * num_neg\n 
######################### Mouselab ad-hc #########################\n ########################## Removing 0's ##########################\n non_zero = [self.pipeline_X[i, :] for i in range(num_pos)\n if i not in zeros]\n pos_sample = vstack(non_zero) if non_zero != [] else self.pipeline_X[0,:]\n num_pos = pos_sample.shape[0]\n y_vector = [1] * num_pos + [0] * num_neg\n ##################################################################\n\n self.pipeline_X = vstack((pos_sample, neg_sample))\n self.pipeline_y = np.array(y_vector, dtype='uint8')\n \n return negative_sampled_data", "def draw_negative_sample(c):\n caption = c\n while True:\n assert len(captions) >= 2 # in case we ran out...\n capt_idx = r.randint(len(captions))\n caption = captions[capt_idx]\n if caption == c:\n continue\n if len(idxes_by_caption[caption]) == 0:\n del captions[capt_idx]\n del idxes_by_caption[caption]\n continue\n # print('caption', caption)\n res = idxes_by_caption[caption].pop()\n if len(idxes_by_caption[caption]) == 0:\n del idxes_by_caption[caption]\n del captions[capt_idx]\n return res", "def sample_from_zero_axis(x):\n idx = np.random.choice(x.shape[0], 1, replace=True)[0]\n return x[idx], idx", "def chooseRandPixel(mask):\n array = np.transpose(np.nonzero(mask)) # Get the indices of nonzero elements of mask.\n index = random.randint(0,len(array)-1) # Select a random index\n return array[index]", "def _sample_from_zeros(n: int, sparse: sp.csr_matrix) -> List[List[int]]:\n zeros = np.argwhere(np.logical_not(sparse.todense()))\n ids = np.random.choice(range(len(zeros)), size=(n,))\n return zeros[ids].tolist()", "def white_noise():\n return random.randint(-32767, 32767)" ]
[ "0.7825663", "0.7001766", "0.69911414", "0.6830196", "0.6780321", "0.67745644", "0.6610074", "0.65719396", "0.6553887", "0.6516553", "0.65040195", "0.6457263", "0.6457089", "0.64084995", "0.6372672", "0.63520557", "0.63315266", "0.63315266", "0.6311688", "0.6283116", "0.62791324", "0.6253508", "0.6248981", "0.62093145", "0.61740565", "0.6154053", "0.61469406", "0.6141533", "0.614083", "0.61237544" ]
0.7932355
0
Randomly samples negative pixel indices.
def _sample_free_negative(self, kit_mask): max_val = self._H * self._W num_neg = int(100 * self._sample_ratio) negative_indices = [] while len(negative_indices) < num_neg: negative_indices.append(np.random.randint(0, max_val)) negative_indices = np.vstack(np.unravel_index(negative_indices, (self._H, self._W))).T idxs = np.random.choice(np.arange(len(kit_mask)), size=30, replace=False) inside = kit_mask[idxs] negative_indices = np.vstack([negative_indices, inside]) return negative_indices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sample_negative(self, positives):\n max_val = self._H * self._W\n num_pos = len(positives)\n num_neg = int(num_pos * self._sample_ratio)\n positives = np.round(positives).astype(\"int\")\n positives = positives[:, :2]\n positives = np.ravel_multi_index((positives[:, 0], positives[:, 1]), (self._H, self._W))\n if self._sample_ratio < 70:\n negative_indices = []\n while len(negative_indices) < num_neg:\n negative = np.random.randint(0, max_val)\n if negative not in positives:\n negative_indices.append(negative)\n else:\n allowed = list(set(np.arange(0, max_val)) - set(positives.ravel()))\n np.random.shuffle(allowed)\n negative_indices = allowed[:num_neg]\n negative_indices = np.unravel_index(negative_indices, (self._H, self._W))\n return negative_indices", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n return neg_inds\n else:\n return self.random_choice(neg_inds, num_expected)", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n repeat_ = num_expected // neg_inds.numel()\n return torch.cat((neg_inds.repeat(repeat_), self.random_choice(neg_inds, num_expected % neg_inds.numel())))\n else:\n return self.random_choice(neg_inds, num_expected)", "def sampleNegativeImages(images, negativeSample, size=(64, 64), N=200):\n # Initialize internal state of the random number generator.\n random.seed(1)\n\n # Final image resolution.\n w, h = size[0], size[1]\n\n resizedImages = []\n \n for image in images:\n res = cv2.resize(image, dsize=(1728, 1152), interpolation=cv2.INTER_CUBIC)\n resizedImages.append(res)\n\n for image in resizedImages:\n images.append(image)\n\n # Read all images from the negative list.\n\n i = 0\n for image in images:\n\n if i > 4:\n N = 100\n for j in range(N):\n # random.random produced random number in [0,1) range\n y = int(random.random() * (len(image) - h))\n x = int(random.random() * (len(image[0]) - w))\n sample = image[y:y + h, x:x + w].copy()\n negativeSample.append(sample)\n\n # Create Afine transform\n afine_tf = tf.AffineTransform(shear = random.uniform(-0.2,0.2))\n # Apply transform to image data\n shearedImage = tf.warp(sample, inverse_map=afine_tf)\n negativeSample.append(shearedImage)\n i = i + 1\n\n return", "def under_sample(pos_nids, neg_nids, scale=1):\n index = np.arange(neg_nids.shape[0])\n index = np.random.RandomState().permutation(index)\n N = min(int(pos_nids.shape[0] * scale), neg_nids.shape[0])\n index = index[0: N]\n neg_sampled = neg_nids[index]\n sampled_nids = torch.cat((pos_nids, neg_sampled))\n\n return sampled_nids", "def negative_sampling(self):\n \n self.train_arr = []\n sample_list = np.random.choice(list(range(self.item_count)), size = 10 * len(self.interactions) * self.num_ns)\n \n sample_idx = 0\n for user, pos_item, _ in self.interactions:\n ns_count = 0\n \n while True:\n neg_item = sample_list[sample_idx]\n if not is_visited(self.rating_mat, user, neg_item):\n self.train_arr.append((user, pos_item, neg_item))\n sample_idx += 1\n ns_count += 1\n if ns_count == self.num_ns:\n break\n \n sample_idx += 1", "def no_resampling(random_state: np.random.RandomState,\n indices: np.ndarray) -> np.ndarray:\n return indices", "def global_uniform_negative_sampling(\n self, num_samples, exclude_self_loops=True, 
replace=False, etype=None\n ):\n raise NotImplementedError(\n \"global_uniform_negative_sampling not implemented yet\"\n )", "def generate_negative_sample_list(self, xc_start):\r\n return [self.get_random_Nb_sample(xc_start) for _ in range(self.Nb)]", "def sample(self, omit):\n w2id_list = list(self.w2id.values())\n q_list = list(self.q.values())\n negativeIds = np.random.choice(w2id_list, size=self.negativeRate, p=q_list)\n for i in range(len(negativeIds)):\n if negativeIds[i] in omit:\n while negativeIds[i] in omit:\n negativeIds[i] = np.random.choice(w2id_list, p=q_list)\n return negativeIds", "def negative_sampling(data: pd.DataFrame,\n vocab: np.ndarray,\n noise_distribution: list,\n neg_sample_size: int\n ) -> pd.DataFrame:\n \n def samples_generator(word: str\n ) -> List[str]:\n while True:\n samples = np.random.choice(\n vocab, neg_sample_size, p=noise_distribution\n )\n if word not in samples:\n return samples\n \n data['negative_samples'] = data['centre_word'].apply(samples_generator)\n return data", "def sample_negative_answers(self, answer_list, batch_size):\n return np.random.choice(answer_list, batch_size)", "def add_uniform_random_negatives(\n ds,\n num_nodes,\n num_negs_per_pos,\n):\n negative_sampler = RandomUniformNegativeSampler(num_nodes, num_negs_per_pos)\n return ds.map(\n negative_sampler, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE\n )", "def get_negative_sample(context, num, prob, Gn):\n\tnegative_list = []\n\twhile len(negative_list) < Gn:\n\t\tnegative_sample = np.random.choice(num, p=prob.ravel())\n\t\tif negative_sample != context:\n\t\t\tnegative_list.append(negative_sample)\n\t\telse:\n\t\t\tpass\n\treturn np.array([negative_list])", "def random_sample_from_masked_image(img_mask, num_samples):\n idx_tuple = img_mask.nonzero()\n num_nonzero = len(idx_tuple[0])\n if num_nonzero == 0:\n empty_list = []\n return empty_list\n rand_inds = random.sample(range(0,num_nonzero), num_samples)\n\n sampled_idx_list = []\n for i, idx in enumerate(idx_tuple):\n sampled_idx_list.append(idx[rand_inds])\n\n return sampled_idx_list", "def sample_negative(ratings):\r\n ## user_pool = set(ratings['userId'].unique())\r\n item_pool = set(ratings['itemId'].unique())\r\n\r\n interact_status = ratings.groupby('userId')['itemId'].apply(set).reset_index().rename(\r\n columns={'itemId': 'interacted_items'})\r\n interact_status['negative_items'] = interact_status['interacted_items'].apply(lambda x: item_pool - x)\r\n interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(x, 99))\r\n return interact_status[['userId', 'negative_samples']]", "def return_neg_index(self, index, tneg, window_size):\r\n midlow = max(0, index-(tneg//window_size))\r\n midhigh = min(len(self.data)-1, index+(tneg//window_size))\r\n assert (midlow > 0 or midhigh < len(self.data)) # check if it's even possible to return a negative index\r\n trial = np.random.randint(0, len(self.data))\r\n while trial >= midlow and trial <= midhigh:\r\n trial = np.random.randint(0, len(self.data)) # keep trying\r\n return trial", "def return_neg_index(self, index, tneg, window_size):\r\n midlow = max(0, index-(tneg//window_size))\r\n midhigh = min(len(self.data)-1, index+(tneg//window_size))\r\n assert (midlow > 0 or midhigh < len(self.data)) # check if it's even possible to return a negative index\r\n trial = np.random.randint(0, len(self.data))\r\n while trial >= midlow and trial <= midhigh:\r\n trial = np.random.randint(0, len(self.data)) # keep trying\r\n return trial", 
"def sampleNo(xvar, yvar, N, avoididx):\n\n allidx = np.arange(0, len(xvar)*len(yvar)) # flattened array of all indices in mesh\n noidx = np.setxor1d(allidx, avoididx) #allidx - avoididx\n #noidx = np.array(list(set(allidx) - set(avoididx)))\n nosampleidx = np.random.choice(noidx, size=N,replace=False)\n newavoididx = np.sort(np.hstack((avoididx, nosampleidx)))\n rowidx,colidx = np.unravel_index(nosampleidx, (len(yvar), len(xvar)))\n samples = []\n for row,col in zip(rowidx, colidx):\n xp = xvar[col]\n yp = yvar[row]\n samples.append((xp, yp))\n\n return (samples, newavoididx)", "def test_no_duplicates_and_positives_in_negative_sample(self):\n model = PoincareModel(self.data_large, negative=3)\n positive_nodes = model.node_relations[0] # Positive nodes for node 0\n num_samples = 100 # Repeat experiment multiple times\n for i in range(num_samples):\n negatives = model._sample_negatives(0)\n self.assertFalse(positive_nodes & set(negatives))\n self.assertEqual(len(negatives), len(set(negatives)))", "def neg_sampling_transform(data):\n train_neg_edge_index = negative_sampling(\n edge_index=data.train_pos_edge_index, num_nodes=data.num_nodes,\n num_neg_samples=data.train_pos_edge_index.size(1))\n data.train_edge_index = torch.cat(\n [data.train_pos_edge_index, train_neg_edge_index], dim=-1)\n data.train_edge_label = create_link_label(data.train_pos_edge_index,\n train_neg_edge_index)\n\n return data", "def _negative_sample_user(interaction_row: np.array, neg_pos_ratio: int, extra_samples: int) -> Tuple[List[int], int]:\n # Find all items that are not interacted with by the user\n neg_items = np.where(interaction_row == 0)[1]\n available_samples = len(neg_items)\n\n # Randomly sample negative items\n npos = interaction_row.shape[1] - len(neg_items)\n samples_required = npos * neg_pos_ratio + extra_samples\n should_sample = samples_required <= available_samples\n\n neg_items = np.random.choice(neg_items, samples_required, replace=False) if should_sample else neg_items\n\n return neg_items.tolist(), max(0, samples_required - available_samples)", "def get_random_indexes(msk, n_idxs):\n pos_idxs = np.array(np.where(msk > 0))\n neg_idxs = np.array(np.where(msk == 0))\n n_pos = pos_idxs.shape[1] # number of positives found in the mask\n n_neg = neg_idxs.shape[1] # number of negatives found in the mask\n n_min = min(n_neg, min(n_idxs, n_pos))\n rnd_idxs_pos = range(n_pos)\n np.random.shuffle(rnd_idxs_pos)\n rnd_idxs_neg = range(n_neg)\n np.random.shuffle(rnd_idxs_neg)\n return pos_idxs[:, rnd_idxs_pos[:n_min]], neg_idxs[:, rnd_idxs_neg[:n_min]]", "def sample(self):\n u = np.asarray(np.random.uniform())\n return self.invert(u)", "def generate_negative_samples(self, data, sampled_data, zeros=[], validation=False):\n negative_sampled_data = []\n negative_sampled_indices = []\n for sample in sampled_data:\n i = data['pos'].index(sample) ## index of a particular move in a demo\n all_num = 0\n for which, num in enumerate(data['leng_pos']):\n all_num += num\n if all_num > i:\n which_demo = which ## index of a demo the move with index i comes from\n break\n\n sum_neg_lengths = sum(data['leng_neg'][:which_demo])\n\n key = sum_neg_lengths-1 \n value = sum_neg_lengths + data['leng_neg'][which_demo]\n demo_negative_data = data['neg'][key : value]\n state, action = sample\n for demo_state, demo_action in demo_negative_data:\n if demo_state == state:\n negative_sampled_data.extend([(demo_state, demo_action)])\n demo_index = data['neg'].index((demo_state, demo_action))\n 
negative_sampled_indices.append(demo_index)\n\n if not validation:\n num_pos = sum(self.pipeline_y == 1)\n num_neg = len(negative_sampled_data)\n pos_sample = self.pipeline_X[:num_pos, :]\n neg_sample = self.pipeline_X[num_pos + negative_sampled_indices, :]\n y_vector = [1] * num_pos + [0] * num_neg\n ######################### Mouselab ad-hc #########################\n ########################## Removing 0's ##########################\n non_zero = [self.pipeline_X[i, :] for i in range(num_pos)\n if i not in zeros]\n pos_sample = vstack(non_zero) if non_zero != [] else self.pipeline_X[0,:]\n num_pos = pos_sample.shape[0]\n y_vector = [1] * num_pos + [0] * num_neg\n ##################################################################\n\n self.pipeline_X = vstack((pos_sample, neg_sample))\n self.pipeline_y = np.array(y_vector, dtype='uint8')\n \n return negative_sampled_data", "def draw_negative_sample(c):\n caption = c\n while True:\n assert len(captions) >= 2 # in case we ran out...\n capt_idx = r.randint(len(captions))\n caption = captions[capt_idx]\n if caption == c:\n continue\n if len(idxes_by_caption[caption]) == 0:\n del captions[capt_idx]\n del idxes_by_caption[caption]\n continue\n # print('caption', caption)\n res = idxes_by_caption[caption].pop()\n if len(idxes_by_caption[caption]) == 0:\n del idxes_by_caption[caption]\n del captions[capt_idx]\n return res", "def sample_from_zero_axis(x):\n idx = np.random.choice(x.shape[0], 1, replace=True)[0]\n return x[idx], idx", "def _sample_from_zeros(n: int, sparse: sp.csr_matrix) -> List[List[int]]:\n zeros = np.argwhere(np.logical_not(sparse.todense()))\n ids = np.random.choice(range(len(zeros)), size=(n,))\n return zeros[ids].tolist()", "def chooseRandPixel(mask):\n array = np.transpose(np.nonzero(mask)) # Get the indices of nonzero elements of mask.\n index = random.randint(0,len(array)-1) # Select a random index\n return array[index]", "def white_noise():\n return random.randint(-32767, 32767)" ]
[ "0.79328346", "0.70020574", "0.6991375", "0.6832262", "0.67802733", "0.6776218", "0.6609127", "0.65733886", "0.6555638", "0.6516547", "0.6505941", "0.6458264", "0.6458162", "0.6410023", "0.6369915", "0.63526434", "0.6331508", "0.6331508", "0.6309954", "0.62844604", "0.62815195", "0.6253953", "0.62462926", "0.62069887", "0.61764365", "0.6154446", "0.61437905", "0.6139822", "0.61373824", "0.61238366" ]
0.7826438
1
Returns Enclosure Manager configuration information for the specified enclosure name.
def get_enclosure_configuration(enclosure_name):
    for name in enclosure_configurations:
        if enclosure_name == name:
            return enclosure_configurations[name]
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_variables(enclosure_name=None):\n variables = enclosure_defaults\n\n # Get enclosure configuration\n if enclosure_name is not None:\n print \"enclosure name: %s\" % enclosure_name\n enclosure_configuration = get_enclosure_configuration(enclosure_name)\n if enclosure_configuration is not None:\n for key in enclosure_configuration:\n variables[key] = enclosure_configuration[key]\n origIP = variables['EM_IP']\n print \"EM_IP is Static: %s.\" % variables['EM_IP']\n variables['EM_IP'] = get_enclosure_manager_ip(variables)\n if variables['EM_IP'] == None:\n variables['EM_IP'] = origIP\n print \"EM_IP is FloatingIp: %s.\" % variables['EM_IP']\n else:\n print \"WARNING: Enclosure '%s' is not known configuration.\" % enclosure_name\n return variables", "def config(name):\n return RepoListApp._config[name]", "def get_module_config(name):\n return _CONFIGS[name]", "def get_config(self, name):\n return self.configs[name][0]", "def get_config_descr(self, name):\n return self.configs[name][1]", "def get_srv_config(name):\n cmd = \"ceph --admin-daemon %s/%s.asok config show\" % \\\n (CEPH_SOCKET_PATH, name)\n out = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, \\\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n return json.loads(out.stdout.read())", "def get_app_cfg_by_name(self, name: str) -> Any:\n\n config_name = self.sanitize_configuration_option_name(name)\n return getattr(self.parent.env.config, config_name, NONE)", "def __getitem__(self, name : str) -> Any:\n return self._client.get_config()[name]", "def get_config(self):\n return {'reduction': self.reduction, 'name': self.name}", "def get_config(name: str):\n conf_file = Path(__file__).parent.joinpath(\"configs\").joinpath(name)\n return console.read_config(conf_file)", "def get_module_command_handler_config(self, name: str):\n handler = self.get_module_command_handler(name)\n if handler is None or not hasattr(handler, FILEBASE_API_API_METHOD_MARKER_CONFIG_ATTRIB_NAME):\n return None\n\n return getattr(handler, FILEBASE_API_API_METHOD_MARKER_CONFIG_ATTRIB_NAME)", "def GetConfig(self, genname):\n if genname in self.config:\n return self.config[genname]\n else:\n return dbus.Dictionary(signature='sv')", "def config(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['Config']", "def _getConfigName(self):\n pass", "def get_configuration(self, env, package_manager_name):\n if str(env) not in [Constants.DEV, Constants.TEST, Constants.PROD]:\n print (\"Error: Environment configuration not supported - \" + str(env))\n return None\n\n if str(package_manager_name) not in [Constants.APT, Constants.YUM, Constants.ZYPPER]:\n print (\"Error: Package manager configuration not supported - \" + str(package_manager_name))\n return None\n\n configuration_key = str.lower('{0}_{1}_config'.format(str(package_manager_name), str(env)))\n selected_configuration = self.configurations[configuration_key]\n return selected_configuration", "def name(self):\n return self._config_name", "def get_config_by_name(name):\r\n attrs = get_config_attributes(name)\r\n if attrs['online']:\r\n config = CONFIGS[name]()\r\n else:\r\n config = CONFIGS[name]\r\n \r\n # setting the Tune Run name attribute\r\n config['_call']['name'] = name\r\n \r\n return config", "def get_config_emoji(self, emoji_name: str) -> str:\n return attrgetter(emoji_name)(self.emoji_config)", "def get_config(name):\n db = dbm.open(config_file, 'c')\n url = db[name]\n db.close()\n return url", "def manager_config(self, manager):\n _, body = 
self.request('/v1.1/managers/configs/%s' % manager, 'GET')\n return body", "def get_config_name(self): # pragma: no cover\n pass", "def _get_adapter_config(self):\n proxy = self.core.get_proxy('/')\n try:\n config = proxy.get('/adapters/' + self.adapter_name)\n return config\n except KeyError:\n return None", "def get_drive_enclosure_information(self):\n\n for device in self._devices:\n if not device['Device Type'].startswith(\"disk\"):\n continue\n enc_device, device_slot, element_index = self.get_device_slot(device['SAS Address'])\n device['Enclosure Device'] = enc_device\n device['Enclosure Slot'] = device_slot\n device['Slot Description'] = self.get_array_desc_text(enc_device, element_index)", "def addons_config():\n # type () -> dict\n addons_json_path = 'addons.json'\n addons_json_path = os.path.join(_HERE, addons_json_path)\n with open(addons_json_path, encoding='utf-8') as addons_json:\n return json.load(addons_json)", "def _getConfigName(self):\n return \"%s_processCoadd_config\" % (self.config.coaddName,)", "def config(self):\n return self.namespace['config']", "def get_details(self):\n return self.__config_data", "def get_conf(name: str):\n name = name if name.split(\".\")[-1] == \"yaml\" else name + \".yaml\"\n cfg = OmegaConf.load(name)\n return cfg", "def configStore(self):\n return getattr(self._data.configStore, self._name)", "def name(self):\n return self._config.get(CONF_NAME)" ]
[ "0.5719812", "0.56810486", "0.5645006", "0.5638644", "0.563859", "0.5427897", "0.539716", "0.5308325", "0.53053176", "0.52962", "0.526522", "0.5247232", "0.5241141", "0.52331144", "0.52289283", "0.518747", "0.5168693", "0.51547784", "0.51333475", "0.51181686", "0.5108758", "0.50767934", "0.5058746", "0.50518703", "0.49826992", "0.49574548", "0.4954708", "0.4944129", "0.4943994", "0.49300578" ]
0.7809599
0
Takes in tokens, marks them by POS, finds NEs, returns consolidated list of NEs
def chunk(tokens):

    # Uses NLTK function to pair each token with its Part Of Speech
    entity_list = []
    pos = nltk.pos_tag(tokens)
    named_entities_chunk = nltk.ne_chunk(pos, binary=True)

    # Finds named entities in tokens, stores in list of strings
    for i in range(0, len(named_entities_chunk)):
        ents = named_entities_chunk.pop()
        if getattr(ents, 'label', None) is not None and ents.label() == "NE":
            entity_list.append([ne for ne in ents])

    # Combines named entity components, pulls off the POS labels
    return [' '.join(next(zip(*l))) for l in entity_list]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_nerspos(tokens, ners):\n pos_list = list()\n for ner in ners:\n pos = get_nerpos(tokens, ner)\n pos_list.append(pos)\n\n return pos_list", "def get_nerpos(tokens, ner):\n\n loc = list()\n for i, token in enumerate(tokens):\n if token == ner:\n loc.append(i)\n return loc", "def merge_ners(tokens):\n ners = list()\n merged_tokens = list()\n\n candid_entity = list()\n keep = False\n prev_tag = 'O'\n\n for i, (token, tag) in enumerate(tokens):\n if keep:\n if tag not in IGNORE_NER_TAG:\n candid_entity.append(token)\n keep = True\n else:\n # ner ends in prev step\n merged_tokens.append(candid_entity)\n merged_tokens.append(token)\n ners.append((candid_entity, prev_tag))\n keep = False\n else:\n if tag not in IGNORE_NER_TAG:\n # new ner\n candid_entity = list()\n candid_entity.append(token)\n keep = True\n else:\n # not ner token\n merged_tokens.append(token)\n prev_tag = tag\n\n return ners, merged_tokens", "def filter_pos(self):\n all_tokens = []\n for zettel in self.lemma_tokens:\n tokens = []\n for word in zettel:\n if word[1] in ['NN', 'NNS', 'NNP', 'NNPS', 'NG']: # NG = n_gram\n tokens.append(word)\n all_tokens.append(tokens)\n self.lemma_tokens = all_tokens", "def build_sentences_from_tokens(tokens):\n\n\tsentences = []\n\ttmp_sentence = []\n\n\tfor elem in tokens:\n\t\tif elem == \"EOS\":\n\t\t\ttmp_sentence = ' '.join(tmp_sentence)\n\t\t\tsentences.append(tmp_sentence)\n\t\t\ttmp_sentence = []\n\t\telse:\n\t\t\ttmp_sentence.append(elem)\n\n\treturn sentences", "def build_sentences_from_tokens(tokens):\n\n\tsentences = []\n\ttmp_sentence = []\n\n\tfor elem in tokens:\n\t\tif elem == \"EOS\":\n\t\t\ttmp_sentence = ' '.join(tmp_sentence)\n\t\t\tsentences.append(tmp_sentence)\n\t\t\ttmp_sentence = []\n\t\telse:\n\t\t\ttmp_sentence.append(elem)\n\n\treturn sentences", "def process_token_sentence(text):\n\n sentences = nltk.sent_tokenize(text)\n tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)\n\n return sentences", "def _find_tokens(doc, text):\n word_tokens, pos_tags = zip(*nltk.pos_tag(nltk.word_tokenize(text)))\n\n offset = 0\n tokens, missing = [], []\n for token, pos_tag in zip(word_tokens, pos_tags):\n while offset < len(text) and (text[offset] == '\\n' or text[offset] == ' '):\n if text[offset] == '\\n':\n tokens.append(Token(doc, offset, offset + 1, 'NL', get_shape_category_simple('\\n'), '\\n'))\n offset += 1\n pos = text.find(token, offset, offset + max(50, len(token)))\n if pos > -1:\n if missing:\n start = tokens[-1].end if len(tokens) > 1 else 0\n for m in missing:\n while text[start] in [' ', '\\n']:\n if text[start] == '\\n':\n tokens.append(Token(doc, start, start + 1, 'NL', get_shape_category_simple('\\n'), '\\n'))\n start += 1\n length = len(m[0]) if m[0] not in ['\\'\\'', '``'] else 1\n tokens.append(Token(doc, start, start + length, m[1], get_shape_category_simple(m[0]), m[0]))\n start = start + length\n missing = []\n tokens.append(Token(doc, pos, pos + len(token), pos_tag, get_shape_category_simple(token), token))\n offset = pos + len(token)\n else:\n missing.append((token, pos_tag))\n LOGGER.debug('Token \"{}\" not found'.format(token))\n return tokens", "def get_part_of_speech(tokens):\n\n return [e for e in nltk.chunk.ne_chunk(nltk.pos_tag(tokens)) if type(e) is tuple]", "def remove_non_wordnet(tokens):\n return [token for token in tokens if wn.synsets(token)]", "def 
prepocess_pos_tagged_texts(tweet_tokens):\n return [TextPreprocessor.additional_text_preprocessing_with_pos(json.loads(t)) for t in tweet_tokens]", "def pos_tag_sents(\n sentences: List[List[str]], engine: str = \"perceptron\", corpus: str = \"orchid\"\n) -> List[List[Tuple[str, str]]]:\n if not sentences:\n return []\n\n return [pos_tag(sent, engine=engine, corpus=corpus) for sent in sentences]", "def _align_tagged_sentence(self, ne_tagged_sentence):\n omitted_tokens = self.task_config[\"OMITTED_TOKENS_FOR_ALIGNMENT\"]\n\n return [\n (word, tag)\n for word, tag in ne_tagged_sentence\n if word not in omitted_tokens\n ]", "def full_cleanse(data):\n tokenizer = RegexpTokenizer(r'\\w+')\n stops = set(stopwords.words('english'))\n\n sent_toks = []\n for text in data:\n try:\n text = tokenizer.tokenize(text)\n pos_tagged = nltk.pos_tag(text)\n words = [w[0] for w in pos_tagged if w[1].capitalize() != 'NNP']\n words = [WordNetLemmatizer().lemmatize(w) for w in words]\n words = [w.lower() for w in words if not w.lower() in stops]\n words = [w for w in words if not w.isdigit()]\n sent_toks.append(words)\n except TypeError:\n pass\n return sent_toks", "def filter_ne_punct(pos_tags, named_entities, null_ne='O', punct_tags=(',', '.')):\n ne_punct = []\n punct_tags = set(punct_tags)\n # loop on all but last index\n for i_token, token in enumerate(named_entities[:-1]):\n word, ne = token\n if ne != null_ne and pos_tags[i_token+1][1] in punct_tags:\n ne_punct.append(word)\n return ne_punct", "def lemmatisation(tokens):\n pos_tag = nltk.pos_tag(tokens)\n lemmatiser = nltk.WordNetLemmatizer()\n wornet_tags = {\"J\": wordnet.ADJ, \"N\": wordnet.NOUN, \"V\": wordnet.VERB, \"R\": wordnet.ADV}\n words = []\n for word, tag in pos_tag:\n proper_tag = wornet_tags.get(tag[0].upper(), wordnet.NOUN)\n words.append(lemmatiser.lemmatize(word, proper_tag))\n return words", "def prepare_tokens(self, tokens: Iterator[Token]) -> List[str]:\n\n\t\tif self.ignore_punctuation:\n\t\t\ttokens = filter(lambda t: t.value not in self.PUNCTUATION, tokens)\n\n\t\tif self.uses_lemma:\n\t\t\ttoken_values = map(lambda t: t.lemma, tokens)\n\t\telse:\n\t\t\ttoken_values = map(lambda t: t.value, tokens)\n\n\t\tif self.caseless:\n\t\t\ttoken_values = map(lambda v: v.lower(), token_values)\n\n\t\tif self.add_pos_tags:\n\t\t\ttoken_values = map(lambda x: \"_\".join(x), zip(token_values, map(lambda t: t.pos, tokens)))\n\n\t\tn_gram_values = self.build_n_grams(tokens=list(token_values), n_gram_size=self.n_gram_size)\n\n\t\treturn n_gram_values", "def extract_entities_from_dependency_parse(dtrees, postag):\n sents = []\n for x in range(0,len(dtrees)):\n tok_list = []\n for node_index in dtrees[x].nodes:\n if node_index != 0:\n node = dtrees[x].nodes[node_index]\n if node['ctag'] == postag:\n tok_list.append((node['word'],postag))\n else:\n tok_list.append((node['word'],'O'))\n sents.append(tok_list)\n return sents", "def ie_preprocess(document):\n sentences = nltk.sent_tokenize(document) #NLTK default sentence segmenter\n #print sentences # sentences are segmented\n sentences = [nltk.word_tokenize(sent) for sent in sentences] # NLTK word tokenizer \n #print sentences # sentences are tokenized\n sentences = [nltk.pos_tag(sent) for sent in sentences] # NLTK POS tagger \n #print sentences # sentences are POS tagged\n return sentences", "def full_ne_list_and_pos_amount(self):\n #open file\n with open(self.lang + '.txt') as file:\n for paragraph in file:\n sentences = tokenize.sent_tokenize(paragraph)\n for sentence in sentences:\n #instance of 
the named_entity_methods_sentence class\n inst = named_entity_methods_sentence(sentence, self.lang)\n #save into a list all NEs of the text and update the total\n #number of nouns and numerals\n if self.method == 'stanford':\n self.named_entity_list_total.append(inst.named_entity_list_stanford_nlp())\n self.amount_nouns_and_num_total += inst.amount_nouns_and_numerals_stanford_nlp()\n elif self.method == 'spacy':\n self.named_entity_list_total.append(inst.named_entity_list_spacy())\n self.amount_nouns_and_num_total += inst.amount_nouns_and_numerals_spacy()\n return", "def _postprocess(\n self,\n result: List[str],\n eojeols: List[str],\n poses: List[str],\n ):\n token_indices = []\n temp_group = []\n for i, res in enumerate(result):\n if (\"<\" in res) or (\">\" in res):\n continue\n if not temp_group:\n temp_group.append(i)\n else:\n if i == (temp_group[-1] + 1):\n temp_group.append(i)\n else:\n token_indices.append(temp_group)\n temp_group = [i]\n token_indices.append(temp_group)\n\n lucrative = 0\n for i, li_index in enumerate(token_indices):\n if poses:\n eojeol = eojeols[i].split(\"+\")\n pos = poses[i].split(\"+\")\n tagged = []\n for e, p in zip(eojeol, pos):\n tagged.append(f\"{e}/{p}\")\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [\"+\".join(tagged)]\n else:\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [eojeols[i]]\n lucrative += len(li_index) - 1\n\n return result", "def assign_POS_tag(lines):\n lines_POS = lines.copy()\n for i in range(len(lines)):\n line = lines[i]\n association = nltk.pos_tag(line) # Get the list of tokens with their POS tags\n lines_POS[i] = [x[1] for x in association]\n print(\"Creation of the POS tags list is finished.\")\n\n return lines_POS", "def extract_entities(event):\n # TODO The text should probably already be tagged and tokenized before this step\n tree = ne_chunk(event.pos_tagged)\n entities = set([])\n\n people = tree.subtrees(lambda x: x.node == \"PERSON\")\n for person in people:\n entities.add(\" \".join([leaf[0] for leaf in person.leaves()]))\n\n places = tree.subtrees(lambda x: x.node == \"GPE\")\n for place in places:\n entities.add(\" \".join([leaf[0] for leaf in place.leaves()]))\n\n organizations = tree.subtrees(lambda x: x.node == \"ORGANIZATION\")\n for org in organizations:\n entities.add(\" \".join([leaf[0] for leaf in org.leaves()]))\n \n return entities", "def find_named_entities(pos_tags):\n contains_proper_noun = False\n tokens = list()\n for tags in pos_tags:\n if tags['tag'] == '^':\n contains_proper_noun = True\n\n if contains_proper_noun:\n for tags in pos_tags:\n if len(tags['token']) == 1:\n tags['token'] = NLPUtils.character_to_unicode(tags['token'])\n tokens.append(tags['token'])\n try:\n text = ' '.join(tokens)\n headers = {\n 'Accept': 'application/json',\n }\n # print(text)\n data = [\n ('text', text),\n ('confidence', '0.25'),\n ('support', '20')\n ]\n\n r = requests.post('http://model.dbpedia-spotlight.org/en/annotate', headers=headers, data=data,\n timeout=10)\n # print(str(r.content.decode()))\n res = r.json()\n\n entities = list()\n if 'Resources' in res:\n for i in res['Resources']:\n # res_str = str(i).replace(',','\\n')\n # print(res_str)\n\n if i['@types'] is not None:\n original = i['@surfaceForm']\n entity_tmp = i['@URI']\n entity_tmp = re.sub('.*/', '', entity_tmp)\n entity_tmp = re.sub('\\(.*\\)', '', entity_tmp)\n entity = re.sub('_', ' ', entity_tmp).strip()\n\n if entity.lower() in text.lower() and ' ' in entity:\n entities.append((entity, 
int(i['@offset'])))\n # print(entities)\n new_pos_tags = list()\n curr_pos = 0\n tokens_to_omit = 0\n for tags in pos_tags:\n # if re.match(\"U\\+[a-zA-Z0-9]{1,5}\",tags['token']):\n # print(tags['token'])\n # tags['token'] = NLPUtils.unicode_to_character(tags['token'])\n # print(tags['token'])\n\n token = tags['token']\n for e in entities:\n curr_dict = dict()\n if curr_pos == e[1]:\n tokens_to_omit = len(re.split(' ', e[0]))\n curr_dict['token'] = e[0]\n curr_dict['tag'] = '^'\n new_pos_tags.append(curr_dict)\n # +1 for whitespace\n curr_pos += len(token) + 1\n if tokens_to_omit == 0:\n new_pos_tags.append(tags)\n else:\n tokens_to_omit -= 1\n\n # decode unicode sequence\n new_pos_tags = NLPUtils.unicode_to_character_pos_tagged(new_pos_tags)\n return new_pos_tags\n # decode uniocde character\n pos_tags = NLPUtils.unicode_to_character_pos_tagged(pos_tags)\n except Exception as e:\n print(e)\n return None\n\n return pos_tags", "def create_ngrams(self, tokens):\n ngrams = []\n for i in range(len(tokens)- self.N + 1):\n ngrams.append(tuple(tokens[i:i+self.N]))\n return ngrams", "def _parse_individual_tokens(self, tokens: List[str]) -> List:\r\n objs = []\r\n\r\n for token in tokens:\r\n obj = self._parse_token(token)\r\n objs.append(obj)\r\n\r\n return objs", "def _tokenize(self, raw_text):\n\n doc = self.nlp(raw_text.strip())\n\n # Loop through tokens and find known entities aren't already marked\n for token in doc:\n # Is this word in our known_entities, but is not recognized by the spaCy parser?\n if token.text.lower() in self.known_entities and token.ent_type not in self.entities:\n # We need to set the new entity to doc.ents directly (I believe the getter for doc.ents does\n # some important massaging. However, counter to the online docs, setting doc.ents wipes out\n # all of the previously recognized ents, so we stash the value, then we combine and reset.\n stash = doc.ents\n doc.ents = [(token.text.title(), doc.vocab.strings['PERSON'], token.i, token.i + 1)]\n doc.ents = doc.ents + stash\n\n # Find proper noun n-grams: (a) find a known entity, (b) is the next word also a known entity?,\n # (c) merge, (d) repeat\n # TODO: Joining multi-word named entities sometimes causes us trouble.\n doc_len = len(doc) # Helps us know when to exit the 'for loop' (since we change the # of items via merge)\n for token in doc:\n # if we're not at the end of the loop, and we recognize this as a proper noun and it's not a stop word\n # and the token isn't a space...\n if token.i + 1 < doc_len and token.ent_type in self.entities and \\\n token.text.lower() not in self.stop_words and token.text not in ' ':\n next_token = doc[token.i + 1]\n # keep looping while we're not at the end of the loop and this token has the same entity type as\n # the previous token and it's not a stop word or a space.\n while token.i + 1 < doc_len and next_token.ent_type == token.ent_type and \\\n next_token.text.lower() not in self.stop_words and next_token.text not in ' ':\n n_gram = doc[token.i:token.i + 2]\n n_gram.merge()\n doc_len -= 1 # the merge changes the list length, so we just shrunk the list!\n # print(x)\n if token.i + 1 >= doc_len:\n break\n\n return doc", "def get_positions(token, docs):\n\n all_matches = [token]\n for doc in docs:\n matches = []\n if token in doc:\n indexes = [i for i, x in enumerate(doc) if x == token]\n matches += [docs.index(doc), len(indexes), indexes]\n if matches:\n all_matches.append(matches)\n return all_matches", "def sents_2(list_of_tweets):\n stopwords = 
nltk.corpus.stopwords.words('english')\n contextTerms = []\n for sent in list_of_tweets:\n for word in sent.split():\n word_lemmatizer = WordNetLemmatizer()\n word = word_lemmatizer.lemmatize(word.lower())\n if wordnet.synsets(word) and word not in stopwords and len(word)>2:\n contextTerms.append(word)\n\n #print( contextTerms)\n return contextTerms", "def get_nnp_ngrams(original_text, highlight=4, minsize=0):\n minsize = minsize-1\n if minsize<0:\n minsize = 0 \n \n tokens = nltk.wordpunct_tokenize(original_text)\n tagged = nltk.word_tokenize(original_text)\n tagged = nltk.pos_tag(tokens)\n #for word in tagged:\n # print word\n doc_length = len(tokens)\n counter = 0\n counter2 = 0\n if highlight==0:\n concated_test = doc_length # This is set to doc_length but could be anything recommend 3.\n else:\n concated_test = highlight\n list_of_NNPs = []\n while counter < (doc_length-1):\n while counter2 < concated_test:\n counter2 = counter2+1\n counter3 = 0\n #print '--------------------'\n temp_array = []\n all_nnp = True\n while counter3 < counter2:\n if counter < (doc_length-counter3):\n #print tokens[counter+counter3],tagged[counter+counter3][1]\n temp_array.append(tokens[counter+counter3])\n if tagged[counter+counter3][1] != 'NNP':\n all_nnp = False\n counter3 = counter3+1\n counter3 = 0\n if all_nnp == True:\n if(len(temp_array)>minsize):\n list_of_NNPs.append(temp_array)\n #print 'added to main array'\n #else:\n #print 'not all NNPs'\n counter2 = 0\n counter = counter+1\n #for ngram in list_of_NNPs:\n # print ngram\n import itertools\n list_of_NNPs.sort()\n unique_NNPs = list(list_of_NNPs for list_of_NNPs,_ in itertools.groupby(list_of_NNPs))\n return unique_NNPs" ]
[ "0.6511681", "0.62655246", "0.62197286", "0.61083245", "0.6082513", "0.6082513", "0.5933781", "0.5828468", "0.5822531", "0.57664376", "0.5731313", "0.5682466", "0.56602114", "0.5646151", "0.5586088", "0.55553305", "0.5539988", "0.5534359", "0.55341905", "0.55209655", "0.5458795", "0.5455932", "0.5437676", "0.5419406", "0.54021144", "0.5361851", "0.5359256", "0.53460383", "0.5333783", "0.53202945" ]
0.62955546
1
Takes in a document, returns the named entities in that document
def add_entities(doc):

    # Calls function to tokenize the document, stores as list of strings
    tokens = tokenize(doc)

    # Calls function to find named entities in the tokens, stores as list of strings
    chunks = chunk(tokens)

    return chunks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def named_entities(self) -> List[str]:", "def get_entities(doc, clusters):\n ent_clusts = []\n for clust in clusters:\n ent_clust = []\n for (s, e) in clust:\n ent_clust.append(doc[s : e + 1])\n ent_clusts.append(ent_clust)\n return ent_clusts", "def named_entity_list_stanford_nlp(self) -> List:\n stanza.download(self.lang, processors = 'tokenize,mwt,ner')\n #load file and convert input string\n nlp = stanza.Pipeline(self.lang, processors = 'tokenize,mwt,ner')\n doc = nlp(self.sent)\n #build the output list\n for sentence in doc.ents:\n #avoid that 'Abstract', 'Title' and 'Author' are counted between NEs\n if sentence.text != 'Abstract' and sentence.text != 'Title' and sentence.text != 'Author':\n self.named_entity_list.append((sentence.text, \n sentence.start_char, \n sentence.end_char, sentence.type))\n return self.named_entity_list", "def extract_entities(event):\n # TODO The text should probably already be tagged and tokenized before this step\n tree = ne_chunk(event.pos_tagged)\n entities = set([])\n\n people = tree.subtrees(lambda x: x.node == \"PERSON\")\n for person in people:\n entities.add(\" \".join([leaf[0] for leaf in person.leaves()]))\n\n places = tree.subtrees(lambda x: x.node == \"GPE\")\n for place in places:\n entities.add(\" \".join([leaf[0] for leaf in place.leaves()]))\n\n organizations = tree.subtrees(lambda x: x.node == \"ORGANIZATION\")\n for org in organizations:\n entities.add(\" \".join([leaf[0] for leaf in org.leaves()]))\n \n return entities", "def _get_entities(self, text, language=''):\n body = {\n 'document': {\n 'type': 'PLAIN_TEXT',\n 'content': text,\n },\n 'encodingType': 'UTF32',\n }\n if language:\n body['document']['language'] = language\n\n request = self.service.documents().analyzeEntities(body=body)\n response = request.execute()\n result = []\n for entity in response.get('entities', []):\n mentions = entity.get('mentions', [])\n if not mentions:\n continue\n entity_text = mentions[0]['text']\n offset = entity_text['beginOffset']\n for word in entity_text['content'].split():\n result.append({'content': word, 'beginOffset': offset})\n offset += len(word)\n return result", "def _get_entities(self, entity_name):\n return get_entities(self.attributes.workspace.namespace, self.attributes.workspace.name, entity_name)", "def get_entity(doc):\n doc = clean_doc(doc)\n length = len(doc)\n num = length // 63\n if num < 1: num = 1\n header = {\n 'content-type': 'application/json'\n }\n enti = []\n for i in range(num):\n time.sleep(1)\n chaxun = doc[i * 63: (i + 1)*63].strip()\n try:\n res = requests.post('https://aip.baidubce.com/rpc/2.0/kg/v1/cognitive/entity_annotation', \n params={'access_token': token},\n headers=header,\n json={\"data\": chaxun}).json()['entity_annotation']\n for item in res:\n enti.append(item['mention'])\n except KeyError as e:\n print(e)\n print('chauxn:', chaxun)\n continue\n return enti", "def extract_entities(self) :\n entities = []\n googleEntityList = self.googleLanguageModel.analyze_entities() \n watsonEntityList = self.watsonLanguageModel['entities']\n\n for entity in googleEntityList.entities[:self.entitySizeLimit]:\n if len(entity.metadata) > 0:\n entities.append({ 'name' : entity.name, 'metadata' : entity.metadata})\n \n for entity in watsonEntityList[:self.entitySizeLimit]: \n entities.append({ 'name': entity['text'], 'metadata': entity.get('disambiguation', {})}) \n\n return entities", "def find_named_entities(pos_tags):\n contains_proper_noun = False\n tokens = list()\n for tags in pos_tags:\n if tags['tag'] == '^':\n 
contains_proper_noun = True\n\n if contains_proper_noun:\n for tags in pos_tags:\n if len(tags['token']) == 1:\n tags['token'] = NLPUtils.character_to_unicode(tags['token'])\n tokens.append(tags['token'])\n try:\n text = ' '.join(tokens)\n headers = {\n 'Accept': 'application/json',\n }\n # print(text)\n data = [\n ('text', text),\n ('confidence', '0.25'),\n ('support', '20')\n ]\n\n r = requests.post('http://model.dbpedia-spotlight.org/en/annotate', headers=headers, data=data,\n timeout=10)\n # print(str(r.content.decode()))\n res = r.json()\n\n entities = list()\n if 'Resources' in res:\n for i in res['Resources']:\n # res_str = str(i).replace(',','\\n')\n # print(res_str)\n\n if i['@types'] is not None:\n original = i['@surfaceForm']\n entity_tmp = i['@URI']\n entity_tmp = re.sub('.*/', '', entity_tmp)\n entity_tmp = re.sub('\\(.*\\)', '', entity_tmp)\n entity = re.sub('_', ' ', entity_tmp).strip()\n\n if entity.lower() in text.lower() and ' ' in entity:\n entities.append((entity, int(i['@offset'])))\n # print(entities)\n new_pos_tags = list()\n curr_pos = 0\n tokens_to_omit = 0\n for tags in pos_tags:\n # if re.match(\"U\\+[a-zA-Z0-9]{1,5}\",tags['token']):\n # print(tags['token'])\n # tags['token'] = NLPUtils.unicode_to_character(tags['token'])\n # print(tags['token'])\n\n token = tags['token']\n for e in entities:\n curr_dict = dict()\n if curr_pos == e[1]:\n tokens_to_omit = len(re.split(' ', e[0]))\n curr_dict['token'] = e[0]\n curr_dict['tag'] = '^'\n new_pos_tags.append(curr_dict)\n # +1 for whitespace\n curr_pos += len(token) + 1\n if tokens_to_omit == 0:\n new_pos_tags.append(tags)\n else:\n tokens_to_omit -= 1\n\n # decode unicode sequence\n new_pos_tags = NLPUtils.unicode_to_character_pos_tagged(new_pos_tags)\n return new_pos_tags\n # decode uniocde character\n pos_tags = NLPUtils.unicode_to_character_pos_tagged(pos_tags)\n except Exception as e:\n print(e)\n return None\n\n return pos_tags", "def get_entities(tags):\n pass", "def process_extract_entity_names(t):\n\n entity_names = []\n if hasattr(t, 'label') and t.label:\n if t.label() == 'NE':\n entity_names.append(' '.join([child[0] for child in t]))\n else:\n for child in t:\n entity_names.extend(process_extract_entity_names(child))\n\n return entity_names", "def get_named_entities(\n self,\n identity: Optional[str] = None,\n type: Optional[str] = None,\n subtype: Optional[str] = None,\n ) -> List[NamedEntity]:\n found: List[NamedEntity] = []\n for named_entity in [\n e for h in self.headlines for s in h.sentences for e in s.named_entities\n ]:\n if identity and (identity != named_entity.identity):\n continue\n if type and (type != named_entity.type):\n continue\n if subtype and (subtype != named_entity.subtype):\n continue\n found.append(named_entity)\n return found", "def check_named_entity(check):\r\n\tglobal word_buffer\r\n\tglobal temp\r\n\t\r\n\t\r\n\tif check == \"All\": \r\n\t# @return - Return Named Entities identified from the begining of the sentence except for the Named Entity at the end\r\n \r\n\t if temp == 1: \r\n \r\n\t\tnamed_entity = join_named_entity(word_buffer)\r\n\r\n\t\tword_buffer = []\r\n\t\t\r\n\t\ttemp = 0\r\n\r\n\t\treturn named_entity\r\n\telse:\r\n\t# @ return - Return Named Entity present at the end of the sentence, if available\r\n\r\n\t if len(word_buffer)>1: \r\n\t \r\n named_entity = join_named_entity(word_buffer)\r\n \r\n\t\treturn named_entity", "def named_entity_list_spacy(self) -> List:\n #choose language\n if self.lang == 'en':\n lang_for_spacy = 'en_core_web_sm'\n elif self.lang == 
'de':\n lang_for_spacy = 'de_core_news_sm'\n elif self.lang == 'fr':\n lang_for_spacy = 'fr_core_news_md'\n #load file and convert input string\n nlp = spacy.load(lang_for_spacy)\n doc = nlp(self.sent)\n #build the output list\n for ent in doc.ents:\n #avoid that 'Abstract', 'Title' and 'Author' are counted between NEs\n if ent.text != 'Abstract' and ent.text != 'Title' and ent.text != 'Author':\n self.named_entity_list.append((ent.text, ent.start_char, \n ent.end_char, ent.label_))\n return self.named_entity_list", "def extract_entities(filename):\n logger.info(\"Processing entities for file {}\".format(filename))\n root, full_doc = process_xml_text(filename)\n logger.info(\"Starting to extract entities for file {}\".format(filename))\n name_to_verbs = defaultdict(list)\n \n final_verb_dict = {}\n id_to_sent={}\n try:\n for coref in root.find('document').find('coreference').iter('coreference'):\n verbs_to_cache = []\n name = \"Unknown\"\n for mention in coref.findall('mention'):\n if 'representative' in mention.attrib:\n name = mention.find('text').text\n\n sent_id = int(mention.find('sentence').text) - 1\n\n sentence = root.find('document').find('sentences')[sent_id]\n for dep in sentence.find('dependencies').iter('dep'):\n if int(dep.find('dependent').get(\"idx\")) != int(mention.find('end').text) - 1:\n continue\n\n parent_id = int(dep.find('governor').get(\"idx\")) - 1\n parent = dep.find('governor').text\n\n parent_lemma = sentence.find('tokens')[int(parent_id)].find('lemma').text\n\n # We save the sentence id, the parent id, the entity name, the relationship, the article number\n # With sentence id and parent id we can find embedding\n if dep.get(\"type\") in [\"nsubj\", \"nsubjpass\", \"dobj\"]:\n verbs_to_cache.append(VerbInstance(sent_id, parent_id, parent, parent_lemma, dep.get(\"type\"), mention.find('text').text, \"\", filename))\n\n # end coreff chain\n # We do it this way so that if we set the name in the middle of the chain we keep it for all things in the chain\n if verbs_to_cache:\n name_to_verbs[name] += verbs_to_cache\n\n for name,tupls in name_to_verbs.items():\n for t in tupls:\n key = (t.sent_id, t.verb_id)\n final_verb_dict[key] = t._replace(entity_name=name)\n\n # Also keep all verbs that are in lex\n for s in root.find('document').find('sentences').iter('sentence'):\n sent = []\n for tok in s.find('tokens').iter('token'):\n sent.append(tok.find(\"word\").text.lower())\n sent_id = int(s.get(\"id\")) - 1\n verb_id = int(tok.get(\"id\")) - 1\n key = (sent_id, verb_id)\n if key in final_verb_dict:\n continue\n\n if tok.find('POS').text.startswith(\"VB\"):\n final_verb_dict[key] = VerbInstance(sent_id, verb_id, tok.find(\"word\").text, tok.find('lemma').text.lower(), \"\", \"\", \"\", filename)\n id_to_sent[sent_id] = \" \".join(sent)\n except Exception as e:\n logger.error(\"Error {} occured for file {}\".format(e, filename))\n return final_verb_dict, id_to_sent", "def add_named_entities_to_graph(graph, cursor, docid):\n # Retrieve named entities for current docid\n entities = db_utils.get_entities_from_docid(cursor, docid, \"entity_ids\")\n\n # Create node for each entity\n for entity in entities: # [['Washington Redskins', '[30]', '1', 'ORG'], ..]\n entity_name = entity[0]\n entity_tf = int(entity[2])\n entity_type = entity[3]\n\n # Convert list string to list (fails occasionally)\n try:\n ent_positions = json.loads(entity[1].replace(\";\", \"\"))\n except Exception as e:\n print(f\"issue with enitity: {entity[1]}, docid: {docid}\")\n logging.info(e)\n continue\n\n 
# Add node to graph\n graph.add_node(Node(entity_name, entity_type, ent_positions, entity_tf))", "def findentity(string):\r\n for x in entitylist:\r\n if x in string:\r\n print(f\"(Doc.{i})--Entity = {x.title()}\")\r\n break", "def get_entities_dict(p_str):\n nlp = en_core_web_sm.load()\n doc = nlp(p_str)\n entities = {}\n relevant_keywords = []\n list_of_types = ['NORP', 'ORG', 'GPE', 'LAW', 'LANGUAGE']\n for X in doc.ents:\n if not(X.label_ in entities):\n entities[X.label_] = []\n entities[X.label_].append(X.text)\n if X.label_ in list_of_types:\n relevant_keywords.append(X.text)\n print(entities)\n print(\"HERE\")\n print(relevant_keywords)\n return entities, relevant_keywords", "def entities(self):\n if 'ner' not in self.annotators:\n return None\n return [t[self.NER] for t in self.data]", "def entities(self):\n if 'ner' not in self.annotators:\n return None\n return [t[self.NER] for t in self.data]", "def entity_counts(doc):\n \n tags = []\n for token in doc.ents:\n tags.append(token.label_)\n frequency = dict(Counter(tags).most_common())\n\n return frequency", "def chunk(tokens):\n\n # Uses NLTK function to pair each token with its Part Of Speech\n entity_list = []\n pos = nltk.pos_tag(tokens)\n named_entities_chunk = nltk.ne_chunk(pos, binary=True)\n\n # Finds named entities in tokens, stores in list of strings\n for i in range(0, len(named_entities_chunk)):\n ents = named_entities_chunk.pop()\n if getattr(ents, 'label', None) is not None and ents.label() == \"NE\":\n entity_list.append([ne for ne in ents])\n\n # Combines named entity components, pulls off the POF labels\n return [' '.join(next(zip(*l))) for l in entity_list]", "def get_urns_from_docx(document):\n import sys\n import zipfile\n import re\n from bs4 import BeautifulSoup\n\n with zipfile.ZipFile(document, 'r') as zfp:\n with zfp.open('word/document.xml') as fp:\n soup = BeautifulSoup(fp.read(), 'xml')\n\n return re.findall(\"[0-9]{13}\", str(soup))", "def get_named_document(self, entity, name):\n view = self.db.view(\"%s/name\" % entity, include_docs=True)\n result = view[name]\n if len(result) != 1:\n raise ValueError(\"no such %s document '%s'\" % (entity, name))\n return result.rows[0].doc", "def _find_entities(tagged_data, retrieval_data):\n ret = list()\n assert len(tagged_data) == retrieval_data.shape[0], \"Please make sure that tagged data and retrieval data entries align for ID tagging.\"\n ids = retrieval_data.original_id.tolist()\n for i, (sentence, entity_dict) in enumerate(tagged_data):\n ignore_indices, relevant_tokens, relevant_contexts = list(), list(), list()\n ignore_substrings = \" \".join([sentence[start_idx:end_idx] for [start_idx, end_idx, type_ent] in entity_dict['entities'] if type_ent in ignore_ents])\n doc = nlp(sentence)\n #print(\"sentence:\", sentence)\n #print(\"ignoreing substrings:\", ignore_substrings)\n #print(\"found nouns:\")\n for j, token in enumerate(doc):\n if token.tag_.startswith(\"N\") and not token.text in ignore_substrings:#noun which is not ignored\n #print(\"token:\", token.text)\n #print(\"token dep type:\", token.dep_)\n #print(\"token head text:\", token.head.text)\n #print(\"token head pos\", token.head.pos_)\n #print(\"children:\", [child for child in token.children])\n relevant_tokens.append(token.text)\n window_left = min(0, j-2)\n window_right = min(j+3, len(doc))\n relevant_contexts.append(\" \".join([token.text for token in doc[window_left:window_right]]))\n ret.append([ids[i], sentence, relevant_tokens, relevant_contexts])\n with 
open(\"output/untagged_nouns.pkl\", \"wb\") as outf:\n pickle.dump(ret, outf)\n return ret", "def search_entity(self, name_filter):\n name_filter=name_filter.lower()\n model_reader=oc.delegator.getModelReader()\n names=model_reader.getEntityNames()\n # print(len(names))\n for name in names:\n if name_filter in name.lower():\n print(name)", "def get_names():\n\n #Initialize entities dictionary\n entities = {'entity': 'source_file'}\n\n # Construct the raw_directory path\n project_root = os.environ['PYTHONPATH']\n raw_directory = '{}/data/raw/'.format(project_root)\n \n for file in os.listdir(raw_directory):\n if file.endswith('.json'):\n \n # Construct the full file path\n full_path = '{}{}'.format(raw_directory, file)\n \n # Open each JSON file\n with open(full_path, 'r') as source_file:\n data = source_file.read()\n parsed_data = json.loads(data)\n \n # Iterate through the dictionary parsed_data\n for key in parsed_data:\n if 'SocialTag' in key:\n name = parsed_data[key]['name']\n entities.update({name: file})\n\n return entities", "def fetch_required_entities(name):\n try:\n db = get_service_collection()\n service = db.find({\"name\": {\"$regex\": name.strip(), \"$options\": \"i\"}})\n required_entities = {}\n for change_log_service in service:\n entities = change_log_service[\"value\"][\"entities\"]\n break\n\n for entity in entities:\n if \"true\" == entities[entity][\"required\"]:\n required_entities[entity] = entity\n break\n except Exception as e:\n logger.error(\"Exception in _fetch_required_entities : \" + str(e))\n\n return required_entities", "def discover_people(self, doc):\n self.log.info(\"Matching entities to people\")\n count = 0\n\n tomatch = set(u.entity for u in doc.utterances if not u.entity.person)\n if tomatch:\n people = Person.query.all()\n people_by_name = {p.name: p for p in people}\n\n # we could already have found matching people during this loop,\n # so protect against it\n for entity in (e for e in tomatch if not e.person):\n name = self.clean_name(entity.name)\n self.log.info(\"Trying to match entity '%s' to a person as '%s'\" % (entity.name, name))\n\n match = None\n entity_name_len = len(name)\n\n if name in people_by_name:\n # exact match\n match = people_by_name[name]\n\n else:\n # calculate distance to all other names\n # as a small optimisation, don't test an entity if the\n # length of the names is too different\n candidates = (\n (p, levenshtein(p.name, name))\n for p in people\n if abs(len(p.name) - entity_name_len) <= 2)\n\n # limit to only the good ones\n candidates = [(p, x) for p, x in candidates if x >= 0.95]\n if candidates:\n match = max(candidates, key=lambda p: p[1])[0]\n\n if match:\n count += 1\n self.log.info(\"Matched entity %s to person %s\" % (entity, match))\n entity.person = match\n\n self.log.info(\"Matched %s entities to people\" % count)", "def entities_text(text):\n if len(text) == 0:\n return None\n\n client = language.LanguageServiceClient()\n\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n\n # Instantiates a plain text document.\n document = types.Document(\n content=text,\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detects entities in the document. 
You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n entities = client.analyze_entities(document).entities\n\n # entity types from enums.Entity.Type\n entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION',\n 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER')\n\n for entity in entities:\n print(u'{:<16}\\t{:8}\\t{}'.format(entity.salience, entity.name, entity_type[entity.type]))\n \"\"\"print('=' * 20)\n print(u'{:<16}: {}'.format('name', entity.name))\n print(u'{:<16}: {}'.format('type', entity_type[entity.type]))\n print(u'{:<16}: {}'.format('metadata', entity.metadata))\n print(u'{:<16}: {}'.format('salience', entity.salience))\n print(u'{:<16}: {}'.format('wikipedia_url',\n entity.metadata.get('wikipedia_url', '-')))\"\"\"\n\n return entities" ]
[ "0.67873853", "0.647988", "0.6388253", "0.6316278", "0.62480557", "0.6205224", "0.6171935", "0.6130217", "0.60650283", "0.60634047", "0.6033262", "0.59924245", "0.5955614", "0.59127384", "0.5888513", "0.58798087", "0.587367", "0.58041203", "0.57902235", "0.57902235", "0.5684723", "0.56708133", "0.5638297", "0.56253684", "0.5613388", "0.5611157", "0.56058806", "0.5494645", "0.54788035", "0.5457713" ]
0.7496968
0
The function 'match', when given a list of words, finds all index pairs such that the concatenation of the two words is a palindrome.
def match(list_string):

    assert type(list_string)==list
    for i in list_string:
        assert type(i)==str
        assert i.isalpha()

    #Loops through all the possible pairs of words in the list to find the word pairs whose concatenation is a palindrome.
    my_match = []
    for i in range(0,len(list_string)):
        for j in range(0,len(list_string)):
            if i!=j:
                a = list_string[i]
                b = list_string[j]
                c = a+b
                d = b+a
                if c==c[::-1]:
                    if (i,j) not in my_match:
                        my_match.append((i,j))
                elif d==d[::-1]:
                    if (j,i) not in my_match:
                        my_match.append((j,i))
    return my_match
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def palindromePairs(self, words: List[str]) -> List[List[int]]:\n d = {w : i for i, w in enumerate(words)}\n \n res = []\n for idx, word in enumerate(words):\n for i in range(len(word)+1):\n str1 = word[:i]\n str2 = word[i:]\n # first part should be palindrome, second part (reverse) should be in w\n if str1 == str1[::-1]:\n back = str2[::-1]\n if back in d and back != word:\n res.append([d[str2[::-1]], idx])\n # second part should be palindrome, first part (reverse) should be in w\n if str2 and str2 == str2[::-1]: # if the last part is empty, it is calculated before \n back = str1[::-1]\n if back in d and back != word: \n res.append([idx, d[str1[::-1]]])\n # print(res)\n return res", "def find_pairs(words): \n pass", "def palindromePairs(lst):\n results = []\n for i, e1 in enumerate(lst):\n for j, e2 in enumerate(lst):\n if i != j:\n if isPalindrome(e1+e2):\n results.append((i, j))\n return results", "def palindrom():\r\n pal = []\r\n\r\n sub_str = gen_substring(\"abaabbaab\")\r\n\r\n for i in range(len(sub_str)):\r\n\r\n rev = reverse_string(sub_str[i])\r\n\r\n if rev == sub_str[i]:\r\n\r\n pal.append(rev)\r\n\r\n return pal", "def word_match(w):\n matches = [word for word in common_words if len(word) == len(w)]\n \n # If the letter in w is upper then it is a decoded letter so that same letter must be in the same index in all matches\n # If the letter is lowercase then it is encrypted and can be mapped to any letter that is not already mapped to an encoded letter\n for i in range(len(w)):\n if (w[i]).isupper() == True:\n matches = [word for word in matches if word[i] == w[i]]\n else:\n matches = [word for word in matches if word[i] not in decoded_dict.values()]\n # Making a copy of the current matches so that I can iterate over them which removing items if the mapping isn't one to one\n matches_copy = [word for word in matches] \n map_dict = {}\n # I iterate through all the words in the matches list and then through all the letters in each match.\n # If it is the first time the letter appears in a word then the match is removed if that encoded letter is being sent to a letter that already has another encoded letter mapped to it.\n # If the letter has appeared in the word before then the word is removed if that encoded letter is not being mapped to the same letter as it was previously\n for match in matches_copy:\n map_dict.clear()\n for i in range(len(match)):\n if w[i] not in map_dict:\n if match[i] not in map_dict.values():\n map_dict[w[i]] = match[i]\n else:\n matches.remove(match)\n break\n else:\n if map_dict[w[i]] == match[i]:\n continue \n else: \n matches.remove(match)\n break \n return(matches)", "def matching(a, b):\n\tcomp = zip(a, b)\n\tdiff = abs(len(a)-len(b))\n\tm = sum(1 for x,y in comp if x == y)\n\treturn m - diff", "def chars_match (found, word):\n index = 0\n for i in found:\n if (i != word[index]):\n return False\n index += 1\n return True", "def match_twosided(desc1,desc2):\n matches_12 = match(desc1, desc2)\n matches_21 = match(desc2, desc1)\n ndx_12 = matches_12.nonzero()[0]\n # remove matches that are not symmetric\n for n in ndx_12:\n if matches_21[int(matches_12[n])] != n:\n matches_12[n] = 0\n return matches_12", "def match(a1, a2):\n inds_1, inds_2 = [], []\n i, j = 0, 0\n for s in range(0, len(a1)):\n increment = lambda a, x, ind: x+1 if a[ind] != \"-\" else x\n if a1[s] is a2[s] and a1[s] != \"-\":\n inds_1.append(i)\n inds_2.append(j)\n i = increment(a1,i,s)\n j = increment(a2,j,s)\n return inds_1, inds_2", "def 
match_twosided(desc1,desc2):\n\t\n\tmatches_12 = match(desc1,desc2)\n\tmatches_21 = match(desc2,desc1)\n\t\n\tndx_12 = matches_12.nonzero()[0]\n\t\n\t#remove matches that are not symmetric\n\tfor n in ndx_12:\n\t\tif matches_21[int(matches_12[n])] != n:\n\t\t\tmatches_12[n] = 0\n\t\n\treturn matches_12", "def match_twosided(desc1, desc2):\n\n matches_12 = match(desc1, desc2)\n matches_21 = match(desc2, desc1)\n\n ndx_12 = matches_12.non_zeros()[0]\n\n for n in ndx_12:\n if matches_21[int(matches_12[n])] != n:\n matches_12[n] = 0\n\n return matches_12", "def trifeca(word: str):\n last_letter = 'None'\n last_pair_matched = False\n consecutive_matching_pairs = 0\n\n for letter in word:\n if last_pair_matched:\n last_pair_matched = False\n last_letter = letter\n continue\n\n if letter == last_letter:\n last_pair_matched = True\n consecutive_matching_pairs += 1\n \n if consecutive_matching_pairs == 3:\n return True\n else:\n consecutive_matching_pairs = 0\n last_letter = letter \n \n return False", "def index_word_pairs(word, seq):\n indices = [i for i, x in enumerate(seq) if x == word]\n res = []\n for i in indices:\n res += [(word, i)]\n return res", "def find_palindromes(word_list):\n palindrome_list = []\n\n for word in word_list:\n if check_palindrome(word):\n palindrome_list.append(word)\n\n return palindrome_list", "def palindrom_permutation(string: str):\n string = re.sub(r'\\W+', '', string.lower())\n\n chars = dict()\n for c in string:\n chars[c] = chars[c] + 1 if c in chars else 1\n\n almost_not_okey = False\n for val in chars.values():\n if val % 2 == 1:\n if not almost_not_okey:\n almost_not_okey = True\n else:\n return False\n\n if almost_not_okey:\n return len(string) % 2 == 1\n return True", "def find_mismatching_pair(s):\n i = 0\n j = len(s) - 1\n while i < j and s[i] == s[j]:\n i += 1\n j -= 1\n return i, j", "def two_pairs(pword):\n\n last = ''\n count = 1\n counts = []\n for char in pword:\n if char == last:\n char_and_count = counts.pop()\n count = char_and_count.pop()\n updated_count = count + 1\n char_and_count.append(updated_count)\n counts.append(char_and_count)\n elif char != last:\n counts.append([char, count])\n last = char\n count = 1\n\n distinct_pairs = set()\n for char_and_count in counts:\n if char_and_count[1] >= 2:\n distinct_pairs.update(char_and_count[0])\n if len(distinct_pairs) >= 2:\n return True\n return False", "def compare_matches(matches, target) -> bool:\n\n for match in target:\n if match not in matches and tuple(reversed(match)) not in matches:\n return False\n return True", "def find_reversed(word_list):\n reversed_list = []\n word_set = set(word_list)\n for word in word_list:\n if word[::-1] in word_set and not check_palindrome(word):\n reversed_list.append(word)\n return reversed_list", "def fn(i1, i2):\n if i1 == len(word1): return len(word2) - i2\n if i2 == len(word2): return len(word1) - i1\n if word1[i1] == word2[i2]: return fn(i1+1, i2+1)\n return 1 + min(fn(i1+1,i2), fn(i1, i2+1))", "def chars(a, b, match=False):\n\ta, b = match_length(a, b)\n\tcomp = zip(a, b)\n\tif match:\n\t\treturn [y for x,y in comp if x == y]\n\telse:\n\t\treturn [y for x,y in comp if x != y]", "def match_finder(word_list):\n dupe_check = []\n match_list = []\n for word in word_list:\n if word in match_list:\n continue\n elif word in dupe_check:\n match_list.append(word)\n else:\n dupe_check.append(word)\n return match_list", "def one_away(w1, w2):\n\n if abs(len(w1) - len(w2) > 1):\n return False\n\n # i = 0\n # w1_d = {}\n # w2_d = {}\n\n # for i in w1:\n # w1_d[i] 
= w1.count(i)\n\n # for j in w2:\n # w2_d[j] = w2.count(j)\n\n # unmatched = set(w1_d.items())^set(w2_d.items())\n \n # if len(unmatched) > 2:\n # return False\n # return True\n \n if len(w2) > len(w1):\n w1, w2 = w2, w1\n\n # Keep track of number of wrong letters\n diff = 0\n\n # Loop over w1 with i and over w2 with j\n i = j = 0\n\n # while j < len(w2):\n\n # if w1[i] != w2[j]:\n\n # # We found a wrong letter\n # wrong += 1\n # # We'll move to the next char in the longer string.\n # i += 1\n # if wrong > 1:\n # return False\n\n # # If same length, move the next char in shorter.\n # # Otherwise, don't move in shorter string --- this\n # # will cover the case of a added letter.\n # if len(w1) == len(w2):\n # j += 1\n\n # else:\n # # Both letters match; move to next letter in both\n # i += 1\n # j += 1\n\n # return True\n\n # iterate over 1 word - shorter of the two, so there is no index out of range error\n # as i, j increments\n while j < len(w2):\n # if letter are different, add to diff variable\n if w1[i] != w2[j]:\n diff += 1\n # as soon as diff is more than 1, than it's fast fail\n if diff > 1:\n return False\n # two scenarios: if same length for both words, both go on check next \n # word\n if len(w1) == len(w2):\n i += 1\n j += 1\n \n else: #if one word is longer than the other, go on to next letter in \n # longer word, and see if it matches previous letter in shorter word\n # because this is a case where extra letter is added in the middle of long\n # word, but the rest should be the same as the shorter\n i += 1\n else:\n i += 1\n j += 1\n return True", "def check_word_overlap(tag_1, tag_2):\n position_tracker = 0\n words_found = []\n for t1 in tag_1:\n for t2 in tag_2:\n if t1 == t2:\n words_found.append([t1,position_tracker])\n position_tracker+=1\n return [words_found, len(words_found)]", "def compare(theInput,dictionary):\n n=len(theInput)\n ret=0\n for word in dictionary:\n if theInput==word: return 2\n if theInput==word[:n]: ret=1\n return ret", "def gen_linear_anagram_candidates(word):\n anagram_candidates = []\n for pos in range(1, len(word)):\n anagram_candidates += [word[pos:] + word[0:pos]]\n return anagram_candidates", "def match(a, b):\n\n if len(b) > len(a):\n return False\n\n for i, xa in enumerate(a):\n if i < len(b):\n xb = b[i]\n if xb and xb != xa:\n return False\n else:\n break\n\n return True", "def find_matches_for_start_pairs(pairs, adj_list_ids, adj_list_words):\n all_valid = []\n for p in tqdm(pairs):\n valid = find_matches(p[0], p[1], adj_list_ids, adj_list_words)\n if valid:\n all_valid.append((p, valid))\n return all_valid", "def generate_pairs_of_words(word_list):\n def pair_words(word_list, i, j, connector):\n return word_list[i] + connector + word_list[j]\n pairs = []\n n = len(word_list)\n for i in range(n-1):\n for j in range(i+1, n):\n pairs.append(pair_words(word_list, i, j, ' '))\n pairs.append(pair_words(word_list, j, i, ' '))\n pairs.append(pair_words(word_list, i, j, '-'))\n pairs.append(pair_words(word_list, j, i, '-'))\n pairs.append(pair_words(word_list, i, j, '_'))\n pairs.append(pair_words(word_list, j, i, '_'))\n pairs.append(pair_words(word_list, i, j, ''))\n pairs.append(pair_words(word_list, j, i, ''))\n outputs = list(set(pairs)) # remove duplicates\n return outputs", "def findIndicesOfStartsWithEndsWith(target, wordListCustomSorted):\n n = len(wordListCustomSorted)\n firstInd = lastInd = n // 2\n if target == firstAndLastChars(wordListCustomSorted[0]):\n firstInd = 0\n elif target < firstAndLastChars(wordListCustomSorted[0]):\n 
return (-1, -1)\n if target == firstAndLastChars(wordListCustomSorted[n-1]):\n lastInd = n-1\n elif target > firstAndLastChars(wordListCustomSorted[n-1]):\n return (-1, -1)\n def firstIndIsMatch(firstInd):\n if firstInd == 0:\n return True\n else:\n return firstAndLastChars(wordListCustomSorted[firstInd]) == target and \\\n firstAndLastChars(wordListCustomSorted[firstInd-1]) < target\n def lastIndIsMatch(lastInd):\n if lastInd == n-1:\n return True\n else:\n return firstAndLastChars(wordListCustomSorted[lastInd]) == target and \\\n firstAndLastChars(wordListCustomSorted[lastInd+1]) > target\n\n lb = 1\n ub = n-1\n while not firstIndIsMatch(firstInd):\n if lb >= ub:\n return (-1, -1)\n if target > firstAndLastChars(wordListCustomSorted[firstInd]):\n lb = firstInd + 1\n else:\n ub = firstInd\n firstInd = (lb + ub) // 2\n lb = firstInd\n ub = n-2\n while not lastIndIsMatch(lastInd):\n if lb > ub:\n return (-1, -1)\n if target >= firstAndLastChars(wordListCustomSorted[lastInd]):\n lb = lastInd + 1\n else:\n ub = lastInd\n lastInd = (lb + ub) // 2\n return firstInd, lastInd" ]
[ "0.75175416", "0.7488159", "0.6926305", "0.6626271", "0.6542914", "0.63874125", "0.63204014", "0.630162", "0.62365013", "0.622492", "0.611714", "0.6113025", "0.609781", "0.6046568", "0.604634", "0.6016963", "0.6004262", "0.60025907", "0.6000019", "0.59904724", "0.59786916", "0.59679914", "0.59667176", "0.59606785", "0.5943965", "0.59436744", "0.5920611", "0.5917627", "0.5886265", "0.58676785" ]
0.8192264
0
Given a string comprising of opening parentheses, closing parentheses and asterisks (*), where * could represent an opening parenthesis, a closing parenthesis or an empty string, the function 'isBalanced()' takes in the string and determines if the string is balanced or not. It returns True if it is balanced and False otherwise.
def isBalanced(string):
    assert type(string)==str
    if any(a not in '(*)' for a in string):
        raise AssertionError
    string = list(string)  #Converts the inputted string to a list.

    #Loops through the list, checks for opening and closing parentheses and removes them from the list.
    k = 0
    while True:
        if k>=len(string)-1 or len(string)==0:
            break
        if string[k]=='(':
            if ')' in string[k:]:
                b = string[k:].index(')')
                c = string.pop(k)
                d = string.pop(k+b-1)
            else:
                break
        else:
            k+=1
            continue

    #Checks if the list is empty and returns True
    if string==[]:
        return True

    #Loops through the list, checks for opening parentheses as '*' and closing parentheses and removes them from the list.
    k = 0
    while True:
        if k>=len(string)-1 or len(string)==0:
            break
        if string[k]=='*':
            if ')' in string[k:]:
                b = string[k:].index(')')
                c = string.pop(k)
                d = string.pop(k+b-1)
            else:
                break
        else:
            k+=1
            continue

    #Checks if the list is empty and returns True
    if string==[]:
        return True

    #Loops through the list, checks for opening parentheses and closing parentheses as '*' and removes them from the list.
    k = 0
    while True:
        if k>=len(string)-1 or len(string)==0:
            break
        if string[k]=='(':
            if '*' in string[k:]:
                b = string[k:].index('*')
                c = string.pop(k)
                d = string.pop(k+b-1)
            else:
                break
        else:
            k+=1
            continue

    #Checks if the list is empty and returns True
    if string==[]:
        return True

    #Checks if the list contains only asterisks and returns True.
    if not any(a not in '*' for a in string):
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_balanced_parens(string):\n\n parens = 0\n\n for char in string:\n if char == \"(\":\n parens += 1\n elif char == \")\":\n parens -= 1\n\n if parens < 0:\n return False\n\n return parens == 0", "def balanced_parenths(string):\n balanced = 0\n for c in string:\n if c == '(':\n balanced += 1\n elif c == ')':\n balanced -= 1\n return balanced == 0", "def is_balanced(exp: str) -> bool:\n if len(exp) % 2 != 0:\n print(\"expression is not balanced because it doesn't have at least 1 closing parenthesis;\")\n print(\"its not a balanced expression; the length is odd\")\n return False\n\n if len(exp) == 0:\n print(\"empty expression, balance\")\n return True\n\n open_list = [\"[\", \"{\", \"(\"]\n closed_list = [\"]\", \"}\", \")\"]\n s = MyStack()\n for i in range(len(exp) // 2):\n if exp[i] in open_list:\n s.push(exp[i])\n\n for i in range(len(exp) // 2, len(exp)):\n close_pos = closed_list.index(exp[i])\n open_pos = open_list.index(s.pop())\n if close_pos == open_pos:\n continue\n else:\n return False\n\n return True", "def isBalanced(s):\n left = list(s)\n right = []\n\n result = 'YES'\n# breakpoint()\n while left or right: # if not empty, enter loop\n if (not left) and right:\n # left is empty but right is not empty, we have a problem\n result = 'NO'\n break\n\n else:\n x = left.pop()\n if x in set('})]'):\n right.append(x)\n elif x == '{' and not right:\n # cannot have hanging opening brackets; right.pop()\n # when right is empty generates error\n result = 'NO'\n break\n\n elif x == '{' and right[-1] == '}':\n right.pop()\n elif x == '(' and not right:\n result = 'NO'\n break\n\n elif x == '(' and right[-1] == ')':\n right.pop()\n elif x == '[' and not right:\n result = 'NO'\n break\n elif x == '[' and (right[-1] == ']'):\n right.pop()\n else:\n result = 'NO'\n break\n print(result)\n return result", "def parentheses_balanced(astr):\n\n if astr == '':\n return True\n\n start = 0\n end = len(astr) - 1\n found_open = False\n found_closed = False\n\n while True:\n\n while found_open is False:\n if astr[start] == '(':\n found_open = True\n elif astr[start] == ')':\n return 'Invalid'\n elif start >= end:\n return 'Valid'\n\n start += 1\n \n found_open = False\n\n while found_closed is False:\n if astr[end] == ')':\n found_closed = True\n elif astr[end] == '(' or end <= start:\n return 'Invalid'\n\n end -= 1\n\n found_closed = False", "def balanced_string(string):\n stack = create_stack()\n pos = 0\n while pos < len(string):\n if string[pos] in '[{(':\n stack.push(string[pos])\n elif string[pos] in ']})':\n pair = stack.pop()\n if not match(string[pos], pair):\n return False\n pos = pos+1\n #return stack.length()\n if stack.length() == 0:\n return True\n else:\n return False", "def is_balanced(content):\n\n if content.find('{') == -1:\n return False\n stack = []\n push_chars, pop_chars = '({', ')}'\n for c in content:\n if c in push_chars:\n stack.append(c)\n elif c in pop_chars:\n if not len(stack):\n return False\n else:\n stack_top = stack.pop()\n balancing_bracket = push_chars[pop_chars.index(c)]\n if stack_top != balancing_bracket:\n return False\n return not stack", "def has_balanced_parens(phrase):\n\n# result = False\n\n# open_count = 0\n# close_count = 0\n\n# for char in phrase:\n# if char == \"(\":\n# open_count += 1\n\n# if char == \")\":\n# close_count += 1\n \n# if open_count < close_count:\n# return result \n\n\n# if open_count == close_count:\n# result = True\n\n \n# #open has to come first \n# return result\n\n # pair = \"()\"\n\n # if char == \"(\": #no open parenthesis 
then false\n# ( ) (\n\n# )(\n #for every open parenthesis there has to be a closed parenthesis \n\n\n parenthesis = []\n result = True\n\n for char in phrase:\n if char == \"(\":\n parenthesis.append(char)\n if char == \")\" and len(parenthesis) < 1: #seeing a closed before an open \n result = False\n if char == \")\" and len(parenthesis) >= 1:\n parenthesis.pop()\n \n\n if len(parenthesis) == 0:\n return result \n\n else: \n result = False \n return result", "def BalancedBrackets(Str):\n stack = []\n for parenthesis in Str:\n if parenthesis == '{' or parenthesis == '(' or parenthesis == '[':\n stack.append(parenthesis)\n elif parenthesis == '}' or parenthesis == ')' or parenthesis == ']':\n if len(stack) < 1:\n return False\n last_item = stack.pop()\n if not Compare(last_item, parenthesis):\n return False\n if len(stack) != 0:\n return False\n\n return True", "def has_balanced_parens(string):", "def is_balanced(input_string):\n\n stack = Stack()\n for item in input_string:\n if item in MAPPING.values():\n stack.push(item)\n else:\n if stack.isEmpty():\n return False\n value = stack.pop()\n if value != MAPPING[item]:\n return False\n\n if not stack.isEmpty():\n return False\n return True", "def balanced(expstr):\n pc = 0\n for c in expstr:\n if c == '(':\n pc += 1\n elif c == ')':\n pc -= 1\n if pc < 0:\n return False\n if pc == 0:\n return True\n else:\n return False", "def are_parens_balanced(symbols):\n\n # make a stack\n parens = Stack()\n\n for char in symbols:\n\n if char == \"(\":\n parens.push(char) # push onto stack\n\n elif char == \")\":\n if parens.is_empty():\n return False\n else:\n parens.pop() # pop from stack\n\n return parens.is_empty()", "def is_balanced(par_str):\n\n left_par = \"{[(\"\n right_par = \"}])\"\n\n if len(par_str) % 2 != 0:\n return False\n \n # prevents in index error\n if par_str[0] in right_par:\n return False\n \n while len(par_str) < 0:\n for ind, par in par_str:\n if par not in left_par and par[ind] == par[ind - 1]:\n par_str.pop(ind)\n par_str.pop(ind - 1)\n print(par_str)\n break\n else:\n return False\n return True", "def is_paired(input_string):\n\n input_array = list(input_string)\n opening_counts = {}\n closing_counts = {}\n\n for char in OPENING_CHARS:\n opening_counts[char] = input_array.count(char)\n\n for char in CLOSING_CHARS:\n closing_counts[char] = input_array.count(char)\n\n balanced = True\n\n for char in OPENING_CHARS:\n if opening_counts[char] != closing_counts[CHAR_MAP[char]]:\n balanced = False\n\n well_ordered = True\n\n if input_string != '' and input_string[0] in CLOSING_CHARS:\n well_ordered = False\n\n opened_parens = []\n\n for char in input_string:\n if char in OPENING_CHARS:\n opened_parens.append(char)\n\n # If We've logged opening parens, and now we're at a closing one...\n if char in CLOSING_CHARS and opened_parens:\n # Make sure that it matches the most-recently opened paren.\n if char != CHAR_MAP.get(opened_parens[-1], False):\n well_ordered = False\n\n # Move cursor to next most recent\n opened_parens.pop()\n\n return balanced and well_ordered", "def balanced_parentheses(self, string):\n for i in string:\n\n if i == '(' or i == '[' or i == '{':\n stack.push(i)\n\n if ((stack.peek() == '(' and i == ')') or (stack.peek() == '[' and i == ']') or (\n stack.peek() == '{' and i == '}')) and stack.size() > 0:\n stack.pop()\n continue\n\n if i == ')' or i == ']' or i == '}':\n stack.push(i)\n\n if stack.size() == 0:\n print(\"Balanced Parenthesis \")\n else:\n print(\"Parenthesis is not Balanced \")", "def 
test_balanced(self):\n self.assertTrue(is_closed('(())') == 0)", "def is_balanced_other_solution(exp):\n closing = [\"}\", \")\", \"]\"]\n stack = MyStack()\n for character in exp:\n if character in closing:\n if stack.is_empty():\n return False\n top_element = stack.pop()\n if character == \"}\" and top_element != \"{\":\n return False\n if character == \")\" and top_element != \"(\":\n return False\n if character == \"]\" and top_element != \"[\":\n return False\n else:\n stack.push(character)\n if not stack.is_empty():\n return False\n return True", "def checkString(self,newick):\n\t\tisBalanced=True\n\t\tstack=[]\n\t\tfoundComma=False\n\t\tfor char in newick:\n\t\t\tif char == \"(\":\n\t\t\t\tstack.append(char)\n\t\t\telif char == \",\":\n\t\t\t\tfoundComma=True\n\t\t\telif char == \")\":\n\t\t\t\tif foundComma==True:\n\t\t\t\t\tstack.pop()\n\t\t\t\t\tfoundComma=False\n\t\t\t\telse:\n\t\t\t\t\tisBalanced=False\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tpass\n\t\tif len(stack) != 0:\n\t\t\tisBalanced=False\n\t\treturn isBalanced", "def _is_balanced(node):\n\n # An empty node is always balanced\n if not node:\n return True\n\n # If the heights differ by more than 1, return False\n if abs(_height(node.left) - _height(node.right)) > 1:\n return False\n\n return _is_balanced(node.left) & _is_balanced(node.right)", "def bracketsBalance(exp):\n stk = LinkedStack()\n for ch in exp:\n if ch in ['[' '(']:\n stk.push(ch)\n elif ch in [']', ')']:\n if stk.isEmpty():\n return False\n chFromStack = stk.pop()\n if ch == ']' and chFromStack != '[' or \\\n ch == ')' and chFromStack != '(':\n return False\n return stk.isEmpty()", "def isValid(self, s: str) -> bool:\n open_brackets = [] # stack of open but not closed brackets\n for char in s:\n if char in ['(', '[', '{']:\n open_brackets.append(char)\n else:\n try:\n complement = open_brackets.pop()\n except IndexError:\n return False\n else:\n if complement + char not in ['()', '[]', '{}']:\n return False\n return not len(open_brackets)", "def validate_balanced_parenthesis(s):\n positions = [] # container for position of unbalanced parenthesis\n # counter for comments\n k = 0 # 0 = no comment range\n comment_open1 = False # comment indicator for /* */ comments\n comment_open2 = False # comment indicator for -- comments\n quote_open1 = False # quote '\n quote_open2 = False # quote \"\n for i, c in enumerate(s):\n if c == \"(\" and k == 0:\n positions.append(i)\n elif c == \")\" and k == 0:\n if len(positions) == 0:\n return {\n \"exit_code\": 1,\n \"val_lines\": find_line_number(s, [i]),\n \"total_lines\": count_lines(s)\n }\n else:\n positions.pop()\n elif (\n s[i:i+2] == \"/*\" and\n not comment_open1 and\n not comment_open2 and\n not quote_open1 and\n not quote_open2\n ): # if there is an opening comment /*\n k += 1\n comment_open1 = True\n elif (\n s[i:i+2] == \"*/\" and\n comment_open1 and\n not comment_open2 and\n not quote_open1 and\n not quote_open2\n ): # if there is a closing comment */\n k -= 1\n comment_open1 = False\n elif (\n s[i:i+2] == \"--\" and\n not comment_open1 and\n not comment_open2 and\n not quote_open1 and\n not quote_open2\n ): # if there is an opening comment --\n k += 1\n comment_open2 = True\n elif (\n (c == \"\\n\" or s[i:i+3] == \"[c]\") and\n not comment_open1 and\n comment_open2 and\n not quote_open1 and\n not quote_open2\n ): # if the -- comment ends\n k -= 1\n comment_open2 = False\n elif (\n c == \"'\" and\n not comment_open1 and\n not comment_open2 and\n not quote_open1 and\n not quote_open2\n ): # if opening quote '\n k 
+= 1\n quote_open1 = True\n elif (\n c == \"'\" and\n not comment_open1 and\n not comment_open2 and\n quote_open1 and\n not quote_open2\n ): # if opening quote '\n k -= 1\n quote_open1 = False\n elif (\n c == '\"' and\n not comment_open1 and\n not comment_open2 and\n not quote_open1 and\n quote_open2\n ): # if opening quote '\n k += 1\n quote_open2 = True\n elif (\n c == '\"' and\n not comment_open1 and\n not comment_open2 and\n not quote_open1 and\n quote_open2\n ): # if opening quote '\n k -= 1\n quote_open2 = False\n if len(positions) == 0:\n return {\n \"exit_code\": 0,\n \"total_lines\": count_lines(s)\n }\n else:\n return {\n \"exit_code\": 1,\n \"val_lines\": find_line_number(s, positions),\n \"total_lines\": count_lines(s)\n }", "def is_matched(expression):\n\n balance = []\n\n for char in expression:\n if char == \"{\" or char == \"[\" or char == \"(\":\n balance.append(char)\n\n elif char == \"}\":\n if balance[-1] == \"{\":\n balance.pop()\n else:\n return False\n\n elif char == \"]\":\n if balance[-1] == \"[\":\n balance.pop()\n else:\n return False\n\n elif char == \")\":\n if balance[-1] == \"(\":\n balance.pop()\n else:\n return False\n\n if len(balance) == 0:\n return True", "def is_balanced(self):\n\n def _height(node):\n \"\"\"Calculate the height of a node\"\"\"\n\n if not node:\n return 0\n\n return 1 + max(_height(node.left), _height(node.right))\n\n def _is_balanced(node):\n \"\"\"Check if the subtree at a node is balanced\"\"\"\n\n # An empty node is always balanced\n if not node:\n return True\n\n # If the heights differ by more than 1, return False\n if abs(_height(node.left) - _height(node.right)) > 1:\n return False\n\n return _is_balanced(node.left) & _is_balanced(node.right)\n\n return _is_balanced(self)", "def is_balanced(root):\n return depth(root) != -1;", "def parentheses_are_uneven(input_string):\n pcounter = 0\n for char in input_string:\n if char == '(':\n pcounter += 1\n elif char == ')':\n pcounter -= 1\n if pcounter != 0:\n return False\n else:\n return True", "def isValid(self, s: str) -> bool:\n stack = []\n \n mapping = {\n \")\":\"(\",\n \"}\":\"{\",\n \"]\":\"[\"\n }\n \n for char in s:\n if char not in mapping:\n stack.append(char)\n \n else:\n top_element = stack.pop() if stack else \"#\"\n \n if mapping[char] != top_element:\n return False\n \n return not stack", "def is_valid(s):\n in_str = False\n bb = 0\n for c in s:\n if c == '(' and not in_str:\n bb += 1\n elif c == ')' and not in_str:\n bb -= 1\n if bb < 0:\n return False\n elif c == '\\\"':\n in_str = not in_str\n return bb == 0", "def brackets_match(string):\n expected = Stack()\n for character in string:\n if character == \"(\":\n expected.push(\")\")\n elif character == \"[\":\n expected.push(\"]\")\n elif character in \")]\":\n if expected and character == expected.top():\n expected.pop()\n else:\n return False\n return expected.is_empty()" ]
[ "0.76423204", "0.7475379", "0.7464924", "0.7454584", "0.73763055", "0.7281661", "0.72627336", "0.72251475", "0.7140217", "0.70825875", "0.70499676", "0.6990417", "0.68499714", "0.68152845", "0.680589", "0.65978754", "0.6583618", "0.6527759", "0.6501284", "0.64587474", "0.64300203", "0.64189386", "0.6367709", "0.6254574", "0.61018413", "0.60574716", "0.603915", "0.5982129", "0.59401864", "0.59065944" ]
0.80695677
0
add. Add a new flavor
def add(self, flavor):
    # check if the flavor already exist.
    # Note: If it does, no LookupError will be raised
    try:
        self.get(flavor.flavor_id)
    except LookupError:
        pass
    else:
        raise ValueError("A flavor with the id '%s' already exists"
                         % flavor.flavor_id)

    providers = dict((p.provider_id, p.provider_url)
                     for p in flavor.providers)

    args = {
        'flavor_id': flavor.flavor_id,
        'providers': providers
    }

    self.session.execute(CQL_CREATE, args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_add_flavor(self):\n for flavor_id, flavor in OPENSTACK_FLAVOR.items():\n self.cmd._add_flavor(flavor, flavor_id)\n ralph_flavor = CloudFlavor.objects.get(flavor_id=flavor_id)\n self.assertEqual(ralph_flavor.name, flavor['name'])\n self.assertEqual(ralph_flavor.cloudprovider, self.cloud_provider)\n self.assertIn(flavor['tag'], ralph_flavor.tags.names())\n self.assertEqual(flavor['cores'], ralph_flavor.cores)\n self.assertEqual(flavor['memory'], ralph_flavor.memory)\n self.assertEqual(flavor['disk'], ralph_flavor.disk)", "def _create_flavor(self, context, flavor):\n flavor_dict = flavor.__dict__\n name = self.prefix + flavor.name\n flavorid = self.prefix + flavor.id\n memory = flavor.ram\n vcpus = flavor.vcpus\n root_gb = flavor.disk\n ephemeral_gb = flavor_dict.get('OS-FLV-EXT-DATA:ephemeral', 0)\n u_swap = flavor_dict.get('swap', 0)\n rxtx_factor = flavor_dict.get('rxtx_factor', 1.0)\n is_public = flavor_dict.get('os-flavor-access:is_public', True)\n if u_swap == \"\":\n swap = 0\n else:\n swap = int(u_swap)\n\n try:\n return flavors.create(name, memory, vcpus, root_gb,\n ephemeral_gb=ephemeral_gb,\n flavorid=flavorid, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n except exception.InstanceExists as err:\n raise err", "def flavor(self, name=None):\n raise NotImplementedError", "def flavor(self, flavor):\n self._flavor = flavor", "def create_flavor(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n flavor = conn.load_balancer.create_flavor(\n name=data['flavor']['name'],\n flavor_profile_id=data['flavor']['flavor_profile_id'],\n description=data['flavor'].get('description'),\n enabled=data['flavor'].get('enabled'),\n )\n\n return _get_sdk_object_dict(flavor)", "def add_fruit(self):\n # print('fruit added to container')", "def flavors(self, **kwargs):\n raise NotImplementedError", "def create_flavor(cls, values):\n return cls.dbdriver.create_flavor(values)", "def post(self, request):\n kwargs = {\n 'flavor': request.DATA.get('flavor')\n }\n return create_flavor(request, **kwargs)", "def test_create_flavor_existing(self):\n # Create Flavor\n flavor_settings = FlavorConfig(\n name=self.flavor_name, ram=1, disk=1, vcpus=1)\n self.flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor = self.flavor_creator.create()\n self.assertTrue(validate_flavor(self.nova, flavor_settings, flavor))\n\n flavor_creator_2 = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor2 = flavor_creator_2.create()\n\n self.assertEqual(flavor.id, flavor2.id)", "def _insert_pvc_flavor_extraspecs(self, context, flavor, extra_specs):\n flavor_created = self._create_flavor(context, flavor)\n if extra_specs:\n self._update_flavor_extraspecs(context,\n flavor_created.get('flavorid'),\n extra_specs)", "def create_flavor(self):\n logger.debug(\"Creating VM Flavor\")\n rc, flavor_id = self.cal.create_flavor(self.account, self.flavor)\n assert rc == RwTypes.RwStatus.SUCCESS\n\n return flavor_id", "def resize(self, flavor):\n # ram size of the instance\n if type(flavor) == Flavor:\n flavor = flavor.bookmark_link\n elif type(flavor) == dict:\n flavor = self.parent.flavors().find(**flavor)\n elif type(flavor) in (int, str, unicode):\n flavor = str(flavor)\n else:\n # TODO : proper error\n raise Exception()\n\n self.client.post(self.path+'/action', { 'resize': {'flavorRef': flavor} })\n return True", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def 
test_create_flavor(self):\n # Create Flavor\n flavor_settings = FlavorConfig(\n name=self.flavor_name, ram=1, disk=1, vcpus=1)\n self.flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor = self.flavor_creator.create()\n self.assertTrue(validate_flavor(self.nova, flavor_settings, flavor))", "def add(self, bento_name, bento_version):", "def _update_flavor_extraspecs(self, context, flavorid, flavor_extraspecs):\n db.flavor_extra_specs_update_or_create(context,\n flavorid,\n flavor_extraspecs)", "def update_flavor(cls, flavor_uuid, values):\n return cls.dbdriver.update_flavor(flavor_uuid, values)", "def create_flavor_profile(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n flavor_profile = conn.load_balancer.create_flavor(\n name=data['flavor_profile']['name'],\n provider_name=data['flavor_profile']['provider_name'],\n flavor_data=data['flavor_profile']['flavor_data'],\n )\n\n return _get_sdk_object_dict(flavor_profile)", "def flavor(self, name=None):\n return self.find(self.flavors(), name=name)", "def add_to_model(model, loader_module, data=None, code=None, env=None):\n parms = {MAIN: loader_module}\n parms[PY_VERSION] = PYTHON_VERSION\n if code:\n parms[CODE] = code\n if data:\n parms[DATA] = data\n if env:\n parms[ENV] = env\n return model.add_flavor(FLAVOR_NAME, **parms)", "def add_favorite(self, product_id: str, substitute_id: str) -> None:\n add_favorite_request = \"INSERT INTO substituted_product VALUES (%s, %s)\"\n self.insert(add_favorite_request, (substitute_id, product_id))", "def add_recipe(self, recipe): \n\t\tfor key, val in self.recipes_list.items():\n\t\t\tif key == recipe.recipe_type:\n\t\t\t\tself.recipes_list[key][recipe.name] = recipe", "def add_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._PUT(path))", "def add_variant(self, variant):\n self.__variants.append(variant)\n self.__variants.sort(reverse=True)", "def register(self, queue, project=None, flavor=None):\n\n # NOTE(gengchc): if exist, get queue's pool.flavor:\n # if queue's pool.flavor is different, first delete it and add it.\n # Otherwise, if the flavor in the meteredata of the queue is\n # modified, the catalog will be inconsistent.\n if self._catalogue_ctrl.exists(project, queue):\n catalogue = self._catalogue_ctrl.get(project, queue)\n oldpoolids = catalogue['pool']\n oldpool = self._pools_ctrl.get(oldpoolids)\n oldflavor = oldpool['flavor']\n msgtmpl = _(u'register queue to pool: old flavor: %(oldflavor)s '\n ', new flavor: %(flavor)s')\n LOG.info(msgtmpl,\n {'oldflavor': oldflavor, 'flavor': flavor})\n if oldpool['flavor'] != flavor:\n self._catalogue_ctrl.delete(project, queue)\n\n if not self._catalogue_ctrl.exists(project, queue):\n if flavor is not None:\n flavor = self._flavor_ctrl.get(flavor, project=project)\n pools = self._pools_ctrl.get_pools_by_flavor(\n flavor=flavor,\n detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s')\n LOG.info(msgtmpl,\n {'flavor': flavor.get('name', None)})\n else:\n # NOTE(flaper87): Get pools assigned to the default\n # group `None`. 
We should consider adding a `default_group`\n # option in the future.\n pools = self._pools_ctrl.get_pools_by_flavor(detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n\n if not pool:\n # NOTE(flaper87): We used to raise NoPoolFound in this\n # case but we've decided to support automatic pool\n # creation. Note that we're now returning and the queue\n # is not being registered in the catalogue. This is done\n # on purpose since no pool exists and the \"dummy\" pool\n # doesn't exist in the storage\n if self.lookup(queue, project) is not None:\n return\n raise errors.NoPoolFound()\n msgtmpl = _(u'register queue to pool: new flavor: None')\n LOG.info(msgtmpl)\n\n msgtmpl = _(u'register queue: project:%(project)s'\n ' queue:%(queue)s pool:%(pool)s')\n LOG.info(msgtmpl,\n {'project': project,\n 'queue': queue,\n 'pool': pool})\n self._catalogue_ctrl.insert(project, queue, pool)", "def add_service(self, service):\n self.app.add_service(service)", "def test_aws_service_api_flavor_get(self):\n pass", "def add_fu(self, state):\r\n self._fu_set.add(state)" ]
[ "0.66554", "0.6539999", "0.64634496", "0.6276409", "0.6224643", "0.6144736", "0.60846186", "0.6082218", "0.59546834", "0.5807027", "0.5702086", "0.56974804", "0.56030923", "0.55301595", "0.55301595", "0.5521269", "0.5513947", "0.5496889", "0.5475897", "0.5458206", "0.5424259", "0.54221284", "0.5397984", "0.53759676", "0.53627557", "0.535517", "0.53425455", "0.52657074", "0.52401155", "0.51913655" ]
0.7390688
0
Differentiate between classes and ids in the way jQuery does (#id, .class)
def class_or_id(selector):
    if selector[0] == '.':
        soup_selector = 'class'
    elif selector[0] == '#':
        soup_selector = 'id'
    else:
        soup_selector = ''
    return [soup_selector, selector[1:]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def identify_class(self, cls):", "def CSSClasses(self):", "def choose_class(self, *args, **kwargs):", "def has_css_class(self, selector, klass):\n from selenium.webdriver.common.by import By\n\n return (\n self.selenium.find_element(\n By.CSS_SELECTOR,\n selector,\n )\n .get_attribute(\"class\")\n .find(klass)\n != -1\n )", "def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)", "def get_elements_with_class(soup, elt, cls): \n return soup.findAll(elt, {'class': cls})", "def class_id(self):\n return self._class_id", "def check_class_in_element():\n nonlocal class_not_expected\n result = []\n expected_class_ls = expected_class.split(\" \")\n actual_class = element.get_attribute(\"class\")\n for class_ in expected_class_ls:\n for element_class_ in actual_class.split(\" \"):\n if element_class_ == class_:\n result.append(element)\n if len(result) == len(expected_class_ls):\n return element\n if class_not_expected is None:\n class_not_expected = actual_class\n return False", "def find_general_class(self, class_id):\n for class_ in my_classes:\n if class_.class_id == class_id:\n return class_\n\n return None", "def class_id(self) -> str:\n return self._class_id", "def class_id(self) -> str:\n return self._class_id", "def getSameClass(_class, classes, data):\n _class = int(_class)\n assert (isinstance(_class, int))\n assert (classes.shape[1] == 1)\n assert (data.shape[1] == 5)\n\n mask = classes == _class\n return data[mask.squeeze()]", "def classes(attrs):\n return attrs.get('class', '').split()", "def get_classes(html):\n # elements = html.find_all(\"span\", \"code\")\n # titles = html.find_all(\"span\", \"title\")\n # classes = []\n # for i in range(len(elements)):\n # item = elements[i]\n # tit = titles[i]\n # classes += [(item.text.replace('\\xa0', ' '), tit.text.replace('\\xa0', ' '))]\n # return classes", "def is_same_class(obj, a_class):\n return type(obj) is a_class", "def is_same_class(obj, a_class):\n return type(obj) is a_class", "def get_class_id_list_from_class_list(self):\n tr_list = self.soup.find(\n id='main-content'\n ).table.tbody.find_all('tr')\n href_list = []\n for tr in tr_list:\n qs = urllib.parse.urlparse(tr.find(title='Edit')['href'])[4]\n id_ = urllib.parse.parse_qs(qs)['id'][0]\n href_list.append(id_)\n return href_list", "def is_same_class(obj, a_class):\n return type(obj) == a_class", "def is_same_class(obj, a_class):\n return type(obj) == a_class", "def match_classvar(typ):\n return abstract_utils.match_type_container(typ, \"typing.ClassVar\")", "def html_class(cls):\n return ' '.join(cls.html_classes)", "def is_same_class(obj, a_class):\n return (type(obj) == a_class)", "def rclass_id_lookup(cur):\n if 'rclass_id' not in _tables:\n cur.execute(\"SELECT name, rclass_id FROM rclass\")\n _tables['rclass_id'] = dict(cur)\n return _tables['rclass_id']", "def is_same_class(obj, a_class):\n return (type(obj) is a_class)", "def is_same_class(obj, a_class):\n return(type(obj) is a_class)", "def is_same_class(obj, a_class):\n return(type(obj) == a_class)", "def test_classes(self):\r\n css_classes = [\r\n ('unsubmitted', 'unanswered'),\r\n ('incomplete', 'incorrect'),\r\n ('queued', 'processing'),\r\n ('correct', 'correct'),\r\n ('test', 'test'),\r\n ]\r\n for status, classname in css_classes:\r\n statobj = inputtypes.Status(status)\r\n self.assertEqual(statobj.classname, classname)", "def html_class(self):\n return '{0} {1}'.format(\n 
self.primary_html_class, ' '.join(self.html_classes)\n )", "def isClass(self, className):\n return self.characterClass == className or self.baseClass == className", "def has_id_field(class_or_instance: Any) -> bool:\n return hasattr(class_or_instance, _ID_FIELD_NAME)" ]
[ "0.5969814", "0.5116526", "0.5043086", "0.49965757", "0.49743137", "0.48656708", "0.48378223", "0.4804341", "0.48037136", "0.47905084", "0.47905084", "0.47669527", "0.47648126", "0.46888798", "0.46766043", "0.46766043", "0.46712658", "0.46682423", "0.46682423", "0.46661127", "0.4665625", "0.465759", "0.46542665", "0.46530622", "0.46449396", "0.46281344", "0.46134073", "0.45781925", "0.45777386", "0.45697403" ]
0.66846955
0
Convert a boolean to a color value. Used in a binding.
def _bool_to_color(value) -> int:
    if value is True:
        return RED
    return BLACK
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bool_converter(self, bool_var):\n if bool_var == True:\n result = 1\n elif bool_var == False:\n result = 0\n return result", "def boolean(self, state, label=None):\n self.savepos()\n label = self._colorize(label, fg = \"base0\")\n\n msg = (self._colorize(\"☑\", fg = \"green\") if state else self._colorize(\"☒\", fg = \"red\")) + \" \" + label\n\n self.out.write(msg)\n self.restorepos()", "def convert_bool(self, v, t):\n return relay.const(v, type_to_np_dtype(t))", "def convert_bool(self, v, t):\n return v.asnumpy().item()", "def to_es_bool(boolean_value):\n return '1' if boolean_value else '0'", "def bool_to_python(self, value):\r\n if value == 'true':\r\n return True\r\n elif value == 'false':\r\n return False", "def convertToBoolean(boolean: bool) -> bool:\n ...", "def translate_if_c_bool(value):\n\n if value.__class__ == bool:\n if value == True:\n return \"1\"\n else:\n return \"0\"\n else:\n return value", "def convert_bool(self, v, t):\n return relay_from_scalar(v, type_to_np_dtype(t))", "def convert_boolean(cls, param, value):\r\n return True", "def convertToInt(boolean: bool) -> int:\n ...", "def getColorFlag(color):\n if color == 0: # MONO\n return 0\n elif color == 1: # BAYER\n return -1\n elif color == 2: # AS IS RBG\n return 1", "def __boolToChar(self, boolean):\n\n if boolean:\n return '1'\n else:\n return '0'", "def light(self, value: bool | int, /) -> None:", "def ret(x):\n color = true_color if x else false_color\n return np.tile(color, (SIZE, SIZE, 1)).astype(np.uint8)", "def visit_Boolean(self, node: Boolean) -> Constant:\n\n if node.token.type == TokenType.FALSE:\n return Constant(IntType(1), 0)\n else:\n return Constant(IntType(1), 1)", "def _cast_boolean(value):\n _BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,\n '0': False, 'no': False, 'false': False, 'off': False, '': False}\n value = str(value)\n if value.lower() not in _BOOLEANS:\n raise ValueError('Not a boolean: %s' % value)\n\n return _BOOLEANS[value.lower()]", "def CONST_BOOL(self, t):\n t.value = False if t.value == '#false' else True\n return t", "def _format_bool_(value):\n\n from ocgis.util.helpers import format_bool\n\n return format_bool(value)", "def convert_bool(string):\n if string == 'True':\n return True, True\n elif string == 'False':\n return True, False\n else:\n return False, False", "def bool_to_on_off(boolean: bool):\n if boolean:\n return \"on\"\n return \"off\"", "def type_cast(value):\n\t\tbool_values = {'True': True, 'False': False}\n\t\tif value in bool_values:\n\t\t\treturn bool_values[value]\n\n\t\treturn value", "def format_bool(b):\n return \"YES\" if b else \"NO\"", "def _bool_to_int(self, bool_arg):\n if bool_arg == True:\n return 1\n else:\n return 0", "def to_python(self, value):\n return force_bool(value)", "def _setForBinding (self, value):\n if not isinstance(value, bool):\n raise TypeError(value)\n self.__forBinding = value\n return value", "def bool(a):\n # Booleans need to be converted to integers for Theano\n if cf.use_theano and isinstance(a, (builtins.bool, np.bool_)):\n return np.int8(a)\n elif cf.use_theano or is_theano_object(a):\n return a\n else:\n return builtins.bool(a)", "def cast_boolean(value):\n if isinstance(value, bool):\n return value\n elif isinstance(value, str):\n value = value.lower()\n if (value == \"true\" or value == \"1\"):\n return True\n elif value == \"false\" or value == \"0\":\n return False\n else:\n raise UndefinedValueError(f\"{value} - is not a valid boolean\")\n else:\n raise UndefinedValueError(f\"{value} - is not a 
valid boolean\")", "def convertToByte(boolean: bool) -> int:\n ...", "def convertToString(boolean: bool) -> str:\n ..." ]
[ "0.6550001", "0.6254398", "0.6227761", "0.61124694", "0.60178113", "0.6006656", "0.5986612", "0.59549403", "0.58936", "0.5892532", "0.5844509", "0.58332866", "0.5804236", "0.5769234", "0.5754589", "0.57381105", "0.5738081", "0.5699414", "0.56867105", "0.5628775", "0.5615144", "0.55744207", "0.5555237", "0.55516136", "0.5534699", "0.552178", "0.55170053", "0.551359", "0.55081433", "0.5501862" ]
0.83635396
0
Simple integration test for example.
def test_example_runs(self):
    run_example(
        verbose=False,
        testapp=self.testapp,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def test_example(self):\n self.assertEqual(self.example.get_example(), True)", "def test():\n pass", "def test_basic_execution(self):", "def test(self):\n pass", "def unitary_test():", "def setUp(self):\n self.example = Example()", "def test():", "def test():", "def tests():", "def test_main():\n # Setup\n # Exercise\n # Verify", "def test_integration1(self):\n self._test_integration(1)", "def test_dummy():", "def test(ctx):\n pass", "def test_single_test_case():\n pass", "def test_something():", "def test_execute_code_sample():\n hello = Hello()\n assert hello.out() == 'Hello, world!'", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def testing(self):\n print('test successful')", "def test_examples():\n argv = [\"py.test\", \"-examples\"]\n assert get_sargs(argv) is None", "def test_api_helloworld():\n with app.test_client() as c:\n response = c.get('/')\n assert response.status_code == 200", "def test():\r\n pass", "def test_script(self) -> None:\n main()", "def testApi(self):", "def test_orchestrator_http_simple(self):\n pass", "def TestOneStep(self):\n pass", "def test_for_client():" ]
[ "0.75707936", "0.75659436", "0.7493056", "0.7339084", "0.72260743", "0.72219384", "0.7164712", "0.71493244", "0.71493244", "0.71489894", "0.7075676", "0.7047602", "0.7043749", "0.70285046", "0.6992753", "0.6974311", "0.69304705", "0.6910975", "0.6910975", "0.6910975", "0.6910975", "0.6893194", "0.6871064", "0.6867324", "0.6859304", "0.685829", "0.68577313", "0.6850064", "0.6842165", "0.68220663" ]
0.77079654
0
Dummy function to redraw figures in the child classes
def redraw_figures(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redraw(event):\n if np.size(plt.get_figlabels()):\n #Need to check if figure is closed or not and only then do the following\n #operations. Else, the following operations will create a new figure\n ax.clear()\n drawRectangle(ax)\n fig.canvas.draw()\n else:\n pass", "def redraw(self):\n raise NotImplementedError()", "def update_figure(self):\n\n self.draw()", "def redraw(self, **kwargs):\n #src_dict = self.data_sources\n #self.remove_sources(src_dict.keys())\n self.renderers = {}\n #self.renderers = {}\n self.figure = self.draw_figure(**kwargs)\n #self.add_sources(src_dict)\n # todo does the old figure linger on?\n self.render_sources(self.data_sources)\n self.bk_pane.object = self.figure", "def redraw(self):\n dummy_figure = plt.figure()\n new_manager = dummy_figure.canvas.manager\n new_manager.canvas.figure = self.figure\n self.figure.set_canvas(new_manager.canvas)\n plt.show(block=False)", "def plot_refresh():\n figure.canvas.draw()", "def on_draw(self):\n # draw everything", "def refresh_self(self) -> None:\n self._logger.debug(\"running\")\n try:\n self.figure.canvas.draw()\n except Exception as e:\n self._logger.exception(\"issue with drawing canvas.\")\n self._logger.debug(\"done\")", "def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph()", "def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph(graph=self.graph, axes=self.subplot)\n self.draw_graph(graph=self.graph2, axes=self.subplot2)\n self.draw_mappings(self.mapping)", "def redraw_viz():\n\tglobal g_last_draw\n\tif (rospy.Time.now().to_sec() > (refresh_rate + g_last_draw)):\n\t\tg_last_draw = rospy.Time.now().to_sec()\n\t\t# redraw imu box\n\t\tdoDraw()", "def paint(self):\r\n pass", "def update_plot():\n pass", "def repaint(self):\n pass", "def redraw(self):\n self._create()", "def draw(self, force=False):\n for child in self.children.values():\n child.draw(force)", "def _draw_handler(self, bpy_dummy_self, bpy_dummy_context):\r\n self._drawRays()", "def draw(self):\n for obj in self.objects:\n obj.draw()", "def on_draw(self):\n self.clear()\n self.manager.draw()", "def setDrawing(self):\n self.graph_drawing=[]", "def draw(self):", "def refresh_svg_canvas(self):\n if self.ui.tabWidget.currentIndex() == 0:\n self.ui.svg_canvas.build_schematic()\n self.ui.svg_canvas.viewport().update()\n elif self.ui.tabWidget.currentIndex() in (1,2):\n self.ui.svg_canvas.build_pcb()\n self.ui.svg_canvas.viewport().update()\n else:\n raise Exception(\"Unknown view to draw\")", "def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None", "def setup_draw(self):\n pass", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]" ]
[ "0.69585615", "0.68990314", "0.6801519", "0.67597973", "0.6633574", "0.64148325", "0.6317707", "0.6250047", "0.6198424", "0.61798114", "0.61377364", "0.6077331", "0.60669315", "0.60655534", "0.60549927", "0.6053331", "0.60529304", "0.6050833", "0.604263", "0.6037915", "0.60376084", "0.601923", "0.5978028", "0.5971856", "0.5968721", "0.5968721", "0.5968721", "0.5968721", "0.5968721", "0.5968721" ]
0.78338176
0
Scroll canvas horizontally and redraw the image
def __scroll_x(self, *args, **kwargs):
    self.canvas.xview(*args)  # scroll horizontally
    self.__show_image()  # redraw the image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __scroll_x(self, *args, **kwargs):\n self.canvas_image.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def continuous_scroll(self, context):\n\n self.drawing.redraw_canvas(self.dy)\n \n return True", "def refresh(self):\n\n # Delete old image (if needed) \n if self.canvas_image_id:\n self.canvas.delete(self.canvas_image_id)\n if debug > 5:\n print \"refresh: New image (x\", self.zoom, \") \", (self.xint, self.yint), (self.canvas[\"width\"], self.canvas[\"height\"]), [self.zoom * s for s in self.isize]\n\n scaled_isize = [self.xint[1] - self.xint[0],\n self.yint[1] - self.yint[0]]\n\n # Create the image for the canvas\n self.image = self.generator_func(self.zoom, self.xint, self.yint)\n self.canvas_image_id = self.canvas.create_image(0, 0, anchor=N+W,\n image=self.image)\n\n # Figure out where scroll bars should be and put them there.\n if self.xint[0] == 0 and int(self.isize[0] * self.zoom) == self.xint[1]:\n self.hscroll.grid_remove()\n else:\n self.hscroll.grid()\n self.hscroll.set(mapped_number(self.xint[0],\n (0, self.isize[0] * self.zoom -1),\n (0, 1)),\n mapped_number(self.xint[1] -1,\n (0, self.isize[0] * self.zoom -1),\n (0, 1)))\n if self.yint[0] == 0 and int(self.isize[1] * self.zoom) == self.yint[1]:\n self.vscroll.grid_remove()\n else:\n self.vscroll.grid()\n self.vscroll.set(mapped_number(self.yint[0],\n (0, self.isize[1] * self.zoom -1),\n (0, 1)),\n mapped_number(self.yint[1] -1,\n (0, self.isize[1] * self.zoom -1),\n (0, 1)))", "def scrollDown_x(self):\r\n if self.x_stack<self.img.shape[2]-1:\r\n self.x_stack+=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5)\r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def redraw_canvas(self, dy):\n self.scroll = dy/20\n \n if self.scroll > 0:\n \n if self.min_cairo < -20:\n self.min_cairo = 0 \n self.min_text += 1 \n self.max_text += 1\n \n #When bottom of document is reached stop scrolling\n if self.max_text > self.line_count + 2:\n self.min_cairo = 0\n self.min_text = self.line_count - 50\n self.max_text = self.line_count\n self.scroll = 0\n \n elif self.scroll < 0:\n if self.min_cairo > 0:\n self.min_cairo = -20\n self.min_text -= 1\n self.max_text -=1\n\n #Do not scroll up if already at top of document\n if self.min_text < 0:\n self.min_cairo = 20\n self.min_text = 0\n self.max_text = 50\n self.scroll = 0\n \n #Do the scrolling\n self.min_cairo -= self.scroll\n \n self.max_cairo = self.min_cairo\n self.invalidate_canvas()", "def scrollUp_x(self):\r\n if self.x_stack>0:\r\n self.x_stack-=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5) \r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def scrollDown(self):\r\n\r\n if self.z_stack<self.img.shape[0]-1:\r\n self.z_stack+=1\r\n \r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2= self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def __scroll_y(self, *args, **kwargs):\n self.canvas_image.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def scroll(self, delta_x, delta_y):\n if 
delta_x < 0:\n shift_x = 0\n xend = self.width + delta_x\n dt_x = 1\n else:\n shift_x = self.width - 1\n xend = delta_x - 1\n dt_x = -1\n if delta_y < 0:\n y = 0\n yend = self.height + delta_y\n dt_y = 1\n else:\n y = self.height - 1\n yend = delta_y - 1\n dt_y = -1\n while y != yend:\n x = shift_x\n while x != xend:\n self.format.set_pixel(\n self, x, y, self.format.get_pixel(self, x - delta_x, y - delta_y)\n )\n x += dt_x\n y += dt_y", "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def draw(self, thing):\n thing.draw(self, Point([2,2]), flip=1)\n\n # configure the scroll region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)", "def __show_image(self):\n box_image = self.canvas_image.coords(self.container) # get image area\n box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas\n self.canvas_image.canvasy(0),\n self.canvas_image.canvasx(self.canvas_image.winfo_width()),\n self.canvas_image.canvasy(self.canvas_image.winfo_height()))\n self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n \n # Get scroll region box\n box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),\n max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = self.box_img_int[0]\n box_scroll[2] = self.box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = self.box_img_int[1]\n box_scroll[3] = self.box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),\n max(box_canvas[1], self.box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas_image.lower(self.imageid) # set image into background\n self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection", "def __show_image(self):\n box_image = self.canvas.coords(self.container) # get image area\n box_canvas = (self.canvas.canvasx(0), # get visible area of the canvas\n self.canvas.canvasy(0),\n self.canvas.canvasx(self.canvas.winfo_width()),\n self.canvas.canvasy(self.canvas.winfo_height()))\n box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n # Get scroll region box\n box_scroll = [min(box_img_int[0], box_canvas[0]), min(box_img_int[1], box_canvas[1]),\n max(box_img_int[2], box_canvas[2]), max(box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and 
box_scroll[2] == box_canvas[2]:\n box_scroll[0] = box_img_int[0]\n box_scroll[2] = box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = box_img_int[1]\n box_scroll[3] = box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n if self.__huge and self.__curr_img < 0: # show huge image\n h = int((y2 - y1) / self.imscale) # height of the tile band\n self.__tile[1][3] = h # set the tile band height\n self.__tile[2] = self.__offset + self.imwidth * int(y1 / self.imscale) * 3\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, h) # set size of the tile band\n self.__image.tile = [self.__tile]\n image = self.__image.crop((int(x1 / self.imscale), 0, int(x2 / self.imscale), h))\n else: # show normal image\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n imageid = self.canvas.create_image(max(box_canvas[0], box_img_int[0]),\n max(box_canvas[1], box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas.lower(imageid) # set image into background\n if self.last_image_id != 0:\n self.canvas.delete(self.last_image_id)\n\n self.last_image_id = imageid\n self.canvas.imagetk = imagetk # keep an extra reference to prevent garbage-collection", "def adjustScrolls(self):\n cwidth = self._canvas.winfo_width()\n cheight = self._canvas.winfo_height()\n self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)\n if cwidth < self.canvwidth or cheight < self.canvheight:\n self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,\n column=0, rowspan=1, columnspan=1, sticky='news')\n self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,\n column=1, rowspan=1, columnspan=1, sticky='news')\n else:\n self.hscroll.grid_forget()\n self.vscroll.grid_forget()", "def scrollDown_y(self):\r\n if self.y_stack<self.img.shape[1]-1:\r\n self.y_stack+=1\r\n self.pixmap3=self.drawPixmap(\"xz\") \r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))", "def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image", "def scrollUp(self):\r\n if self.z_stack>0:\r\n self.z_stack-=1\r\n self.pixmap=self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= 
VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')", "def draw(self, img):\n self._erase_last_line(self.img)\n\n idxs = np.argwhere(img[:, self._pos] == 0)\n self.prev_y = (idxs.min(), idxs.max())\n\n cv.line(img, (self._pos, 0), (self._pos, self.h), (0, 0, 0), 1)", "def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))", "def linux_zoomer_plus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def mover_scroll(self, x, y):\n self.scrollx += x\n self.scrolly += y", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def scrollUp_y(self):\r\n if self.y_stack>0:\r\n self.y_stack-=1\r\n \r\n self.pixmap3=self.drawPixmap(\"xz\")\r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))", "def move_horizontal(self):\r\n if self.movement == \"horizontal\" and self.flag_move:\r\n self.move_ball()\r\n self.canvas.after(50, self.move_horizontal)", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def horizontal_scroll(self, image, padding=True):\n\n image_list = list()\n height = image.size[1]\n\n # Scroll into the blank image.\n if padding:\n for y in range(1,17):\n section = image.crop((0, 0, 8, y))\n print section.width,section.height\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 16 - y, 8, 16))\n image_list.append(display_section)\n return image_list\n\n #Scroll across the input image.\n for y in range(16, height + 1):\n section = image.crop((0, y - 16, 8, y))\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 0, 8, 16))\n image_list.append(display_section)\n\n #Scroll out, leaving the blank image.\n if padding:\n for y in range(height - 15, height + 1):\n section = image.crop((0, y, 8, height))\n display_section = self.create_blank_image()\n display_section.paste(section, (0, 0, 8, 15 - (y - (height - 15))))\n image_list.append(display_section)\n\n #Return the list of images created\n return image_list", "def __wheel(self, event):\n x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas_image.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= 
self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()", "def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))", "def scroll(self):\n x_position = self._player.get_position()[0]\n half_screen = self._master.winfo_width() / 2\n world_size = self._world.get_pixel_size()[0] - half_screen\n\n # Left side\n if x_position <= half_screen:\n self._view.set_offset((0, 0))\n\n # Between left and right sides\n elif half_screen <= x_position <= world_size:\n self._view.set_offset((half_screen - x_position, 0))\n\n # Right side\n elif x_position >= world_size:\n self._view.set_offset((half_screen - world_size, 0))" ]
[ "0.78580177", "0.6695677", "0.6625122", "0.66034824", "0.64015204", "0.6336621", "0.62105596", "0.6134658", "0.6117314", "0.6087952", "0.60482293", "0.5974955", "0.59536266", "0.59532565", "0.5856979", "0.58364534", "0.58310264", "0.58037436", "0.5784162", "0.5746283", "0.5735475", "0.5700967", "0.56167144", "0.5609998", "0.5547529", "0.5539105", "0.55353874", "0.55134064", "0.55090874", "0.546265" ]
0.7781856
1
Scroll canvas vertically and redraw the image
def __scroll_y(self, *args, **kwargs):
    self.canvas.yview(*args)  # scroll vertically
    self.__show_image()  # redraw the image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __scroll_y(self, *args, **kwargs):\n self.canvas_image.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def refresh(self):\n\n # Delete old image (if needed) \n if self.canvas_image_id:\n self.canvas.delete(self.canvas_image_id)\n if debug > 5:\n print \"refresh: New image (x\", self.zoom, \") \", (self.xint, self.yint), (self.canvas[\"width\"], self.canvas[\"height\"]), [self.zoom * s for s in self.isize]\n\n scaled_isize = [self.xint[1] - self.xint[0],\n self.yint[1] - self.yint[0]]\n\n # Create the image for the canvas\n self.image = self.generator_func(self.zoom, self.xint, self.yint)\n self.canvas_image_id = self.canvas.create_image(0, 0, anchor=N+W,\n image=self.image)\n\n # Figure out where scroll bars should be and put them there.\n if self.xint[0] == 0 and int(self.isize[0] * self.zoom) == self.xint[1]:\n self.hscroll.grid_remove()\n else:\n self.hscroll.grid()\n self.hscroll.set(mapped_number(self.xint[0],\n (0, self.isize[0] * self.zoom -1),\n (0, 1)),\n mapped_number(self.xint[1] -1,\n (0, self.isize[0] * self.zoom -1),\n (0, 1)))\n if self.yint[0] == 0 and int(self.isize[1] * self.zoom) == self.yint[1]:\n self.vscroll.grid_remove()\n else:\n self.vscroll.grid()\n self.vscroll.set(mapped_number(self.yint[0],\n (0, self.isize[1] * self.zoom -1),\n (0, 1)),\n mapped_number(self.yint[1] -1,\n (0, self.isize[1] * self.zoom -1),\n (0, 1)))", "def redraw_canvas(self, dy):\n self.scroll = dy/20\n \n if self.scroll > 0:\n \n if self.min_cairo < -20:\n self.min_cairo = 0 \n self.min_text += 1 \n self.max_text += 1\n \n #When bottom of document is reached stop scrolling\n if self.max_text > self.line_count + 2:\n self.min_cairo = 0\n self.min_text = self.line_count - 50\n self.max_text = self.line_count\n self.scroll = 0\n \n elif self.scroll < 0:\n if self.min_cairo > 0:\n self.min_cairo = -20\n self.min_text -= 1\n self.max_text -=1\n\n #Do not scroll up if already at top of document\n if self.min_text < 0:\n self.min_cairo = 20\n self.min_text = 0\n self.max_text = 50\n self.scroll = 0\n \n #Do the scrolling\n self.min_cairo -= self.scroll\n \n self.max_cairo = self.min_cairo\n self.invalidate_canvas()", "def scrollDown_y(self):\r\n if self.y_stack<self.img.shape[1]-1:\r\n self.y_stack+=1\r\n self.pixmap3=self.drawPixmap(\"xz\") \r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))", "def scrollDown(self):\r\n\r\n if self.z_stack<self.img.shape[0]-1:\r\n self.z_stack+=1\r\n \r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2= self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def continuous_scroll(self, context):\n\n self.drawing.redraw_canvas(self.dy)\n \n return True", "def scrollUp_y(self):\r\n if self.y_stack>0:\r\n self.y_stack-=1\r\n \r\n self.pixmap3=self.drawPixmap(\"xz\")\r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def draw(self, thing):\n thing.draw(self, Point([2,2]), flip=1)\n\n # configure the scroll 
region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)", "def __show_image(self):\n box_image = self.canvas_image.coords(self.container) # get image area\n box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas\n self.canvas_image.canvasy(0),\n self.canvas_image.canvasx(self.canvas_image.winfo_width()),\n self.canvas_image.canvasy(self.canvas_image.winfo_height()))\n self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n \n # Get scroll region box\n box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]),\n max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = self.box_img_int[0]\n box_scroll[2] = self.box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = self.box_img_int[1]\n box_scroll[3] = self.box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]),\n max(box_canvas[1], self.box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas_image.lower(self.imageid) # set image into background\n self.canvas_image.imagetk = imagetk # keep an extra reference to prevent garbage-collection", "def __show_image(self):\n box_image = self.canvas.coords(self.container) # get image area\n box_canvas = (self.canvas.canvasx(0), # get visible area of the canvas\n self.canvas.canvasy(0),\n self.canvas.canvasx(self.canvas.winfo_width()),\n self.canvas.canvasy(self.canvas.winfo_height()))\n box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n # Get scroll region box\n box_scroll = [min(box_img_int[0], box_canvas[0]), min(box_img_int[1], box_canvas[1]),\n max(box_img_int[2], box_canvas[2]), max(box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = box_img_int[0]\n box_scroll[2] = box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = box_img_int[1]\n box_scroll[3] = box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - 
box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n if self.__huge and self.__curr_img < 0: # show huge image\n h = int((y2 - y1) / self.imscale) # height of the tile band\n self.__tile[1][3] = h # set the tile band height\n self.__tile[2] = self.__offset + self.imwidth * int(y1 / self.imscale) * 3\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, h) # set size of the tile band\n self.__image.tile = [self.__tile]\n image = self.__image.crop((int(x1 / self.imscale), 0, int(x2 / self.imscale), h))\n else: # show normal image\n image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid\n (int(x1 / self.__scale), int(y1 / self.__scale),\n int(x2 / self.__scale), int(y2 / self.__scale)))\n #\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter))\n imageid = self.canvas.create_image(max(box_canvas[0], box_img_int[0]),\n max(box_canvas[1], box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas.lower(imageid) # set image into background\n if self.last_image_id != 0:\n self.canvas.delete(self.last_image_id)\n\n self.last_image_id = imageid\n self.canvas.imagetk = imagetk # keep an extra reference to prevent garbage-collection", "def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')", "def adjustScrolls(self):\n cwidth = self._canvas.winfo_width()\n cheight = self._canvas.winfo_height()\n self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)\n if cwidth < self.canvwidth or cheight < self.canvheight:\n self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,\n column=0, rowspan=1, columnspan=1, sticky='news')\n self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,\n column=1, rowspan=1, columnspan=1, sticky='news')\n else:\n self.hscroll.grid_forget()\n self.vscroll.grid_forget()", "def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))", "def yview_scroll(self, number, what):\n self.tk.call(self._w, 'yview', 'scroll', number, what)", "def on_configure(self, event):\n self.testCanvas.configure(scrollregion=self.testCanvas.bbox('all'))\n self.testCanvas.yview_moveto(1)", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def __scroll_x(self, *args, **kwargs):\n self.canvas_image.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def scrollUp(self):\r\n if self.z_stack>0:\r\n self.z_stack-=1\r\n self.pixmap=self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def scrollDown_x(self):\r\n if self.x_stack<self.img.shape[2]-1:\r\n 
self.x_stack+=1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5)\r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def draw(self, img):\n self._erase_last_line(self.img)\n\n idxs = np.argwhere(img[:, self._pos] == 0)\n self.prev_y = (idxs.min(), idxs.max())\n\n cv.line(img, (self._pos, 0), (self._pos, self.h), (0, 0, 0), 1)", "def Configure_YScroll( self ):\r\n Label(self.frame_scroll).pack( side = TOP )\r\n self.yscroll = Scrollbar( self.frame_scroll )\r\n self.yscroll.config( command = self.Vertical_Scroll )\r\n self.canvas_one.config( yscrollcommand = self.Double_Expand )\r\n self.canvas_two.config( yscrollcommand = self.Double_Expand )", "def redraw(self):\n self.vispy_viewer.canvas.update()", "def __scroll_x(self, *args, **kwargs):\n self.canvas.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))", "def scroll(self, delta_x, delta_y):\n if delta_x < 0:\n shift_x = 0\n xend = self.width + delta_x\n dt_x = 1\n else:\n shift_x = self.width - 1\n xend = delta_x - 1\n dt_x = -1\n if delta_y < 0:\n y = 0\n yend = self.height + delta_y\n dt_y = 1\n else:\n y = self.height - 1\n yend = delta_y - 1\n dt_y = -1\n while y != yend:\n x = shift_x\n while x != xend:\n self.format.set_pixel(\n self, x, y, self.format.get_pixel(self, x - delta_x, y - delta_y)\n )\n x += dt_x\n y += dt_y", "def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r", "def down():\n global y, canvas # y é modificado\n canvas.create_line(x, y, x, y + 10)\n y += 10", "def redraw(self):\n self.vispy_widget.canvas.update()", "def scroll_window(self):\r\n window = tkinter.Frame(self.root)\r\n scroller = tkinter.Scrollbar(self.root, orient=\"vertical\",\r\n command=self.canvas.yview)\r\n self.canvas.configure(yscrollcommand=scroller.set)\r\n\r\n scroller.pack(side=\"right\", fill=\"y\")\r\n self.canvas.pack(side=\"left\", fill=\"both\", expand=True)\r\n self.canvas.create_window((4, 4), window=window, anchor=\"nw\",\r\n tags=\"self.window\")\r\n return window" ]
[ "0.7920473", "0.70299685", "0.6990068", "0.6820892", "0.6765869", "0.6654134", "0.64846414", "0.6347093", "0.6318105", "0.6262267", "0.62320995", "0.61670595", "0.61647034", "0.6136531", "0.6127709", "0.61248255", "0.6105798", "0.60805285", "0.6061856", "0.6059932", "0.6050039", "0.6046157", "0.6028615", "0.60232615", "0.60147953", "0.5980093", "0.59374744", "0.59356236", "0.59281534", "0.57652473" ]
0.78760535
1
begin drawing a rectangle when mousebutton is pressed
def __begin_rectangle(self, event):
    self.start_point_rect = Point2D(self.canvas.canvasx(event.x), self.canvas.canvasy(event.y))
    global choose_rectangle
    if choose_rectangle:
        self.rectangles.append(self.canvas.create_rectangle(self.start_point_rect.x, self.start_point_rect.y,
                                                            self.end_point_rect.x, self.end_point_rect.y,
                                                            outline='blue', width=1, activewidth=2,
                                                            fill='magenta', stipple='gray50'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _press(self, event):\n # make the drawn box/line visible get the click-coordinates,\n # button, ...\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if ((self._active_handle is None or not self._interactive) and\n self._allow_creation):\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n if (self._active_handle is None and not self.ignore_event_outside and\n self._allow_creation):\n x = event.xdata\n y = event.ydata\n self._visible = False\n self.extents = x, x, y, y\n self._visible = True\n else:\n self.set_visible(True)\n\n self._extents_on_press = self.extents\n self._rotation_on_press = self._rotation\n self._set_aspect_ratio_correction()\n\n return False", "def draw(self):\n if self.is_clicked:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 0)\n else:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 1)", "def draw_button(self):\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.msg_img, self.msg_img_rect)", "def _draw_rectangle(event, x, y, flags, params):\n global img, source_img, state\n global p1, p2\n width, height = params\n if event == cv2.EVENT_LBUTTONDOWN:\n state = True\n img = source_img.copy()\n cv2.rectangle(img, (int(x-width/2), int(y-height/2)),\n (int(x+width/2), int(y+height/2)),\n DrawingShapeUtils.COLOR, \n DrawingShapeUtils.LINE_THICKNESS)\n p1 = (x, y)\n elif event == cv2.EVENT_LBUTTONUP:\n state = False\n img = source_img.copy()\n cv2.rectangle(img, (int(x-width/2), int(y-height/2)),\n (int(x+width/2), int(y+height/2)),\n DrawingShapeUtils.COLOR, \n DrawingShapeUtils.LINE_THICKNESS)\n p2 = (x, y)\n if event == cv2.EVENT_MOUSEMOVE:\n if state:\n img = source_img.copy()\n cv2.rectangle(img, (int(x-width/2), int(y-height/2)),\n (int(x+width/2), int(y+height/2)),\n DrawingShapeUtils.COLOR, \n DrawingShapeUtils.LINE_THICKNESS)", "def on_mouse_press(self, x, y, button):\n\n pass", "def draw(self):\n if context.click():\n self.place()", "def on_mouse_press(self, x, y, button, modifiers):\n self.add_wall()", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def draw_button(self):\r\n self.surface.fill(self.button_color, self.rect)\r\n self.surface.blit(self.msg_image, self.msg_image_rect)", "def drawRectangle(x, y, width, height):\n pen1.up()\n pen1.goto(x, y)\n pen1.down()\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)\n pen1.right(90)\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)", "def __mouse_release(self, event, right_click=False):\n global choose_rectangle\n if right_click:\n return\n if choose_rectangle:\n self.__finish_rectangle(event)", "def rectan_button(msg,x,y,w=100,h=100,ic=green,ac=green_bright,action=None,size=20,font='freesansbold.ttf'):\n mouse = pygame.mouse.get_pos() #pobiera pozycje myszki i zwraca x w mouse[0] i y w mouse[1]\n click = pygame.mouse.get_pressed() # click[0] lewy, click[1] srodkowy , click[2] prawy przycisk myszy \n \n #print(mouse)\n a = (x+w > mouse[0] and x < mouse[0] and y+h>mouse[1] and y < mouse[1]) #warunek na to , czy pozycja myszki jest w prostokacie przycisku\n if a: \n pygame.draw.rect(gameDisplay,ac,(x,y,w,h)) #rysuje jasniejszy prostokąt, wydaje sie ze podswietlony, gdy myszka na nim.\n \n if click[0]==1 and action!=None:\n #sleep zeby sie nie wcisnely 2 przyciski jak np. 
wychodzisz z opcji, a w miejscu przycisku 'back' w glownym menu jest 'start'\n time.sleep(0.1)\n action() \n else:\n pygame.draw.rect(gameDisplay,ic,(x,y,w,h)) #rysuje ciemny prostokat, jesli a nie jest prawdą\n \n\n # tutaj tworzy sie napis na srodku ekranu. \n # mozna dorzucic opcje wyboru \n textfont = pygame.font.Font('freesansbold.ttf',20)\n textsurf,textrect = text_objects(msg,textfont,black)\n textrect.center = ((x+(w/2)),(y+(h/2)))\n gameDisplay.blit(textsurf,textrect)", "def draw():", "def _press(self, event):\n self._set_cursor(True)\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if self._active_handle is None or not self._interactive:\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n v = event.xdata if self.direction == 'horizontal' else event.ydata\n # self._pressv and self._prev are deprecated but we still need to\n # maintain them\n self._pressv = v\n self._prev = self._get_data(event)\n\n if self._active_handle is None and not self.ignore_event_outside:\n # when the press event outside the span, we initially set the\n # visibility to False and extents to (v, v)\n # update will be called when setting the extents\n self._visible = False\n self.extents = v, v\n # We need to set the visibility back, so the span selector will be\n # drawn when necessary (span width > 0)\n self._visible = True\n else:\n self.set_visible(True)\n\n return False", "def button(win, text, x, y, w, h, c, hc, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n pygame.draw.rect(win,hc,(x,y,w,h))\n\n if click[0] == 1 and action != None:\n action()\n else:\n pygame.draw.rect(win,c,(x,y,w,h))\n\n TextSurf, TextRect = textObject(text, rs.smallText, rs.black)\n TextRect.center = ((x+(w/2)),(y+(h/2)))\n win.blit(TextSurf, TextRect)", "def onPress(event):\r\n global rect\r\n if event.inaxes == None:\r\n return# Ignore clicks outside the axes\r\n contains, attr = rect.contains(event)\r\n if not contains:\r\n return# Ignore clicks outside the rectangle\r\n\r\n global initPos # Grab the global variable to update it\r\n initPos = [rect.get_x(), rect.get_y(), event.xdata, event.ydata]", "def draw(self, surface, offset=(0,0)):\n mouse = pg.mouse.get_pos()\n pos = mouse[0]-offset[0], mouse[1]-offset[1]\n if self.clicked:\n fill_color = pg.Color(\"white\")\n text = self.selected_text\n elif self.rect.collidepoint(pos):\n fill_color = (198, 226, 255)\n text = self.selected_text\n else:\n fill_color = self.color\n text = self.text\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(fill_color, self.rect.inflate(-2,-2))\n surface.blit(text, self.text_rect)", "def select_area(ev, x, y, _1, _2):\n global x_init, y_init, drawing, top_left, bottom_right, orig_img, img\n\n if ev == cv.EVENT_LBUTTONDOWN:\n drawing = True\n x_init = x\n y_init = y\n elif ev == cv.EVENT_MOUSEMOVE and drawing:\n draw_rect(img, x_init, y_init, x, y)\n elif ev == cv.EVENT_LBUTTONUP:\n drawing = False\n draw_rect(img, x_init, y_init, x, y)\n\n mask = prepare_mask(x, y)\n remove_from_scene(mask)", "def draw(self):\n self.menu_pointer.draw()", "def handle_mouse_press(self, event):", "def draw_button(self):\n # Draw the button's outline\n pg.draw.rect(self.screen, self.text_color, pg.Rect(self.rect.left - 1, self.rect.top - 1, self.rect.width + 2, self.rect.height + 2))\n\n # Draw the button\n pg.draw.rect(self.screen, self.button_color, 
self.rect)\n\n # Blit the button's text onto it\n self.screen.blit(self.txt_surface, self.txt_surface_rect)", "def draw(self, win):\n pygame.draw.rect(win, self.color, self.rect)", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def draw_but(self, window):\n # draws the rectangular button\n p1 = graphics.Point(self.cen_point_x - self.width / 2, \n self.cen_point_y - self.height / 2)\n p2 = graphics.Point(self.cen_point_x + self.width / 2, \n self.cen_point_y + self.height / 2)\n self.button = graphics.Rectangle(p1, p2)\n self.button.setOutline(\"Orange\")\n self.button.draw(window)\n \n # draws the text on the button\n self.text.draw(window)", "def draw(self):\n if not self.pressed:\n #draw dialogue prompt\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ALABAMA_CRIMSON)\n arcade.draw_text(\"!\", self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw dialogue box\n arcade.draw_rectangle_filled(self.center_x, self.center_y, self.width, self.height, self.color)\n arcade.draw_text(self.text, self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")", "def draw(self, screen):\r\n if self.selected:\r\n used_color = (255 - self.color[0], 255 - self.color[1], 255 - self.color[2])\r\n else:\r\n used_color = self.color\r\n pygame.draw.rect(screen, used_color,\r\n (self.location_top_left[0], self.location_top_left[1], self.size_x, self.size_y), 0)", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def keyboardInterface(event):\r\n global rect\r\n if event.key == \"right\":\r\n # Make the rectangle wider\r\n w0 = rect.get_width()\r\n rect.set_width(w0 + 1)\r\n elif event.key == \"left\":\r\n # Make the rectangle narrower\r\n w0 = rect.get_width()\r\n rect.set_width(w0 - 1)\r\n elif event.key == \"up\":\r\n # Make the rectangle shorter\r\n h0 = rect.get_height()\r\n rect.set_height(h0 - 1)\r\n elif event.key == \"down\":\r\n # Make the rectangle taller\r\n h0 = rect.get_height()\r\n rect.set_height(h0 + 1)\r\n################################################################################\r\n# The functions below here will need to be changed for use on Windows!\r\n elif event.key == \"ctrl+right\":\r\n # Make the rectangle wider - faster\r\n w0 = rect.get_width()\r\n rect.set_width(w0 + 10)\r\n elif event.key == \"ctrl+left\":\r\n # Make the rectangle narrower - faster\r\n w0 = rect.get_width()\r\n rect.set_width(w0 - 10)\r\n elif event.key == \"ctrl+up\":\r\n # Make the rectangle shorter - faster\r\n h0 = rect.get_height()\r\n rect.set_height(h0 - 10)\r\n elif event.key == \"ctrl+down\":\r\n # Make the rectangle taller - faster\r\n h0 = rect.get_height()\r\n rect.set_height(h0 + 10)\r\n\r\n rect.figure.canvas.draw()# update the plot window\r", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = GameView()\n self.window.show_view(game_view)", "def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = GameView()\n self.window.show_view(game_view)" ]
[ "0.72806805", "0.69949013", "0.6965726", "0.6957685", "0.69377804", "0.68940526", "0.6857397", "0.68302447", "0.68219036", "0.68137723", "0.6807555", "0.6778434", "0.67169297", "0.6711751", "0.667597", "0.66613656", "0.6624775", "0.6610253", "0.65589386", "0.65369856", "0.65196383", "0.6498843", "0.64928854", "0.64782184", "0.6478151", "0.64592403", "0.64575154", "0.6447767", "0.6444294", "0.6444294" ]
0.71212363
1
expand the begun rectangle
def __expand_rectangle(self, event):
    global choose_rectangle
    curX = self.canvas.canvasx(event.x)
    curY = self.canvas.canvasy(event.y)
    w, h = self.canvas.winfo_width(), self.canvas.winfo_height()
    if event.x > 0.9 * w:
        self.canvas.xview_scroll(1, 'units')
    elif event.x < 0.1 * w:
        self.canvas.xview_scroll(-1, 'units')
    if event.y > 0.9 * h:
        self.canvas.yview_scroll(1, 'units')
    elif event.y < 0.1 * h:
        self.canvas.yview_scroll(-1, 'units')
    # expand rectangle as you drag the mouse
    if choose_rectangle:
        self.canvas.coords(self.rectangles[-1], self.start_point_rect.x, self.start_point_rect.y, curX, curY)
        self.end_point_rect = Point2D(self.canvas.canvasx(event.x), self.canvas.canvasy(event.y))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self.rect = (self.x, self.y, self.width, self.height)", "def update(self):\n self.rect.x += self.change_x\n self.rect.y += self.change_y", "def update(self):\n self.rect.x += self.change_x\n self.rect.y += self.change_y", "def update(self):\r\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)", "def expand(self):\n self.vertices[-1, :] = self.expanded", "def update(self):\n\t\n\t\tself.rect.y += (self.settings.rectangle_speed *\n\t\t\t\tself.settings.rectangle_direction)", "def boundary(self):\n if self.pos.x < 0:\n self.pos.x = 0\n if self.pos.x > WIDTH - 48:\n self.pos.x = WIDTH - 48\n if self.pos.y < 0:\n self.pos.y = 0\n if self.pos.y > HEIGHT - 48:\n self.pos.y = HEIGHT - 48\n\n self.rect.topleft = self.pos", "def grow(self, delta_width, delta_height):\r\n self.width += delta_width\r\n self.height += delta_height", "def rectEnlarge(rect, margin):\n return rect + np.array([-margin, margin, -margin, margin])", "def grow_rectangle(self, dwidth, dheight):\n self.width += dwidth\n self.height += dheight\n return(self.width, self.height)", "def update(self):\n self.rect.x += self.change_x\n self.rect.y += self.change_y\n\n self.life -= 1\n if self.life == 0:\n self.kill()", "def update(self):\n \n self.rect.x += self.change_x\n self.rect.y += self.change_y\n \n if self.rect.x < 0:\n self.rect.x = 0\n if self.rect.x > screen_width - 60:\n self.rect.x = screen_width - 60\n if self.rect.y < 0:\n self.rect.y = 0 \n \n if self.rect.y > screen_height - 60:\n self.rect.y = screen_height - 60", "def Grow(self):\n oldx = self.x\n oldy = self.y\n randBlue = []\n randBrown = []\n for i in range(0,4):\n if i == 0:\n self.y -= 1\n elif i == 1:\n self.x += 1\n elif i == 2:\n self.y += 1\n elif i == 3:\n self.x -= 1\n\n pix = self.display.get_at((self.x,self.y))\n if pix == Colors.A_Water:\n randBlue.append(i)\n elif pix == Colors.A_Wood:\n randBrown.append(i)\n elif pix == Colors.A_Fire or pix == Colors.A_Crazy:\n self.isAlive = False\n self.x = oldx\n self.y = oldy\n \n if len(randBlue) != 0:\n r = random.randint(0,len(randBlue)-1)\n self.facing = randBlue[r]\n self.MoveCurrentSpace()\n elif len(randBrown) != 0:\n r = random.randint(0,len(randBrown)-1)\n self.facing = randBrown[r]\n self.MoveCurrentSpace()", "def update(self):\n if self.control:\n self.rect.centery += self.__dy \n \n if self.rect.right > 1060:\n self.rect.right = 1060\n if self.rect.left < 20:\n self.rect.left = 20\n \n if self.rect.centery > 750:\n self.kill()", "def expand(self, right=0, down=0, left=0, up=0):\n self.min_col -= left\n self.min_row -= up\n self.max_col += right\n self.max_row += down", "def rebuild( self, scene = None ):\n rect = QRectF( 0, 0, self.minimumWidth(), self.minimumHeight() )\n self.setRect( rect )\n return True", "def update_rect(self):\n self._update_tiles()", "def draw_building():\n\n gerardo.penup()\n gerardo.backward(135)\n gerardo.pendown()\n gerardo.begin_fill()\n for i in range(2): # this loop draws out the rectangle for the building\n gerardo.forward(200)\n gerardo.right(90)\n gerardo.forward(100)\n gerardo.right(90)\n gerardo.end_fill()\n gerardo.hideturtle()", "def make_attack(self):\n if self.rect.y < 750:\n # if self.left:\n # self.rect.x -= 2\n # else:\n # self.rect.x += 2\n # if abs(self.rect.x - self.originx) == 150:\n # self.left = True\n # elif abs(self.rect.x - self.originx) == 0:\n # self.left = False\n self.rect.y += 5\n else: self.rect.y = self.originy", "def grow(self, delta_width, delta_height):\n self.width += delta_width\n self.height += 
delta_height", "def grow(self, delta_width, delta_height):\n self.width += delta_width\n self.height += delta_height", "def grow(self, delta_width, delta_height):\n self.width += delta_width\n self.height += delta_height", "def update(self):\r\n self.rect.y += 12\r\n\r\n if self.rect.y > 500:\r\n self.rect.y = random.randrange(-1000, -60)", "def update(self):\r\n \r\n # Desplaza el bloque un píxel hacia abajo.\r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-300, -20) \r\n \r\n else:\r\n self.rect.y += 5\r\n \r\n # Si el bloque estuviera muy abajo, lo restablecemos a la parte superior de la pantalla.\r", "def move_rectangle(old_rec,dx,dy):\n new_rec = Rectangle()\n new_rec.height = old_rec.height\n new_rec.width = old_rec.width\n new_rec.corner = Point()\n new_rec.corner.x = old_rec.corner.x + dx\n new_rec.corner.y = old_rec.corner.y + dy\n return new_rec", "def update(self):\n self.x -= self.speed\n self.beam_rect.x = self.x", "def grow(self):\n td = 0 if self.travel_direction == 'left' else 1\n\n if self.body.length == 0:\n # 35 only taken away if traveling right, 40 added only if traveling left\n xpos = self.face_xcoord - 35*td - (td-1)*40\n else:\n # 35 * body length (plus initial segment) only if traveling right\n # 40 + 35 * body length only added if traveling right\n xpos = self.face_xcoord - (35*(self.body.length+1)*td) - (td-1)*(40 + 35*self.body.length)\n\n self.body.addSegment(xpos, self.face_ycoord)", "def rectStreatch(self,(x,y,xs,ys),(u,v,us,vs)):\n # do clipping now:\n \n color = Vec4(1,1,1,1)\n \n w = self.w\n h = self.h\n \n u,v,us,vs = u/w,1-v/h,(u+us)/w,1-(v+vs)/h\n \n self.drawer.rectangle( \n x,y,xs,ys,\n u,v,us-u,vs-v,\n #u/self.w,v/self.h,us/self.w,vs/self.h,\n color)", "def grow_stack_arm(top):\n if top is not None and top.name in ['sandwichtop', 'sandwichtop_no_label']:\n _bot = find_sandwich_bottom(top)\n if _bot is None:\n return\n if top.ey > 0:\n top.reset_y()\n _ty = top.spr.get_xy()[1]\n _th = top.spr.get_dimensions()[1]\n _by = _bot.spr.get_xy()[1]\n _dy = _by - (_ty + _th)\n if _dy > 0:\n top.expand_in_y(_dy / top.scale)\n top.refresh()", "def draw(self):\n if self.master != None :\n outline = Cell.FILLED_COLOR_BORDER if self.fill else Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = self.fill, outline = outline)" ]
[ "0.65636665", "0.632413", "0.632413", "0.6275868", "0.626541", "0.6198104", "0.61374", "0.61107606", "0.6077449", "0.6026812", "0.60264236", "0.60256207", "0.60034686", "0.599594", "0.59890485", "0.5981123", "0.59531474", "0.5931869", "0.59192246", "0.5909494", "0.5909494", "0.5909494", "0.5907163", "0.5885315", "0.5868882", "0.58324474", "0.58185303", "0.57928675", "0.5787602", "0.57729584" ]
0.70743585
0
add some points to a polygon stored as line until finish polygon is called
def __draw_polygon(self, event, klick):
    global creating_polygon
    curX = self.canvas.canvasx(event.x)
    curY = self.canvas.canvasy(event.y)
    if not klick and len(self.polygon_points) >= 2:
        c_r_x, c_r_y = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))
        distanceX = curX - c_r_x
        distanceY = curY - c_r_y
        if pow(pow(distanceX, 2) + pow(distanceY, 2), 1 / 2) <= 15:
            return
    image_relative_x, image_relative_y = self.get_image_relative_coords((curX, curY))
    self.polygon_points.extend((image_relative_x, image_relative_y))
    self.polygon_groundstructure.append(self.canvas.create_rectangle(curX - 2, curY - 2, curX + 2, curY + 2,
                                                                     outline='magenta', width=1, activewidth=2))
    if not creating_polygon:  # start with a new polygon
        creating_polygon = True
        return
    else:  # draw a line between the last points
        c_r_x1, c_r_y1 = self.get_canvas_relative_coords((self.polygon_points[-4], self.polygon_points[-3]))
        c_r_x2, c_r_y2 = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))
        self.polygon_groundstructure.append(self.canvas.create_line([c_r_x1, c_r_y1, c_r_x2, c_r_y2],
                                                                    fill='magenta', width=2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _finish_polygon(self):\n global undo_stack, choose_polygon\n if len(self.polygon_points) < 6:\n messagebox.showinfo(title='Info', message='Too few points for a polygon')\n return 'too_few_points'\n relative_poly_points = []\n for p in range(0, len(self.polygon_points), 2):\n relative_poly_points.extend(self.get_canvas_relative_coords((self.polygon_points[p],\n self.polygon_points[p + 1])))\n if choose_polygon:\n undo_stack.append('p')\n self.polygons.append(self.canvas.create_polygon(relative_poly_points,\n outline='blue', activewidth=3, width=1,\n fill='magenta', stipple='gray50'))\n self.canvas.tag_bind(self.polygons[-1], '<ButtonPress-1>', self.callback_click_polygon)\n self.canvas.tag_bind(self.polygons[-1], '<ButtonRelease-1>', self.callback_release_polygon)\n self.canvas.tag_bind(self.polygons[-1], '<B1-Motion>', self.callback_move_polygon)\n for p in self.polygon_groundstructure:\n self.canvas.delete(p)\n self.polygon_points_history[self.polygons[-1]] = np.reshape(np.asarray(self.polygon_points),\n (round(len(self.polygon_points) / 2),\n 2))\n self.polygon_points.clear()\n self.polygon_groundstructure.clear()\n self.parent_class.activate_save_bt()", "def begin_poly(self):\n self._poly = [self._position]\n self._creatingPoly = True", "def add_polyline(self, layer_to_use,poly,open):\n if type(poly) is not list:\n toplot = [poly]\n else:\n toplot = poly\n\n for y in toplot:\n\n polyline = self.msp.add_polyline2d(\n points=[],\n dxfattribs={'layer': layer_to_use['name']})\n\n if open==True:\n polyline.close(False)\n else:\n polyline.close(True)\n y = np.round(100*y)/100\n if layer_to_use['inversion']==0:\n polyline.append_vertices(y)\n else:\n polyline.append_vertices(-y)", "def _draw_polygon(self):\n xs, ys = zip(*self._xys) if self._xys else ([], [])\n self._selection_artist.set_data(xs, ys)\n self._update_box()\n # Only show one tool handle at the start and end vertex of the polygon\n # if the polygon is completed or the user is locked on to the start\n # vertex.\n if (self._selection_completed\n or (len(self._xys) > 3\n and self._xys[-1] == self._xys[0])):\n self._polygon_handles.set_data(xs[:-1], ys[:-1])\n else:\n self._polygon_handles.set_data(xs, ys)\n self.update()", "def draw_polygon(self, points, line_width, line_color, fill_color = \"\"):\n line_color, fill_color = check_color(line_color), check_color(fill_color)\n STpolygon.polygon(self.canvas, points, line_width, line_color, fill_color)", "def add_poly_edges(self, poly_item, marker_dict=None):\n\n poly = poly_item.polygon()\n\n for i in range(1, poly.size() + 1):\n if i == poly.size():\n p1 = poly.at(i - 1)\n p2 = poly.at(0)\n index = -poly.size()\n\n else:\n p1 = poly.at(i - 1)\n p2 = poly.at(i)\n index = i\n\n line = self.addLine(QLineF(p1, p2))\n line.setZValue(-1)\n display_line = self.addLine(QLineF(p1, p2), QPen(self.LUBronze, 3))\n line.__setattr__(\"localIndex\", index)\n line.setParentItem(poly_item)\n display_line.setParentItem(line)\n self.edge_list.append(line)\n\n # Used to pass markers when loading a g\n if marker_dict:\n if i - 1 in marker_dict:\n self.add_marker(display_line, marker_dict[i - 1])\n display_line.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n text = display_line.childItems()[0]\n text.setVisible(False)", "def addPoly(self,p):\n for t in triangulate(p):\n self.addTri(t)\n return self", "def draw_polygon(self, *points, color=DEFAULT.color):", "def add_poly_corners(self, poly_item, marker_dict=None):\n poly = poly_item.polygon()\n\n for i in range(poly.size()):\n point = 
poly.at(i)\n p = self.addEllipse(-4, -4, 8, 8, self.LUBronze, self.LUBronze)\n p.setZValue(2) # Make sure corners always in front of polygon surfaces\n p.setParentItem(poly_item)\n p.__setattr__(\"localIndex\", int(i))\n p.setPos(point.x(), point.y())\n p.setFlag(QGraphicsItem.ItemIsSelectable)\n p.setFlag(QGraphicsItem.ItemIsMovable)\n self.point_coord_list = np.append(self.point_coord_list, [[p.x(), p.y()]], axis=0)\n\n self.potential_edge_splitters.append(p)\n\n # Used to pass markers when loading a g\n if marker_dict:\n if i in marker_dict:\n self.add_marker(p, marker_dict[i])\n text = p.childItems()[0]\n text.setVisible(False)", "def add_poly_to_scene(self, polygon, point_marker_dict=None, curve_marker_dict=None, hole_mode=False):\n if hole_mode:\n poly = self.addPolygon(polygon, QPen(QColor(0, 0, 0, 0)), QBrush(QColor(255, 255, 255)))\n poly.setZValue(1)\n self.poly_list.append(poly)\n self.hole_list.append(poly)\n else:\n poly = self.addPolygon(polygon, QPen(QColor(0, 0, 0, 0)), QBrush(QColor(0, 0, 0, 50)))\n self.poly_list.append(poly)\n self.add_poly_corners(poly, point_marker_dict)\n self.add_poly_edges(poly, curve_marker_dict)\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable)\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsMovable)\n return poly", "def draw_polyline(*points):\r\n global _canvas\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n #print(points)\r\n #print(len(points))\r\n newpoints = []\r\n for x in range(0, len(points), 2):\r\n #print(x)\r\n pt = Point(points[x], points[x+1])\r\n newpoints += [ pt ]\r\n #print(newpoints)\r\n path = Path(*newpoints)\r\n path.setBorderWidth(_current_line_thickness)\r\n path.setBorderColor(_current_color)\r\n _canvas.add(path)", "def drag(self, event):\n\t\tif len(self.coord_list) > 0:\n\t\t\tself.canvas.create_line(event.x, event.y, \n\t\t\t\tself.coord_list[-1][0], self.coord_list[-1][1])\n\n\t\tself.coord_list.append([event.x, event.y])\n\n\t\tpoly_list = check_contained(self.coord_list) - self.drawn_list\n\t\tfor polygon in poly_list:\t\t\t# will accidently draw this multilple times oops \n\t\t\t#self.canvas.create_polygon( self.coord_list[polygon[0]:polygon[1]], fill='black')\n\t\t\tself.drawn_list.add(polygon)", "def give_polygon(vertices, points):\n polygon = np.zeros((len(vertices), 2))\n for i, vertex in enumerate(vertices):\n polygon[i] = points[vertex]\n # End point of a polygon equals to start point\n polygon = polygon.tolist()\n if polygon[-1] != polygon[0]:\n polygon.append(polygon[0])\n return polygon", "def next_line():\r\n set_point(point().next_line())", "def proc_polyline(self, tokens):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polyline(pen=self.pen, points=pts)\n\n return component", "def draw(self):\n if len(self.__points) >= 2:\n self._total_length = 0\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n coords = self.__line_segment(p1, p2)\n if not coords is None:\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 1, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )\n coords = self.__line_cap(p2)\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 0, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )", "def _draw_line(self, event):\n if not self.obstacle_creation_mode:\n return\n\n if self.previous_coordinates is None:\n self.previous_coordinates = event.x, event.y\n self.new_obstacle.append([event.x, event.y])\n return\n\n x1, y1 = 
event.x, event.y\n\n if self._is_closing_shape(x1, y1, self.new_obstacle):\n x1, y1 = self.new_obstacle[0]\n else:\n self.new_obstacle.append([x1, y1])\n\n x0, y0 = self.previous_coordinates\n self.canvas.create_line(x0, y0, x1, y1, **self.LINE_OPTIONS)\n self.previous_coordinates = x1, y1", "def draw_polygon(*points):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n newpoints = []\r\n for x in range(0, len(points), 2):\r\n pt = Point(points[x], points[x+1])\r\n newpoints += [ pt ]\r\n polygon = Polygon(*newpoints)\r\n _set_not_filled(polygon)\r\n _canvas.add(polygon)", "def add_segment(self, xpos, ypos):\n self.def_field['XY_poly'].append([xpos, ypos])\n self.def_field['count'] += 1", "def draw_shape_polygon(self, poly, xform, colour):\n pts = [xform.chain(p) for p in poly.points]\n self.canvas.polygon([(p.x, p.y) for p in pts], outline=colour)", "def line_layer(self):\n screen_origin = self.ids.mapview.get_window_xy_from(lat1, lon1, self.ids.mapview.zoom)\n screen_destination = self.ids.mapview.get_window_xy_from(lat2, lon2, self.ids.mapview.zoom)\n point_list = [screen_origin[0], screen_origin[1], screen_destination[0], screen_destination[1]]\n\n with self.ids.line.canvas:\n self.ids.line.canvas.clear()\n\n Color(0, 0, 0, .6)\n Line(points=point_list, width=3, joint=\"bevel\")", "def plot_startpoints( polylines, mymap ):\n map( \\\n lambda start : mymap.addpoint( start[0][0], start[0][1], \"#FF0000\") if start != [] else [],\n polylines)", "def draw_polyline(self, points, line_width, line_color):\n line_color = check_color(line_color)\n STline.line(self.canvas, points, line_width, line_color)", "def _proc_polygon(self, tokens, filled):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polygon(pen=self.pen, points=pts, filled=filled)\n\n return component", "def drawPolygon(self,id,points,algorithm):\n if id in self.elements.keys():\n print(\"The id for the polygon has been registered! Please use another one\")\n return\n for i, p in enumerate(points):\n x=p[0]; y=self.h-1-p[1]\n try:\n self.checkInBound(x,0); self.checkInBound(y,1)\n except AssertionError as e:\n # print self.w,self.h,x,y\n print(\"Some value is out of bound! 
Please check your input\")\n return\n points=[(self.h-1-p[1],p[0]) for p in points]\n polygonEle=Polygon(id,points,algorithm,self.drawColor)\n self.elements[id]=polygonEle\n self.sync=False", "def addPoints(self, points):\r\n self.points = points", "def test_line_to_points(self):\n delta = 1\n # Create simple line\n L = numpy.array([[0, 0], [2, 0]])\n V = points_along_line(L, 1)\n\n expected_V = [[0, 0], [1, 0], [2, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V, expected_V))\n assert numpy.allclose(V, expected_V), msg\n\n # Not starting at zero\n # Create line\n L2 = numpy.array([[168, -2], [170, -2], [170, 0]])\n V2 = points_along_line(L2, delta)\n\n expected_V2 = [[168, -2], [169, -2], [170, -2],\n [170, -1], [170, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V2, expected_V2))\n assert numpy.allclose(V2, expected_V2), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'indonesia_highway_sample.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n C = points_along_line(P, delta)\n\n # Check against reference centroid\n expected_v = [[106.7168975, -6.15530081],\n [106.85224176, -6.15344678],\n [106.93660016, -6.21370279]]\n assert numpy.allclose(C, expected_v, rtol=1.0e-8)\n\n # Store points to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_points_along_line',\n suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test points_along_line')\n V.write_to_file(out_filename)", "def buildLine(self):\n self.clearLineshape()\n if len(self.components)==0:\n y = np.zeros(len(self.x))\n self.lineshape = y\n else:\n '''for component in self.components:\n y = np.array([component.function(x) for x in self.x])\n self.lineshape = np.add(self.lineshape,y)'''\n self.buffer._sum()", "def plot_endpoints( polylines, mymap ):\n map( \\\n lambda start : mymap.addpoint( start[-1][0], start[-1][1], \"#0000FF\") if start != [] else [],\n polylines)", "def event_drag_multipoint_line(self, event):\n\n if self.variables.current_shape_id:\n self.show_shape(self.variables.current_shape_id)\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n coords = self.coords(self.variables.current_shape_id)\n new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if vector_object.type == SHAPE_TYPES.ARROW or vector_object.type == SHAPE_TYPES.LINE:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n else:\n pass" ]
[ "0.6654041", "0.65504473", "0.63987935", "0.63902766", "0.63872755", "0.6376831", "0.631317", "0.6277058", "0.62526375", "0.6227516", "0.6221252", "0.61530316", "0.6152681", "0.6141955", "0.6117791", "0.6106976", "0.60684395", "0.60385185", "0.6023373", "0.6020817", "0.60072196", "0.5997911", "0.59877807", "0.59827834", "0.5971743", "0.59644645", "0.596195", "0.5950514", "0.5921533", "0.5918254" ]
0.6825931
0
Checks if the point (x,y) is outside the image area
def outside(self, x, y):
    bbox = self.canvas.coords(self.container)  # get image area
    if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
        return False  # point (x,y) is inside the image area
    else:
        return True  # point (x,y) is outside the image area
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outside(self, x, y):\n bbox = self.canvas_image.coords(self.container) # get image area\n if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:\n return False # point (x,y) is inside the image area\n else:\n return True # point (x,y) is outside the image area", "def point_in_map(self, x, y):\r\n return 0 <= x < self.width and 0 <= y < self.height and (x,y) not in self.walls", "def isOutside(self, point):\n return 1-self.isInside(point)", "def is_out_of_bounds(img_height: int, img_width: int, x: float, y: float, patch_size: int) -> bool:\n patch_half_size_floored = patch_size // 2\n x_low = x - patch_half_size_floored\n x_high = x + patch_half_size_floored\n y_low = y - patch_half_size_floored\n y_high = y + patch_half_size_floored\n\n return x_low < 0 or x_high > img_width or y_low < 0 or y_high > img_height", "def isOutsideBorder(self):\n if (self.posX < -self.myGalaxy.worldWidth or self.posX > self.myGalaxy.worldWidth or\n self.posY < -self.myGalaxy.worldHeight or self.posY > self.myGalaxy.worldHeight):\n return 1\n return 0", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def isInsideImage(x, y, nx, ny, imageNx, imageNy):\r\n return ( ((x+nx) < imageNx) and ((y+ny) < imageNy) )", "def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2", "def valid_coordinates(self, x, y):\n return ((x >= 0) and (x < self.width) and\n (y >= 0) and (y < self.height))", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def isInside(point_x, point_y, area_left, area_top, area_width, area_height):\n return (area_left <= point_x < area_left + area_width) and (area_top <= point_y < area_top + area_height)", "def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n return inside", "def __isPointOnArea(self, point, area):\r\n\r\n pointX, pointY = point\r\n areaX,areaY,areaWidth,areaHeight = area\r\n\r\n if (pointX >= areaX and pointX <= areaX+areaWidth) and (pointY >= areaY and pointY <= areaY+areaHeight):\r\n return True\r\n else:\r\n return False", "def is_inside(self, x: int, y: int) -> bool:\n pass", "def test_point_within_dimensions_border():\n point = np.array([100, 20])\n image_dimensions = np.array([100, 100])\n assert not point_within_dimensions(point, image_dimensions)", "def insideArea(point, area):\n x=point.real\n y=point.imag\n n = len(area)\n inside = False\n p1x = area[0].real\n p1y = area[0].imag\n for i in range(1, n + 1):\n p2x = area[i % n].real\n p2y = area[i % n].imag\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def _inside(self, x, y):\n wx, wy, w, h = self._raw_graph_window_dim()\n if wx <= x < wx + w and wy <= y < wy + h:\n return True\n return False", "def filter_point(x, y, xlower, xupper, ylower, yupper):\n ignore = False\n if (x < xlower or x > xupper or y < ylower or y > yupper):\n ignore = True\n return ignore", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def in_area(self, x, y):\n raise 
NotImplementedError", "def check_pos(self, x, y):\n if x >= WINDOWWIDTH or y >= WINDOWHEIGHT or x <=0 or y <= 0:\n return True", "def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE", "def is_point_in_box(x, y, bbox):\n if x < 200 and y < 200:\n return True\n return False", "def isInternal(self, aPoint):\n if (aPoint.x >= self.pMin.x and aPoint.x <= self.pMax.x) \\\n and (aPoint.y >= self.pMin.y and aPoint.y <= self.pMax.y):\n return True\n else:\n return False", "def test_inside_image(self):\r\n sample_img = Image(np.zeros((100, 200, 3), dtype=np.uint8))\r\n\r\n # test 4 corners\r\n top_left = Point(y=0, x=0)\r\n bottom_left = Point(y=99, x=0)\r\n top_right = Point(y=0, x=199)\r\n bottom_right = Point(y=99, x=199)\r\n\r\n assert top_left.inside(sample_img)\r\n assert bottom_left.inside(sample_img)\r\n assert top_right.inside(sample_img)\r\n assert bottom_right.inside(sample_img)\r\n\r\n # test out-side point\r\n pt1 = Point(y=-1, x=50)\r\n pt2 = Point(y=100, x=50)\r\n pt3 = Point(y=50, x=-1)\r\n pt4 = Point(y=50, x=200)\r\n\r\n assert not pt1.inside(sample_img)\r\n assert not pt2.inside(sample_img)\r\n assert not pt3.inside(sample_img)\r\n assert not pt4.inside(sample_img)", "def validate_in(self, xcoord, ycoord):\r\n x = int(xcoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n y = int(ycoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n if not self.tr.turn_tracker and self.tr.bd.disks[x][y].halo_tag:\r\n return True, x, y\r\n else:\r\n return False, x, y", "def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines", "def obstacle_prone_area(self,image):\r\n\r\n start_x=int(self.start[0])\r\n start_y=int(self.start[1])\r\n goal_x=int(self.goal[0])\r\n goal_y=int(self.goal[1])\r\n print(goal_x,goal_y)\r\n if (image[int(self.maximum_size-goal_x),int(goal_y),0]==0) or ((image[int(self.maximum_size-start_x),int(start_y),0]==0)):\r\n #print(1)\r\n return False\r\n else:\r\n #print(2)\r\n return True", "def is_inside(self, mX, mY, point):\n return (math.sqrt((point[0] - mX) * (point[0] - mX)\n + (point[1] - mY) * (point[1] - mY)) <= 2)", "def in_display(self, point):\n x, y = point\n if x < 0 or x > self.width or \\\n y < 0 or y > self.height:\n return False\n return True" ]
[ "0.8744506", "0.73789674", "0.7371134", "0.7322722", "0.7271759", "0.71677816", "0.712399", "0.70662796", "0.7022934", "0.7015038", "0.69635075", "0.69552475", "0.69520825", "0.6922031", "0.68969744", "0.6880167", "0.68586254", "0.6837809", "0.6799358", "0.67987967", "0.6782715", "0.6774802", "0.6766527", "0.6696917", "0.66968685", "0.668874", "0.66808665", "0.6679696", "0.666979", "0.6645654" ]
0.8740559
1
Start a docker container and read out its Python version.
def ping_docker():
    with Docker('unittest-36', image='python:3.6') as tun:
        return tun.call(python_version)[:2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _get_python_version(user_image, python_binary) -> packaging.version.Version:\n\n proc = await asyncio.create_subprocess_exec(\n \"docker\",\n \"run\",\n \"--rm\",\n user_image,\n python_binary,\n \"--version\",\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL,\n )\n out, err = await proc.communicate()\n out = out.decode(\"utf-8\")\n\n python_version = re.sub(r\"^Python\\s+\", r\"\", out, re.IGNORECASE,).strip()\n python_version = packaging.version.Version(python_version)\n if python_version < packaging.version.Version(\"3.5\"):\n raise LowPException(\"\")\n return python_version", "def get_docker_version() -> Union[version.LegacyVersion, version.Version]:\n output = subprocess.check_output(\n [\"docker\", \"version\", \"--format\", \"{{json .Client.Version }}\"],\n stderr=subprocess.DEVNULL,\n ).strip()\n return version.parse(output.strip(b'\"').decode(\"utf8\"))", "def test_python_version(container, python_next_version=\"3.10\"):\n LOGGER.info(f\"Checking that python version is lower than {python_next_version}\")\n c = container.run(\n tty=True,\n command=[\"start.sh\"],\n )\n cmd = c.exec_run(\"python --version\")\n output = cmd.output.decode(\"utf-8\")\n assert \"ERROR\" not in output\n assert \"WARNING\" not in output\n actual_python_version = version.parse(output.split()[1])\n assert actual_python_version < version.parse(\n python_next_version\n ), f\"Python version shall be lower than {python_next_version}\"", "def docker_version(args): # type: (CommonConfig) -> t.Dict[str, t.Any]\n stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True)\n return json.loads(stdout)", "def test_python_version(self):\n with Local() as tun:\n res = tun.call(ping_docker)\n self.assertEqual(\n res,\n [3, 6]\n )", "def docker_client():\n return docker.from_env()", "def main():\n extensions = os.getenv('EXTENSIONS', DEFAULT_EXTENSIONS).split(',')\n extensions.sort()\n docker_contents = []\n contents = travis_contents()\n data = yaml.safe_load(contents)\n\n # set the version\n php_versions = data.get('php', [DEFAULT_VERSION])\n php_version = php_versions[0]\n docker_contents.append('FROM php:{0}'.format(php_version))\n\n # ensure all the php shit exists\n\n # LC_ALL=en_US.UTF-8\n docker_contents.append('ENV DEBIAN_FRONTEND=noninteractive LC_ALL=C DOCKER=1') # noqa\n docker_contents.append('RUN apt-get update')\n docker_contents.append('RUN apt-get -qq install -qq -y php5-cli php-pear')\n # for composer\n docker_contents.append('RUN apt-get -qq install -qq -y git-core')\n # for curl\n docker_contents.append('RUN apt-get -qq install -qq -y libcurl4-openssl-dev')\n # for intl\n docker_contents.append('RUN apt-get -qq install -qq -y libicu-dev')\n\n # installs user-specified packages\n packages = os.getenv('PACKAGES', '')\n if len(os.getenv('PACKAGES', '')) > 0:\n packages = packages.split(',')\n docker_contents.append('RUN apt-get -qq install -qq -y {0}'.format(\n ' '.join(packages)\n ))\n\n for extension in extensions:\n if extension in available_extensions:\n docker_contents.append('RUN docker-php-ext-install {0}'.format(\n extension\n ))\n else:\n docker_contents.append('RUN apt-get -qq install -qq -y php5-{0} && pecl install -o -f {0} && \\\\'.format(extension))\n docker_contents.append(' rm -rf /tmp/pear && \\\\')\n if extension in ZEND_EXTENSIONS:\n docker_contents.append(' echo \"zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-{0}/xdebug.so\" > /usr/local/etc/php/conf.d/{1}.ini'.format(\n phpextension_paths[php_version],\n 
extension\n ))\n else:\n docker_contents.append(' echo \"extension={0}.so\" > /usr/local/etc/php/conf.d/{0}.ini'.format(extension))\n\n # ensure we have all the proper php testing stuff\n docker_contents.append('RUN \\\\')\n docker_contents.append(' curl -sSL https://phar.phpunit.de/phpunit-old.phar > phpunit.phar && \\\\')\n docker_contents.append(' curl -sS https://getcomposer.org/installer | php && \\\\')\n docker_contents.append(' mv composer.phar /usr/local/bin/composer && \\\\')\n docker_contents.append(' mv phpunit.phar /usr/local/bin/phpunit && \\\\')\n docker_contents.append(' chmod +x /usr/local/bin/composer /usr/local/bin/phpunit && \\\\')\n docker_contents.append(' phpunit --version')\n\n # set the environment\n environments = data.get('env', {'matrix': 'CI=1'}).get('matrix', [])\n docker_env = environments[0]\n docker_contents.append('ENV {0}'.format(docker_env))\n\n docker_contents.append('ADD composer.json /app/composer.json')\n docker_contents.append('WORKDIR /app')\n docker_contents.append('RUN echo \"date.timezone = UTC\" > /usr/local/etc/php/conf.d/timezone.ini') # noqa\n\n for script in data.get('before_script', []):\n docker_contents.append('RUN {0}'.format(script))\n\n docker_contents.append('ADD . /app')\n\n # HACK\n docker_contents.append('ENV COVERALLS=1 DEFAULT=1 PHPCS=1')\n\n for script in data.get('script', []):\n docker_contents.append('RUN {0}'.format(script))\n\n with open('{0}/Dockerfile'.format(os.getcwd()), 'w') as f:\n for line in docker_contents:\n f.write(\"{0}\\n\\n\".format(line))", "def detect(): # type: () -> t.Optional[DockerCommand]\n if os.environ.get('ANSIBLE_TEST_PREFER_PODMAN'):\n commands = list(reversed(DOCKER_COMMANDS))\n else:\n commands = DOCKER_COMMANDS\n\n for command in commands:\n executable = find_executable(command, required=False)\n\n if executable:\n version = raw_command([command, '-v'], capture=True)[0].strip()\n\n if command == 'docker' and 'podman' in version:\n continue # avoid detecting podman as docker\n\n display.info('Detected \"%s\" container runtime version: %s' % (command, version), verbosity=1)\n\n return DockerCommand(command, executable, version)\n\n return None", "def test_container_version_label_matches(version_container):\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n version_container.labels[\"org.opencontainers.image.version\"] == project_version\n ), \"Dockerfile version label does not match project version\"", "def _setup_python(container, util, shell):\n config_python = \"setup/project/configure_python.py\"\n\n py_ver = util.language_version(\"python3\")\n py_cont = container.fetch_and_import(config_python).run(container,\n util,\n shell,\n py_ver)\n return py_cont", "def docker_client():\n client = docker.from_env()\n return client", "def start(self):\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n\n return start", "def test_log_version(version_container):\n version_container.wait() # make sure container exited if running test isolated\n log_output = version_container.logs().decode(\"utf-8\").strip()\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n log_output == project_version\n ), f\"Container version output 
to log does not match project version file {VERSION_FILE}\"", "def get_client():\n info = {}\n host = os.environ.get('DOCKER_HOST')\n net_host = os.environ.get('DOCKER_NET_HOST')\n\n client_api_version = os.environ.get('DOCKER_API_VERSION')\n if not client_api_version:\n client_api_version = \"auto\"\n\n # IP to use for started containers\n if net_host:\n info['host'] = net_host\n elif host:\n info['host'] = urlparse.urlparse(host).netloc.split(':')[0]\n else:\n info['host'] = 'localhost'\n\n verify = os.environ.get('DOCKER_TLS_VERIFY') == '1'\n if verify: # use TLS\n assert_hostname = None\n cert_path = os.environ.get('DOCKER_CERT_PATH')\n if cert_path:\n client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))\n ca_cert = os.path.join(cert_path, 'ca.pem')\n else:\n client_cert = ca_cert = None\n\n tls_config = docker.tls.TLSConfig(\n client_cert=client_cert,\n ca_cert=ca_cert,\n verify=verify,\n assert_hostname=assert_hostname,\n )\n return docker.Client(base_url=host, tls=tls_config, version=client_api_version), info\n else:\n return docker.Client(base_url=host, version=client_api_version), info", "def build_docker(c):\n tag = c.run('git describe', hide=True)\n docker_img = f'{docker_repo}:{tag.stdout.strip()}'\n c.run(f'docker build -t {docker_img} .')", "def match_pip_to_py(pip_versions, docker_image):\n\n # First get path pythons:\n cmd = ['sudo', 'docker', 'run', '--env', \"LD_LIBRARY_PATH=''\", '--rm', '-ti',\n '--entrypoint=/bin/bash', '-v', '{}/commands:/tmp/my_commands'.format(pwd),\n docker_image, \"/tmp/my_commands/bash_crawl.sh\", 'python*']\n\n print(' '.join(cmd))\n r = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)\n r.wait()\n output = str(r.stdout.read())\n print(output)\n output = output.split('\\n')\n\n exp = \".*python([0-9]?\\.?[0-9]?[0-9]?\\.?[0-9]?[0-9]?)$\"\n py_list = []\n\n for result in output:\n m = None\n m = re.match(exp, result)\n if not m == None:\n p = pathlib.Path(result.rstrip())\n print('{} poiting to {}'.format(p, p.resolve()))\n p = p.resolve().as_posix()\n\n cmd = ['sudo', 'docker', 'run', '--env', \"LD_LIBRARY_PATH=''\", '--rm', '-ti',\n '--entrypoint={}'.format(p), docker_image, '--version']\n\n print(' '.join(cmd))\n r = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)\n r.wait()\n output = str(r.stdout.read().rstrip())\n print('python version: \"{}\"'.format(output))\n # output = str(r.stdout.read().rstrip())\n if output == '':\n continue\n\n full_python_vers = output.split()[-1]\n vlist = full_python_vers.split('.')\n if len(vlist) > 2:\n mainv = '.'.join(vlist[0:2])\n else:\n mainv = full_python_vers\n\n pair = (p, full_python_vers, mainv)\n if pair not in py_list:\n print('adding {}'.format(p))\n py_list.append(pair)\n\n pprint.pprint(py_list)\n pprint.pprint(pip_versions)\n py_2_pip = []\n\n for pip_path, pip_vers in pip_versions:\n # python_match = []\n\n # pip_dir = os.path.dirname(pip_path)\n # ANY python that matches the pip version gets added\n # No pip for your python version? 
No dice.\n for py_path, py_vers, main_vers in py_list:\n if main_vers == pip_vers:\n new_py2pip = (py_path, py_vers, main_vers, pip_path, pip_vers)\n if new_py2pip not in py_2_pip:\n py_2_pip.append(new_py2pip)\n\n pprint.pprint(py_2_pip)\n\n return (py_2_pip, py_list)", "def get_python_version(self, dock_image: str) -> str:\n # version string typically looks like: \"Python 3.9.2\"\n # But other variations exist such as: \"Python 3.6.9 :: Anaconda, Inc.\"\n # and for whatever reason, the conda version prints to stderr instead...\n python_version_string: str = check_output(\n [\n self.engine, 'run', '--rm', '-w', '/',\n '--entrypoint', 'python',\n dock_image,\n '--version'\n ],\n text=True,\n stderr=STDOUT\n )\n for line in python_version_string.split('\\n'):\n words = line.split()\n if not words:\n continue\n if words[0] == 'Python':\n if len(words) < 2 or not words[1][0].isdigit():\n raise PythonVersionLookupError(f'Expected \"Python x.y.z\", got \"{line}\"')\n return words[1]\n raise PythonVersionLookupError(f'Version string not found in:\\n{python_version_string}')", "def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")", "def test_docker_build(rule_runner: RuleRunner) -> None:\n rule_runner.write_files(\n {\n \"src/BUILD\": \"docker_image(name='test-image', image_tags=['1.0'])\",\n \"src/Dockerfile\": \"FROM python:3.8\",\n }\n )\n target = rule_runner.get_target(Address(\"src\", target_name=\"test-image\"))\n result = run_docker(rule_runner, target)\n assert len(result.artifacts) == 1\n assert len(result.artifacts[0].extra_log_lines) == 2\n assert \"Built docker image: test-image:1.0\" == result.artifacts[0].extra_log_lines[0]\n assert \"Docker image ID:\" in result.artifacts[0].extra_log_lines[1]\n assert \"<unknown>\" not in result.artifacts[0].extra_log_lines[1]", "def main():\n client = docker.from_env()\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--package_id', default='0',\n help='provide id for the work package, comma separated if multiple')\n parser.add_argument('--load_quicksave', default=\"no\", help='wanna load? -> yes/no')\n args = parser.parse_args()\n packages = args.package_id.split(\",\")\n print('Building docker container. This might take a while.')\n build_container(client)\n print('Build finished. Starting containers.')\n for package in packages:\n start_container(client, package, args.load_quicksave)\n print('Containers are running. Check Docker Dashboard for container health. 
Script will exit.')", "def docker_test(c, rebuild_venv=False):\n cli_tasks.docker_test.run(c, rebuild_venv)", "def test_start(set_env, container: Container, docker_client: DockerClient):\n # pylint: disable=unused-argument\n assert container\n container.stop()\n\n from dockerdb.commands.start import start\n\n start()\n\n assert container.status == \"running\"", "def handle_api_which_docker(self, http_context):\n\n try:\n self.docker = subprocess.check_output(['which', 'docker']).decode().strip()\n except subprocess.CalledProcessError as e:\n raise EndpointError(_('Docker is not installed on this host'))", "def test_version(webserver, docker_client):\n airflow_version = get_label(docker_client, 'io.astronomer.docker.airflow.version')\n version_output = webserver.check_output('airflow version')\n assert airflow_version in version_output", "def docker():\n try:\n client = docker_from_env(\n version=os.environ.get('DOCKER_API_VERSION', '1.24'))\n\n containers = []\n\n for container in client.containers.list():\n include_container = False\n if INTERESTING_CONTAINERS.search(container.name):\n include_container = True\n else:\n for tag in container.image.attrs.get('RepoTags', []):\n if INTERESTING_TAGS.match(tag):\n include_container = True\n break\n\n if not include_container:\n continue\n\n docker_metrics = {\n \"stats_type\": \"docker\",\n \"docker\": {\n \"id\": container.short_id,\n \"name\": container.name,\n \"status\": container.status,\n \"labels\": [\"%s=%s\" % (k, v)\n for k, v in container.labels.items()],\n \"tags\": container.image.attrs['RepoTags'],\n 'created': container.image.attrs['Created'],\n }\n }\n if 'version' in container.labels:\n docker_metrics['docker']['image_version'] = \\\n container.labels['version']\n containers.append(docker_metrics)\n\n except Exception as exc:\n logging.debug(\"Error gathering Docker info: %s\", exc)\n return []\n\n return containers", "def docker_build(c):\n cli_tasks.docker_build.run(c)", "def run(self, container_config: ContainerConfig) -> Container:", "def require_docker(): # type: () -> DockerCommand\n if command := get_docker_command():\n return command\n\n raise ApplicationError(f'No container runtime detected. Supported commands: {\", \".join(DOCKER_COMMANDS)}')", "async def get_docker_client(self) -> \"DockerClient\":", "def run_docker_container():\n creds = {\n 'user': 'root',\n 'password': 'root',\n 'host': '127.0.0.1',\n 'db': 'test',\n 'port': 3307\n }\n client = from_env()\n client.containers.list()\n container = client.containers.run('mariadb:10.3', name='test-mariadb-database',\n ports={3306: creds['port']}, environment={'MYSQL_ROOT_PASSWORD': creds['password'], 'MYSQL_DATABASE': creds['db']},\n detach=True)\n url = (\"mysql+pymysql://{}:{}@{}:{}/{}\".format(creds['user'], creds['password'], creds['host'], creds['port'], creds['db']))\n return url, container" ]
[ "0.6702616", "0.6522058", "0.6303183", "0.6298336", "0.62177324", "0.6161299", "0.6140888", "0.59771216", "0.59576744", "0.5911534", "0.58886117", "0.5851652", "0.57637846", "0.5735787", "0.56933355", "0.5655075", "0.56307906", "0.5624655", "0.56032175", "0.55927694", "0.557225", "0.55360645", "0.5513935", "0.549666", "0.5496628", "0.549248", "0.5491867", "0.5483217", "0.5470295", "0.5433378" ]
0.6773678
0
Infinite recursion, requiring depth limit to stop.
def recursive(): with Local() as tun: tun.call(recursive)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_depth_limit(self):\n with self.assertRaisesRegexp(\n RemoteException,\n r'.*DepthLimitExceeded: Depth limit of 2 ' +\n 'exceeded at localhost -> localhost -> localhost'):\n recursive()", "def getrecursionlimit(): # real signature unknown; restored from __doc__\n pass", "def probe_stack(depth = 10):\n if depth == 0:\n return\n probe_stack(depth - 1)", "def run(self, max_depth):\n while len(self.stack) > 0:\n state = self.get_next_state()\n\n if state.is_solution():\n self.solutions.append(state.moves)\n\n if len(state.moves) < max_depth:\n self.create_children(state)\n\n self.archive[state.get_tuple()] = len(state.moves)\n\n # sort solutions best to worst\n self.solutions.sort(key=len)\n\n if self.solutions:\n return self.solutions[0]\n\n print(\"This depth is not sufficient.\")\n return []", "def depth(self):\n return 0", "def _loop_depth(self, start, connections):\n # This is just a slightly modified breadth-first search\n visited = {start: 1}\n frontier = [start]\n\n limit = []\n while len(frontier):\n node = frontier.pop(0)\n prev_depth = visited[node]\n if prev_depth >= self.depth:\n limit.append(node)\n continue\n\n for x in connections[node]:\n if x in visited:\n continue\n visited[x] = prev_depth + 1\n frontier.append(x)\n return limit", "def tail_call_optimized(g):\n def func(*args, **kwargs):\n f = sys._getframe() # Top frame\n # if two and three depth frames exist and if the code at the top\n # recursion and the three depth frame use the same code \n if f.f_back and f.f_back.f_back and f.f_back.f_back.f_back \\\n and f.f_back.f_back.f_back.f_code == f.f_code:\n # Break the recursion\n raise TailRecurseException(args, kwargs)\n else:\n # Here you run the frame in a try catch setup\n while 1:\n try:\n return g(*args, **kwargs)\n except TailRecurseException as e:\n args = e.args\n kwargs = e.kwargs\n func.__doc__ = g.__doc__\n return func", "def setrecursionlimit(n): # real signature unknown; restored from __doc__\n pass", "def tail_call_optimized(g):\n def func(*args, **kwargs):\n f = sys._getframe()\n if f.f_back and f.f_back.f_back \\\n and f.f_back.f_back.f_code == f.f_code:\n # 抛出异常\n raise TailRecurseException(args, kwargs)\n else:\n while 1:\n try:\n return g(*args, **kwargs)\n except TailRecurseException, e:\n # 捕获异常,重新调用栈\n args = e.args\n kwargs = e.kwargs\n func.__doc__ = g.__doc__\n return func", "def factorial_recursion(n):\n pass # @todo -fix this", "def max_depth(self) -> int:\n return 0", "def test_scan_recursive(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount + 1)", "def tail_call_optimized(g):\n\n def func(*args, **kwargs):\n f = sys._getframe()\n # 为什么是grandparent, 函数默认的第一层递归是父调用,\n # 对于尾递归, 不希望产生新的函数调用(即:祖父调用),\n # 所以这里抛出异常, 拿到参数, 退出被修饰函数的递归调用栈!(后面有动图分析)\n if f.f_back and f.f_back.f_back and f.f_back.f_back.f_code == f.f_code:\n # 抛出异常\n raise TailRecurseException(args, kwargs)\n else:\n while 1:\n try:\n return g(*args, **kwargs)\n except TailRecurseException as e:\n args = e.args\n kwargs = e.kwargs\n func.__doc__ = g.__doc__\n return func", "def fn(x):\n nonlocal ans \n if x < ans: \n if min(depth) == n: ans = x # all tiled\n else: \n i = min(depth)\n j = jj = depth.index(i) # (i, j)\n while jj < m and depth[jj] == depth[j]: jj += 1\n k = min(n - i, jj - j)\n for kk in reversed(range(1, k+1)): \n for jj in range(j, j+kk): depth[jj] += kk\n fn(x+1)\n for jj in range(j, j+kk): depth[jj] -= kk", "def tail_call_optimized(g):\n def func(*args, **kwargs):\n f = sys._getframe()\n if f.f_back and f.f_back.f_back and 
f.f_back.f_back.f_code == f.f_code:\n raise TailRecurseException(args, kwargs)\n else:\n while 1:\n try:\n return g(*args, **kwargs)\n except TailRecurseException as e:\n args = e.args\n kwargs = e.kwargs\n\n func.__doc__ = g.__doc__\n return func", "def at_depth_limit(self):\n return self.depth > self.depth_limit", "def go_deeper(cls, *args, **kwargs):\n\t\treturn True", "def depth_limited_search(initial_state, goal_state, limit):\n\n return recursive_dls(createRootNode(initial_state), goal_state, limit)", "def max_recursion_depth(self) -> ConfigNodePropertyInteger:\n return self._max_recursion_depth", "def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()", "def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], 
depth)", "def depth_limited_search(problem, limit):\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path", "def zig_zag_traversal(root):\n return", "def _find_recursion_limit(parse, j=1001):\n i = 1\n while True:\n try:\n parse(('[' * i) + (']' * i))\n # Don't just catch RecursionError in case a library throws\n # something else; if it's not a recursion error, this loop\n # will quickly go to 0\n except Exception:\n j = i\n i = int(i / 2)\n if i <= 1:\n break\n else:\n if j - i <= 1:\n break\n i += int((j - i) / 2)\n return i", "def __init__(self, depth_limit=float(20)):\n\n self.root = None\n self.depth_limit = depth_limit", "def depth(self):\n raise NotImplementedError()", "def DEFAULT_MAX_DEPTH(self): # real signature unknown; restored from __doc__\n pass", "def flatten_again(collection, depth):\n collection_is_not_iterable = not hasattr(collection, '__contains__')\n depth_reached = depth==0\n if depth_reached or collection_is_not_iterable:\n return [collection]\n x = []\n for entry in collection:\n flattened = flatten_again(entry, depth-1)\n x = x + flattened\n return x" ]
[ "0.67078143", "0.66165847", "0.6310526", "0.6277235", "0.6264654", "0.62610734", "0.62260467", "0.6224823", "0.6135212", "0.607142", "0.606345", "0.60588396", "0.6054196", "0.60538924", "0.6049625", "0.5928221", "0.5916244", "0.5903939", "0.59015894", "0.5863307", "0.5840322", "0.5840271", "0.5834897", "0.58322936", "0.5828936", "0.58014446", "0.5789902", "0.5763353", "0.57461536", "0.57119715" ]
0.6727311
0
Recursive tunneling is limited by a depth limit.
def test_depth_limit(self): with self.assertRaisesRegexp( RemoteException, r'.*DepthLimitExceeded: Depth limit of 2 ' + 'exceeded at localhost -> localhost -> localhost'): recursive()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _loop_depth(self, start, connections):\n # This is just a slightly modified breadth-first search\n visited = {start: 1}\n frontier = [start]\n\n limit = []\n while len(frontier):\n node = frontier.pop(0)\n prev_depth = visited[node]\n if prev_depth >= self.depth:\n limit.append(node)\n continue\n\n for x in connections[node]:\n if x in visited:\n continue\n visited[x] = prev_depth + 1\n frontier.append(x)\n return limit", "def max_recursion_depth(self) -> ConfigNodePropertyInteger:\n return self._max_recursion_depth", "def recursive():\n with Local() as tun:\n tun.call(recursive)", "def depth_limited_search(initial_state, goal_state, limit):\n\n return recursive_dls(createRootNode(initial_state), goal_state, limit)", "def max_recursion_depth(self, max_recursion_depth: ConfigNodePropertyInteger):\n\n self._max_recursion_depth = max_recursion_depth", "def max_depth(self) -> int:\n return 0", "def getrecursionlimit(): # real signature unknown; restored from __doc__\n pass", "def DEFAULT_MAX_DEPTH_DIFF(self): # real signature unknown; restored from __doc__\n pass", "def DEFAULT_MAX_DEPTH(self): # real signature unknown; restored from __doc__\n pass", "def setrecursionlimit(n): # real signature unknown; restored from __doc__\n pass", "def run(self, max_depth):\n while len(self.stack) > 0:\n state = self.get_next_state()\n\n if state.is_solution():\n self.solutions.append(state.moves)\n\n if len(state.moves) < max_depth:\n self.create_children(state)\n\n self.archive[state.get_tuple()] = len(state.moves)\n\n # sort solutions best to worst\n self.solutions.sort(key=len)\n\n if self.solutions:\n return self.solutions[0]\n\n print(\"This depth is not sufficient.\")\n return []", "def __count_recursion_depth(link_size, recursion_depth, prev_link_size, first_run):\n if not first_run:\n if link_size == prev_link_size + 1:\n recursion_depth += 1\n prev_link_size = link_size\n for i in range(1, 20):\n if link_size == prev_link_size - i:\n recursion_depth -= i\n prev_link_size = link_size\n else:\n prev_link_size = link_size\n first_run = False\n\n return [recursion_depth, prev_link_size, first_run]", "def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)", "def depth_limited_search(problem, limit):\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth 
== 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path", "def at_depth_limit(self):\n return self.depth > self.depth_limit", "def reset_max_depth(self) -> None:\n # The max depth is now calculated on the fly, so this is a no-op.\n pass", "def __init__(self, depth_limit=float(20)):\n\n self.root = None\n self.depth_limit = depth_limit", "def depth(x):\n return max(int(x * depth_multiplier), 8)", "def max_depth(self) -> int:\n return pulumi.get(self, \"max_depth\")", "def depth(self):\n return 0", "def _max_depth(self):\n max_depth = 0\n for node, data in self.traverse():\n max_depth = max(max_depth, data['level'])\n return max_depth", "def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list", "def increment_depth(self):\r\n self.depth = self.depth + 1", "def update_max_search_depth(self, depth):\n if self.max_search_depth < depth:\n self.max_search_depth = depth", "def get_max_depth(self):\n return self.MAX_DEPTH", "def depth(self):\n raise NotImplementedError()", "def depth(self, v):\n # method here", "def depth_check(self, depth):\r\n if depth >= self.ply:\r\n return True\r\n return False", "def depth(self):\n return self._max_depth", "def progressive_deepening(state, heuristic_fn=always_zero, depth_limit=INF,\n maximize=True) :\n anytime_value = AnytimeValue() # TA Note: Use this to 
store values.\n depth = 0\n while depth<=depth_limit-1:\n depth+=1\n best_option=minimax_search_alphabeta(state,-INF,INF, heuristic_fn=heuristic_fn,depth_limit=depth, maximize=True)\n anytime_value.set_value(best_option)\n return anytime_value" ]
[ "0.65145206", "0.6437202", "0.6319541", "0.6313615", "0.6255119", "0.620241", "0.61591923", "0.6117179", "0.6089963", "0.6020909", "0.5939277", "0.5929417", "0.5891039", "0.5885006", "0.56439644", "0.5581242", "0.55780214", "0.55770856", "0.55101794", "0.5502034", "0.55014837", "0.54859227", "0.5452928", "0.5420784", "0.53945166", "0.53593093", "0.53483063", "0.53267616", "0.5306974", "0.5295956" ]
0.66625166
0
r""" Return the busses' efficiency.
def calc_bus_efficiency(self, bus): return bus.comps.loc[self, 'char'].evaluate(self.calc_bus_expr(bus))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost(self) -> float:", "def efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.shortestpathij(i, j) == None):\n continue\n Temp += 1/self.shortestpathij(i, j)\n \n self.efficiency = 1/(self.supplynum*self.demandnum)*Temp", "def get_expected_cost(self):", "def cost_b(self):\n return self._cost_b", "def calculate_cost(self):\n booking_days, booking_hours = self.calculate_daily_hourly_billable_counts()\n day_cost = booking_days * Decimal(self.vehicle.type.daily_rate)\n hour_cost = booking_hours * Decimal(self.vehicle.type.hourly_rate)\n if hour_cost > self.vehicle.type.daily_rate:\n hour_cost = self.vehicle.type.daily_rate\n return float(day_cost + hour_cost)", "def topo_efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.topo_shortestpathij(i, j) == None):\n continue\n Temp += 1/self.topo_shortestpathij(i, j)\n \n self.topo_efficiency = 1/(self.supplynum*self.demandnum)*Temp", "def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs", "def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))", "def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def compute_cost(self, chrome):\n return 1", "def cost_a(self):\n return self._cost_a", "def cost(self):\n cost = 0\n for battery in self.grid.batteries:\n for house in battery.connections:\n cost += house.distances[house.connection] * 9\n cost += battery.cost\n return cost", "def calc_slow(self, demands: Demand, routing: Routing) -> float:\n edge_capacities = [e[2]['weight'] for e in\n sorted(self.graph.edges(data=True))]\n link_utilisation = self.calc_overall_link_utilisation(demands, routing)\n # Because utilisation compared to link width is what we care about here\n ratio_capacities = np.divide(link_utilisation, edge_capacities)\n\n return np.max(ratio_capacities)", "def efficiency(self):\n if self.byte_total == 0:\n return 1\n return self.entropy() / 8", "def speed(self) -> int:", "def speed(self) -> int:", "def tablecost(self):\n subtotal_getter = operator.attrgetter(\"subtotal\")\n\n cost = 0.0\n\n cost += sum(map(subtotal_getter, self.materials))\n cost += sum(map(subtotal_getter, self.processes))\n cost += sum(map(subtotal_getter, self.fasteners))\n cost += sum(map(subtotal_getter, self.toolings))\n\n return cost", "def rate(way):\n cost = 0\n for i in range(len(way)-1):\n cost += DISTANCES[way[i]][way[i+1]]\n return cost", "def total_sdram_requirements(self):", "def cost(self):\n\t\treturn self.g + self.h", "def servicing_cost_long(self) -> float:\n return self.__servicing_cost_long", "def get_cost(self) -> float:\n return math.e / self.fitness", "def GetSpeed(self):\n pass", "def calculate_total_cost(state):\n pass", "def resource_cost(bw_matrix, standby_vr):\n #host_cpu = float(cpu_info[0][standby_vr])\n host_bw = 0\n num_rows = len(bw_matrix)\n #num_cols = len(bw_matrix[1,:])\n for port in range(num_rows):\n if bw_matrix[port][standby_vr] != '.':\n host_bw += float(bw_matrix[port][standby_vr])\n else:\n pass\n #return host_cpu * host_bw\n return host_bw", "def score(self):\n # loop over aminoacids 
in protein and calculate how often H and C are surrounded by H and C\n for aminoacid in self.aminoacids:\n if aminoacid.aminoacid_type == \"H\":\n self.stability = self.stability + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number))\n elif aminoacid.aminoacid_type == \"C\":\n self.stability = self.stability + (-5 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number))\n self.stability = self.stability/2\n return int(self.stability)", "def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit", "def get_performance(self):\n return self.sim.tps", "def find_best(self):\n best_st = 0\n best_bt = 0\n best_perf = -1.1\n for bt in self.btl:\n for st in self.stl:\n if self.total[bt, st, \"perf\"] > best_perf:\n best_perf = self.total[bt, st, \"perf\"]\n best_st = st\n best_bt = bt\n return (best_perf, self.total[best_bt, best_st, \"count\"], best_bt, best_st)", "def get_ib_speed():\n if get_cluster_vendor() == \"sgi\":\n return sgi_cluster.get_ib_speed()\n elif get_cluster_vendor() == \"ibm\": \n return ibm_cluster.get_ib_speed()\n return None" ]
[ "0.7053986", "0.68898183", "0.63519907", "0.63385314", "0.6318781", "0.63130236", "0.627369", "0.62707245", "0.6202245", "0.6139958", "0.6115106", "0.6112748", "0.60495883", "0.60211414", "0.5992771", "0.5992771", "0.5950813", "0.5923411", "0.5913143", "0.58888257", "0.58772916", "0.58727103", "0.5862174", "0.5851683", "0.5821937", "0.581605", "0.5816", "0.57962745", "0.579053", "0.57870096" ]
0.7440501
0
r""" Return the busses' value of the component's energy transfer.
def calc_bus_value(self, bus): b = bus.comps.loc[self] comp_val = self.bus_func(b) expr = self.calc_bus_expr(bus) if b['base'] == 'component': return comp_val * b['char'].evaluate(expr) else: return comp_val / b['char'].evaluate(expr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_energy(self):\r\n return self._energy", "def energy(self):\n return self.mc.energy(self.chain)", "def energy(self):\n return self._energy", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")", "def get_energy(self):\n return self.bot_client.send_command(_Command.GetEnergy)", "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def get_value(self):\n return self.sensor.get_value()", "def energy(self):\n return self._accelerator.energy", "def bus_read_x(self) -> float:\n return self.dss_obj.BUSF(1, 0)", "def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))", "def value(self, channel):\n if channel == 1:\n value = self.gas_turbo\n if channel == 2:\n value = self.gas_system_wrg\n if channel == 3:\n value = self.mass_spectrometer\n if channel == 4:\n value = self.gas_system_baratron\n return value", "def value(self) -> float:", "def bus_func(self, bus):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n val = i[0] * (o[2] - i[2])\n\n return val", "def bus_func(self, bus):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n val = i[0] * (o[2] - i[2])\n\n return val", "def send_energy(self) -> float:\n # Ensure that the molecule currently passes validation\n if not self.molecule_validated:\n raise Exception(\"MDI attempting to compute energy on an unvalidated molecule\")\n self.run_energy()\n properties = self.compute_return.properties.dict()\n energy = properties[\"return_energy\"]\n MDI_Send(energy, 1, MDI_DOUBLE, self.comm)\n return energy", "def _value(self):\n return self.device.value(*self._id[1:])", "def energy(energy_name: str) -> float:\n pass", "def getValue(self):\n return self.__diastolic", "def ComputeEnergyConsumption(self):\r\n pass", "def total_energy(self):\n return self._total_energy", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def bus_distance(self) -> float:\n return self.dss_obj.BUSF(5, 0)" ]
[ "0.7103761", "0.6741589", "0.6661972", "0.6645079", "0.65683067", "0.65582204", "0.6549883", "0.6479461", "0.6479461", "0.6452638", "0.6380666", "0.6349827", "0.6336084", "0.6301303", "0.62337786", "0.62261325", "0.62261325", "0.6221748", "0.6215831", "0.62015873", "0.61991996", "0.61763394", "0.6169186", "0.6149263", "0.6149263", "0.6149263", "0.61469984", "0.6129767", "0.6118308", "0.6117614" ]
0.68374914
1
r""" Propagate the fluids towards connection's target in recursion.
def propagate_fluid_to_target(self, inconn, start, entry_point=False): if not entry_point and inconn == start: return conn_idx = self.inl.index(inconn) outconn = self.outl[conn_idx] for fluid, x in inconn.fluid.val.items(): if (not outconn.fluid.val_set[fluid] and not outconn.good_starting_values): outconn.fluid.val[fluid] = x outconn.target.propagate_fluid_to_target(outconn, start)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def propagate_fluid_to_target(self, inconn, start, entry_point=False):\n return", "def _propagate_step(self):\n\n # optical depth to next interaction\n self.tau = -np.log(self.RNG.rand(self.N_active))\n # optical depth to sphere edge\n self.tau_edge = np.sqrt(self.tau_sphere**2 - self.tau_i**2 *\n (1. - self.mu_i**2)) - self.tau_i * self.mu_i\n\n # identify packets that escape\n self.esc_mask = self.tau_edge < self.tau\n # update number of escaping packets\n self.N_esc += self.esc_mask.sum()\n\n # identify interacting packets\n self.nesc_mask = np.logical_not(self.esc_mask)\n\n # decide which interacting packets scatter and which get absorbed\n self.abs_mask = self.RNG.rand(self.nesc_mask.sum()) >= self.albedo\n self.scat_mask = np.logical_not(self.abs_mask)\n\n # select properties of scattering packets\n self.tau = self.tau[self.nesc_mask][self.scat_mask]\n self.tau_i = self.tau_i[self.nesc_mask][self.scat_mask]\n self.mu_i = self.mu_i[self.nesc_mask][self.scat_mask]\n\n # update number of active packets\n self.N_active = self.scat_mask.sum()\n\n # update properties (position in optical depth space, propagation\n # direction) of scattering packets\n self.tau_i = np.sqrt(self.tau_i**2 + self.tau**2 +\n 2. * self.tau * self.tau_i * self.mu_i)\n self.mu_i = 2 * self.RNG.rand(self.N_active) - 1.", "def forward(self, output, target):\n raise NotImplementedError", "def update_target(self):\n pass", "def forward(self, target, ref, gain=1):\n x = torch.cat((target, ref), 1)\n x = self.body(x) * gain\n return x", "def send_one(self, target):\n # NOTE: Variable nodes in self.neighbors are in same order as in the\n # factor table tuples.\n target_index = self.neighbors.index(target)\n msg = -np.Inf * np.ones(len(target.domain))\n for comb, fvalue in self.table.items():\n s = 0\n for i, vnode in enumerate(self.neighbors):\n if vnode != target:\n s += self.received[vnode][comb[i]]\n s += fvalue\n msg[comb[target_index]] = np.logaddexp(msg[comb[target_index]], s)\n target.receive(self, msg)", "def arrived_at_target(cls, ctx, user, target):\n return", "def send_one(self, target):\n msg = np.zeros(len(self.domain))\n for fnode in self.neighbors:\n if fnode != target:\n msg += self.received[fnode]\n target.receive(self, normalize(msg))", "def propagate_fluid_to_source(self, outconn, start, entry_point=False):\n return", "def __changeroot(self):\n if self.edges[self.best_edge].get_status() == EdgeStatus.branch:\n self.__edge_stub(self.best_edge, Message.changeroot)\n else:\n # changeroot received by father node\n self.__edge_stub(self.best_edge, Message.connect, [self.level])\n self.__change_edge_status(self.best_edge, EdgeStatus.branch)", "def set_target(self, target, useAvoidance=False, verbose=False):\n self.logger.info(\"Deprecated function set_target called. 
Please call head_direction.\")\n self.head_direction(self, target, useAvoidance, verbose)", "def backPropagate(self, targets):\n target = np.array(targets)\n output_deltas = -(target - self.ao)\n\n error = output_deltas.dot(self.wo.T)\n hidden2_deltas = dtanh(self.ah2) * error\n\n error = hidden2_deltas.dot(self.wh.T)\n hidden1_deltas = dtanh(self.ah1) * error\n\n ############output ----> hidden_2##############\n change = output_deltas.T.dot(self.ah2).T\n self.wo -= (self.learning_rate * change) + (self.co * self.momentum)\n self.co = change\n ############hidden_2 ----> hidden_1##############\n change = hidden2_deltas.T.dot(self.ah1).T\n self.wh -= (self.learning_rate * change) + (self.ch * self.momentum)\n self.ch = change\n ############hidden_1 ----> input##############\n change = hidden1_deltas.T.dot(self.ai).T\n self.wi -= (self.learning_rate * change) + (self.ci * self.momentum)\n self.ci = change\n\n return np.mean(-output_deltas)", "def forward(self, target, ref, gain=1):\n flow_c = self.coarse_flow(torch.cat((ref, target), 1))\n wc = self.warp_c(ref, flow_c[:, 0], flow_c[:, 1])\n flow_f = self.fine_flow(torch.cat((ref, target, flow_c, wc), 1)) + flow_c\n flow_f *= gain\n return flow_f", "def become_target(self):\n\t\traise NotImplementedError", "def target(self):", "def backPropagate(self, targets):\n if len(targets) != self.output:\n raise ValueError('NOT MATCH ERROR -- output number!')\n\n # calculate error terms for output\n # the delta tell you which direction to change the weights\n output_deltas = [0.0] * self.output\n for k in range(self.output):\n error = -(targets[k] - self.ao[k])\n output_deltas[k] = dsigmoid(self.ao[k]) * error\n\n # calculate error terms for hidden\n # delta tells you which direction to change the weights\n hidden_deltas = [0.0] * (self.hidden - 1)\n for j in range(self.hidden - 1):\n error = 0.0\n for k in range(self.output):\n error += output_deltas[k] * self.wo[j][k]\n hidden_deltas[j] = dsigmoid(self.ah[j]) * error\n\n # update the weights connecting hidden to output\n for j in range(self.hidden):\n for k in range(self.output):\n change = output_deltas[k] * self.ah[j]\n self.wo[j][k] -= self.learning_rate * change + self.co[j][k] * self.momentum\n self.co[j][k] = change\n\n # update the weights connecting input to hidden\n for i in range(self.input):\n for j in range(self.hidden - 1):\n change = hidden_deltas[j] * self.ai[i]\n self.wi[i][j] -= self.learning_rate * change + self.ci[i][j] * self.momentum\n self.ci[i][j] = change\n\n # calculate error\n error = 0.0\n for k in range(len(targets)):\n error += 0.5 * (targets[k] - self.ao[k]) ** 2\n return error", "def _fledgling_move(self,fledgling,parent_hex):\n options = fledgling.get_move_options(parent_hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options, weights=[x.quality**2 for x in options])\n fledgling.move(target[0])", "def forward(self)->None:", "def pearlmutter_forward_pass(self, inputs, unflattened_labels, feature_sequence_lens, direction, batch_size, hiddens=None, outputs=None, model=None, check_gradient=False, stop_at='output'): #need to test\n \n if model == None:\n model = self.model\n if hiddens == None or outputs == None:\n outputs, hiddens = self.forward_pass(inputs, feature_sequence_lens, model, return_hiddens=True)\n \n architecture = self.model.get_architecture()\n max_sequence_observations = inputs.shape[0]\n num_hiddens = architecture[1]\n num_sequences = inputs.shape[1]\n num_outs = architecture[2]\n hidden_deriv = 
np.zeros((max_sequence_observations, num_sequences, num_hiddens))\n output_deriv = np.zeros((max_sequence_observations, num_sequences, num_outs))\n# if stop_at == 'loss':\n# loss_deriv = np.zeros(output_deriv.shape)\n \n #propagate hiddens\n# print model.init_hiddens.shape\n \n hidden_deriv[0,:,:] = (self.forward_layer(inputs[0,:], direction.weights['visible_hidden'], direction.bias['hidden'], \n model.weight_type['visible_hidden'], prev_hiddens=model.init_hiddens, \n hidden_hidden_weights=direction.weights['hidden_hidden']) \n + np.dot(direction.init_hiddens, model.weights['hidden_hidden'])) * hiddens[0,:,:] * (1 - hiddens[0,:,:])\n linear_layer = (self.weight_matrix_multiply(hiddens[0,:,:], direction.weights['hidden_output'], \n direction.bias['output']) +\n np.dot(hidden_deriv[0,:,:], model.weights['hidden_output']))\n if stop_at == 'linear':\n output_deriv[0,:,:] = linear_layer\n elif stop_at == 'output':\n output_deriv[0,:,:] = linear_layer * outputs[0,:,:] - outputs[0,:,:] * np.sum(linear_layer * outputs[0,:,:], axis=1)[:,np.newaxis]\n# if stop_at == 'loss':\n# output_deriv[model.num_layers+1] = -np.array([(hidden_deriv[model.num_layers][index, labels[index]] / hiddens[model.num_layers][index, labels[index]])[0] for index in range(batch_size)])\n for sequence_index in range(1, max_sequence_observations):\n sequence_input = inputs[sequence_index,:]\n hidden_deriv[sequence_index,:,:] = (self.forward_layer(sequence_input, direction.weights['visible_hidden'], direction.bias['hidden'], \n model.weight_type['visible_hidden'], prev_hiddens=model.init_hiddens, \n hidden_hidden_weights=direction.weights['hidden_hidden']) \n + np.dot(hidden_deriv[sequence_index-1,:,:], model.weights['hidden_hidden'])) * hiddens[sequence_index,:,:] * (1 - hiddens[sequence_index,:,:])\n linear_layer = (self.weight_matrix_multiply(hiddens[sequence_index,:,:], direction.weights['hidden_output'], \n direction.bias['output']) +\n np.dot(hidden_deriv[sequence_index,:,:], model.weights['hidden_output']))\n #find the observations where the sequence has ended, \n #and then zero out hiddens and outputs, so nothing horrible happens during backprop, etc.\n zero_input = np.where(feature_sequence_lens <= sequence_index)\n hidden_deriv[sequence_index,zero_input,:] = 0.0\n output_deriv[sequence_index,zero_input,:] = 0.0\n if stop_at == 'linear':\n output_deriv[sequence_index,:,:] = linear_layer\n else:\n output_deriv[sequence_index,:,:] = linear_layer * outputs[sequence_index,:,:] - outputs[sequence_index,:,:] * np.sum(linear_layer * outputs[sequence_index,:,:], axis=1)[:,np.newaxis]\n# if stop_at == 'loss':\n# loss_deriv[sequence_index,:,:] = -np.array([(hidden_deriv[model.num_layers][index, labels[index]] / hiddens[model.num_layers][index, labels[index]])[0] for index in range(batch_size)])\n if not check_gradient:\n return output_deriv, hidden_deriv\n #compare with finite differences approximation\n else:\n epsilon = 1E-5\n if stop_at == 'linear':\n calculated = output_deriv\n finite_diff_forward = self.forward_pass(inputs, model = model + direction * epsilon, linear_output=True)\n finite_diff_backward = self.forward_pass(inputs, model = model - direction * epsilon, linear_output=True)\n elif stop_at == 'output':\n calculated = output_deriv\n finite_diff_forward = self.forward_pass(inputs, model = model + direction * epsilon)\n finite_diff_backward = self.forward_pass(inputs, model = model - direction * epsilon)\n# elif stop_at == 'loss':\n# calculated = hidden_deriv[model.num_layers + 1]\n# finite_diff_forward = 
-np.log([max(self.forward_pass(inputs, model = model + direction * epsilon).item((x,labels[x])),1E-12) for x in range(labels.size)]) \n# finite_diff_backward = -np.log([max(self.forward_pass(inputs, model = model - direction * epsilon).item((x,labels[x])),1E-12) for x in range(labels.size)]) \n for seq in range(num_sequences):\n finite_diff_approximation = ((finite_diff_forward - finite_diff_backward) / (2 * epsilon))[:,seq,:]\n print \"At sequence\", seq\n print \"pearlmutter calculation\"\n print calculated[:,seq,:]\n print \"finite differences approximation, epsilon\", epsilon\n print finite_diff_approximation\n sys.exit()", "def faceTowards(self, target):\n current_tile = self.current_tile()\n if(target and current_tile):\n x_dist = target.coordinates()[0] - current_tile.coordinates()[0]\n if x_dist == 0: return\n self.direction_val = x_dist/abs(x_dist)\n #TEMP\n if self.direction_val == -1:\n self.direction_id = 'left'\n if self.direction_val == 1:\n self.direction_id = 'right'", "def passing_message(target, message):\n \n shortest_path = strong_peer_graph.findShortestPath(STRONG_PEER_ID,target)\n next_node = shortest_path[1]\n\n send_message(neighbor_strong_peer_sockets[next_node],'', message)", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def forward(self):\n pass", "def forward(self):\n pass", "def trigger(self, target: \"pwncat.target.Target\") -> \"pwncat.manager.Session\":", "def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()", "def set_target_relative(blendshape, target, base):\n target = pm.PyNode(target)\n base = pm.PyNode(base)\n\n blendshape = pm.PyNode(blendshape)\n intermediate = pm.createNode('mesh')\n in_mesh = blendshape.input[0].inputGeometry.listConnections(plugs=True)[0]\n in_mesh >> intermediate.inMesh\n intermediate.outMesh.get(type=True) # this force mesh evaluation\n in_mesh // intermediate.inMesh\n\n selection_list = om2.MSelectionList()\n selection_list.add(target.name())\n selection_list.add(base.name())\n selection_list.add(intermediate.name())\n\n target_fn_mesh = om2.MFnMesh(selection_list.getDagPath(0))\n target_points = target_fn_mesh.getPoints()\n base_points = om2.MFnMesh(selection_list.getDagPath(1)).getPoints()\n intermediate_points = om2.MFnMesh(selection_list.getDagPath(2)).getPoints()\n selection_list.clear()\n\n i = 0\n while (i < len(target_points)):\n intermediate_points[i] = (\n intermediate_points[i] + (target_points[i] - base_points[i]))\n i += 1\n\n target_fn_mesh.setPoints(intermediate_points)\n target_fn_mesh.updateSurface()\n pm.delete(intermediate.getParent())", "def chase(self, target):\n linear_dist = lambda x1, x2, y1, y2: math.sqrt((x1 - x2)**2 + \n (y1 - y2)**2)\n min_dist_to_target = linear_dist(self.x, target.x, \n self.y, target.y)\n possible_posn = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n move_to_make = None\n\n for posn in possible_posn:\n if (self.x + posn[0] == self.handler.player.x and \n self.y + posn[1] == self.handler.player.y and \n self.handler.game_state != data.DEAD):\n dmg = self.deal_damage(self.handler.player)\n\n if dmg:\n self.handler.message_box.add_msg(\"{} attacks you for {} damage!\".format(self.name, dmg), \n data.COLOURS['mob_atk_text'])\n else:\n self.handler.message_box.add_msg(\"{} missed!\".format(self.name), \n data.COLOURS['mob_atk_text'])\n\n if self.handler.game_state == data.DEAD:\n self.handler.message_box.add_msg(\"{} killed you!\".format(self.name),\n 
data.COLOURS['player_die_text'])\n elif not self.handler.world.is_solid(self.x + posn[0], self.y + posn[1]):\n new_dist = linear_dist(self.x + posn[0], target.x,\n self.y + posn[1], target.y)\n if new_dist < min_dist_to_target:\n min_dist_to_target = new_dist\n move_to_make = posn\n\n if move_to_make:\n self.move(move_to_make[0], move_to_make[1])", "def _downward(self, root=0):\n\n if self._verbosity > 0:\n print(\"sending messages towards the leaf nodes\", end=\"\", flush=True)\n ready_to_send = set([root])\n while len(ready_to_send) > 0:\n current = ready_to_send.pop()\n self.clique_beliefs[current] = self._calc_message(current, self.children[current], False)\n ready_to_send.update(self.children[current])\n if self._verbosity > 0:\n print(\".\", end=\"\", flush=True)\n if self._verbosity > 0:\n print(\"\", end=\"\\n\", flush=True)", "def forward_pass(self):" ]
[ "0.64130163", "0.55329174", "0.5395205", "0.53396946", "0.5322844", "0.5240561", "0.5212087", "0.5199893", "0.51931745", "0.5155537", "0.51272595", "0.5123761", "0.51091444", "0.51039255", "0.5070662", "0.5069562", "0.505843", "0.50526226", "0.50483257", "0.5047778", "0.5045817", "0.5036694", "0.5023471", "0.5023471", "0.5019052", "0.5016927", "0.50055295", "0.49880347", "0.49795642", "0.49754766" ]
0.58511007
1
r"""Check parameter value limits.
def check_parameter_bounds(self): for p in self.variables.keys(): data = self.get_attr(p) if isinstance(data, dc_cp): if data.val > data.max_val + err: msg = ( 'Invalid value for ' + p + ': ' + p + ' = ' + str(data.val) + ' above maximum value (' + str(data.max_val) + ') at component ' + self.label + '.') logger.warning(msg) elif data.val < data.min_val - err: msg = ( 'Invalid value for ' + p + ': ' + p + ' = ' + str(data.val) + ' below minimum value (' + str(data.min_val) + ') at component ' + self.label + '.') logger.warning(msg) elif isinstance(data, dc_cc) and data.is_set: expr = self.get_char_expr(data.param, **data.char_params) data.char_func.get_domain_errors(expr, self.label) elif isinstance(data, dc_gcc) and data.is_set: for char in data.elements: char_data = self.get_attr(char) expr = self.get_char_expr( char_data.param, **char_data.char_params) char_data.char_func.get_domain_errors(expr, self.label)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def check_value(self, pos):\n if self.limits is not None:\n low, high = self.limits\n if low != high and not (low <= pos <= high):\n raise ValueError(\"{} outside of user-specified limits\" \"\".format(pos))\n else:\n self.setpoint.check_value(pos)", "def _check_paramrange(value, parameter):\n\n if parameter not in PARAMETER_RANGES.keys():\n raise ValueError('parameter {} not found in dictonary {}'\n .format(parameter, PARAMETER_RANGES))\n ranges = PARAMETER_RANGES[parameter]\n lo = ranges[0]\n hi = ranges[1]\n INRANGE = True\n if not (lo <= value < hi):\n INRANGE = False\n\n return INRANGE, lo, hi", "def check_the_parameters(parameter, name, type_of_parameter, min_value=None, max_value=None):\n # check the type\n if not isinstance(parameter, type_of_parameter):\n raise ValueError('%s must be of %s type. '\n '%s of type %s was passed.'\n % (name ,type_of_parameter,parameter, type(parameter)))\n # check the values\n if min_value:\n if parameter < min_value:\n raise ValueError('%s should be bigger than %s'\n % (name ,min_value))\n if max_value:\n if parameter > max_value:\n raise ValueError('%s should be smaller than %s'\n % (name ,max_value))", "def _validate_value(self, value):\n if self.limits[0] <= value <= self.limits[1]:\n return True\n else:\n return False", "def _check_value(self,val):\n if self.allow_None and val is None:\n return\n\n if not _is_number(val):\n raise ValueError(\"Parameter '%s' only takes numeric values\"%(self._attrib_name))\n \n self._checkBounds(val)", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def __check_args_val(self):\n if self.__min_range < 0:\n error_msg = \"min_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < 0:\n error_msg = \"max_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < self.__min_range:\n error_msg = \"max_range must be greater than or equal to min_range\"\n raise ValueError(error_msg)", "def __allowed_values_correct_number(self):\n strTestName = 'Values of a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 0\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def check_limits(self):\n\n too_hi = False\n too_lo = False\n if is_empty(self.value):\n return None\n\n # Check for limit violations.\n if is_valid(self.limit_lower):\n too_lo = (self.value < self.limit_lower)\n if is_valid(self.limit_upper):\n too_hi = (self.value > self.limit_upper)\n\n # If a limit is violated, return the designated limit action.\n if too_hi or too_lo:\n return self.limit_action\n else:\n return None", "def check_params(self):\r\n \r\n # TODO: More cases?\r\n\r\n if self.N <= 0:\r\n print('Bad Parameter: 
N')\r\n \r\n if self.Ha_tally <= 0 or self.Ha_tally > self.N:\r\n print('Bad Parameter: Reported winner tally')\r\n \r\n if len(self.round_sched) < 1 or not self.check_inc_sched(self.round_sched):\r\n print('Bad Parameter: Round Schedule')\r\n\r\n if self.alpha <= 0 or self.alpha >= .5:\r\n print('Bad Parameter: Alpha')", "def check_parameter(cls, par: str, value):\n\n global dtParameterDesc\n if par not in dtParameterDesc:\n return False\n\n pardata = dtParameterDesc[par]\n\n if isinstance(value, str):\n try:\n value = float(value.replace(',', '.')) * dtg.units[pardata['dunit']]['multiple']\n except ValueError:\n return False\n\n if pardata['type'] is Integral and value != int(value):\n return False\n\n if 'uplim' in pardata and (value > pardata['uplim'] or value < pardata['lowlim']):\n return False\n\n return True", "def _check_allowed_values(self, parameters):\n for key, allowed_values in self.ALLOWED_VALUES:\n self.log([u\"Checking allowed values for parameter '%s'\", key])\n if key in parameters:\n value = parameters[key]\n if value not in allowed_values:\n self._failed(u\"Parameter '%s' has value '%s' which is not allowed.\" % (key, value))\n return\n self.log(u\"Passed\")", "def bounds_check(session):\n\n max_ = session.field.opts.max\n min_ = session.field.opts.min\n\n if max_ is not None and len(session.data) > max_:\n raise session.field.invalid(error_type='out_of_bounds')\n if min_ is not None and len(session.data) < min_:\n raise session.field.invalid(error_type='out_of_bounds')\n\n return session.data", "def check_limit(limit_value):\n try:\n limit = int(limit_value)\n except ValueError:\n raise SystemExit('The argument \"limit\" should be a positive number')\n else:\n if limit < 1:\n raise SystemExit('The argument \"limit\" should be greater than 0')\n else:\n return limit", "def check_binning_parameter_range(x_min, x_max, ws_unit):\n if ws_unit == 'dSpacing' and not 0 < x_min < x_max < 20:\n # dspacing within (0, 20)\n x_range_is_wrong = True\n elif ws_unit == 'TOF' and not 1000 < x_min < x_max < 1000000:\n # TOF within (1000, 1000000)\n x_range_is_wrong = True\n elif ws_unit != 'dSpacing' and ws_unit != 'TOF':\n raise NotImplementedError('Impossible case for unit {}'.format(ws_unit))\n else:\n # good cases\n x_range_is_wrong = False\n\n if x_range_is_wrong:\n ero_msg = 'For {0}, X range ({1}, {2}) does not make sense' \\\n ''.format(ws_unit, x_min, x_max)\n print('[ERROR CAUSING CRASH] {}'.format(ero_msg))\n raise RuntimeError(ero_msg)\n\n return", "def validMisc(self, p_upperRateLim, p_lowerRateLim, p_fixedAVDelay, p_modulationSensitivity):\n checks = 0;\n for val in [p_upperRateLim, p_lowerRateLim, p_fixedAVDelay, p_modulationSensitivity]:\n if val is None:\n checks += 1;\n #check p_lowerRateLim\n if not (p_lowerRateLim is None):\n for valid in frange(30,50,5):\n if p_lowerRateLim == valid:\n checks += 1;\n break\n for valid in frange(51,90,1):\n if p_lowerRateLim == valid:\n checks += 1;\n break\n for valid in frange(95,175,5):\n if p_lowerRateLim == valid:\n checks += 1;\n break\n #check p_upperRateLim\n if not (p_upperRateLim is None):\n for valid in frange(50,175,5):\n if p_upperRateLim == valid: #and p_upperRateLim >= p_lowerRateLim: #need to implement\n checks += 1;\n break\n if not (p_modulationSensitivity is None):\n if p_modulationSensitivity > 0 and p_modulationSensitivity <= 16:\n if p_modulationSensitivity is int:\n checks += 1;\n if not (p_fixedAVDelay is None):\n for valid in frange(70,300,10):\n if valid == p_fixedAVDelay:\n checks += 1\n if checks == 4:\n 
return True\n return False", "def test_limits_boundary_values(self):\n\n def check_error_msg(status, output, storagelimit=False):\n import json\n if status == False:\n content = json.loads(output)[\"errors\"]\n if storagelimit:\n actual_error = content[\"dataStorageLimit\"]\n expected_error = '\"dataStorageLimit\" must be an integer between -1 and 100000'\n else:\n actual_error = content[\"dataThrottleLimit\"]\n expected_error = '\"dataThrottleLimit\" must be an integer between -1 and 2147483647'\n self.assertEqual(actual_error, expected_error)\n else:\n self.fail(\"expected to fail but passsed\")\n\n bucket = self.cluster.buckets[0]\n server = random.choice(bucket.servers)\n bucket_helper = BucketHelper(server)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2)\n check_error_msg(status, content)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648)\n check_error_msg(status, content)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=-2)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=2147483648)\n check_error_msg(status, content, True)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2,\n storage_limit=-2)\n check_error_msg(status, content)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648,\n storage_limit=2147483648)\n check_error_msg(status, content)\n check_error_msg(status, content, True)", "def validateVoltage( self, name, voltage ):\n channel = self.d[name]\n (MIN,MAX) = channel.limits\n if not MIN <= voltage <= MAX: raise Exception('Invalid voltage {}'.format(voltage))", "def _validate_value(self, value):\n return (self.maximum_value is None) or (value <= self.maximum_value)", "def _withinRangeChecker(entity, params):\n\n for constraint in constraints:\n type = constraint.get('type')\n field = constraint.get('field')\n\n if not type or not field:\n raise ProtocolError()\n\n min_value = constraint.get('min_value', 0)\n max_value = constraint.get('max_value', 1)\n\n if type == 'size':\n value = entity.__getattribute__(field)\n if len(value) < min_value or len(value) > max_value:\n return False\n else:\n raise ProtocolError()\n \n return True", "def getIsValidParameters(self):\n isVolumValid = self.volume > 0\n if not isVolumValid:\n return isVolumValid, 'Invalid input volume'\n\n if not self.lowerLimitPressureInspiratory <= self.pressureInspiratory <= self.higherLimitPressureInspiratory:\n return False, f'Selected inspiratory value must be between {self.lowerLimitPressureInspiratory} cmH20 ' \\\n f'and {self.higherLimitPressureInspiratory} cmH2O.'\n\n if not self.lowerLimitPressureExpiratory <= self.pressureExpiratory <= self.higherLimitPressureExpiratory:\n return False, f'Selected expiratory value must be between {self.lowerLimitPressureExpiratory} cmH20 ' \\\n f'and {self.higherLimitPressureExpiratory} cmH2O.'\n\n return self._getIsValidParameters()", "def check_valid_range(val, max_val):\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val", "def __allowed_values_inccorrect_number(self):\n strTestName = 'Values of a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n 
RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 1.4\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def checkSanity(self, valuePreviouslySet):\n firstGet = self._pfwClient.get(self._paramPath)\n\n try:\n returnValue = Decimal(firstGet)\n except ValueError:\n print(\"ERROR: Can't convert %s to a decimal\" % firstGet)\n return firstGet, False\n\n upperAllowedValue = Decimal(valuePreviouslySet) + (Decimal(self._quantum) / Decimal(2))\n lowerAllowedValue = Decimal(valuePreviouslySet) - (Decimal(self._quantum) / Decimal(2))\n\n if not (lowerAllowedValue <= returnValue <= upperAllowedValue):\n print('%s <= %s <= %s is not true' %\n (lowerAllowedValue, returnValue, upperAllowedValue))\n return firstGet, False\n\n return firstGet, True", "def checkBounds(self, valueToSet):\n (success, errorMsg) = self._pfwClient.set(self._paramPath, str(valueToSet))\n\n return valueToSet, success", "def ge(value, limit):\n return value >= limit", "def constrain(inputVal, lower_limit, upper_limit):\n \n if (inputVal < lower_limit):\n return lower_limit\n elif (inputVal > upper_limit):\n return upper_limit\n else:\n return inputVal", "def _validate(self, instance, value):\n\n if not isinstance(value, Real):\n raise TypeError(f\"Value for {self.prop_name} shoulde be real numbers.\")\n\n if (\n self.min_val is not None\n and value < self.min_val\n and not isclose(value, self.min_val)\n ):\n raise ValueError(\n f\"Value should be greater than or equal to {self.min_val}.\"\n )\n\n if (\n self.max_val is not None\n and value > self.max_val\n and not isclose(value, self.max_val)\n ):\n raise ValueError(f\"Value should be less than or equal to {self.max_val}.\")", "def check_constrained(self, limit=None):\n\n # Set the 'well-constrained' limit at 10% (arbitrary) if not provided.\n limit = (Decimal(0.1) if not limit else Decimal(limit))\n\n if is_empty(self.value) or is_empty(self.uncertainty):\n return False\n elif self.uncertainty > (Decimal(self.value) * Decimal(limit)):\n self.well_constrained = False\n else:\n self.well_constrained = True" ]
[ "0.72562593", "0.7207735", "0.6949408", "0.6937725", "0.68831706", "0.6846565", "0.68388087", "0.6836606", "0.6768127", "0.6702471", "0.66531205", "0.6622947", "0.6586079", "0.65730226", "0.65702236", "0.65590054", "0.6541621", "0.6513924", "0.64859587", "0.64766264", "0.6450017", "0.64412296", "0.6438236", "0.64375496", "0.6425751", "0.64171517", "0.6408636", "0.64052886", "0.64024764", "0.63935727" ]
0.7210946
1
r"""Entropy balance calculation method.
def entropy_balance(self):
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(self):\n\n \"\"\"Gets the first neighbours, which are the first 2*r+1 cells.\"\"\"\n current_neighbours = []\n amount = [0] * self.k ** (2 * self.r + 1)\n for i in range(2 * self.r + 1):\n current_neighbours.append(self.config[self.t, i % self.width])\n\n \"\"\"Calculates the rule and adds one to it's amount. It then removes the\n leftmost cell and adds a cell to the right.\"\"\"\n for i in range(len(self.config[self.t]) - 1):\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount) - 1 - rule] += 1\n current_neighbours.pop(0)\n current_neighbours.append(\n self.config[self.t, (2 * self.r + 1 + i) % self.width])\n\n \"\"\"Calculates the rule for the last neighbourhood.\"\"\"\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount)-1 - rule] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(len(amount)):\n if(amount[i] != 0):\n probability = amount[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_entropy = (self.average_entropy *\n self.t + shannon) / (self.t + 1)", "def conditional_entropy(self) -> float:\n pass", "def efficiency(self):\n if self.byte_total == 0:\n return 1\n return self.entropy() / 8", "def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)", "def conditional_entropy_hyper(self) -> float:\n pass", "def entropy(self):\n ent = 0.0\n for f in self.byte_freq:\n if f > 0:\n freq = float(f) / self.byte_total\n ent = ent + freq * math.log(freq, 2)\n return -ent", "def gain(Y, X):\n return entropy(Y) - cEntropy(Y,X)", "def balanceFactor(self):\n leftHeight = self.left.height if self.left != None else 0\n rightHeight = self.right.height if self.right != None else 0\n return rightHeight - leftHeight", "def bans(self) -> float:\n return self.entropy(base=10)", "def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))", "def calculate_entropy(prob):\n return -(prob * math.log(prob,2))", "def entropy(self):\n raise NotImplementedError", "def calculate_cross_entropy(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def bits(self) -> float:\n return self.entropy(base=2)", "def calc_entropy(data_set): #calculates total entropy of the dataset\r\n republicans = 0\r\n democrats = 0\r\n total = 0\r\n for data_point in data_set:\r\n party = data_point.dat_party\r\n if party == \"R\":\r\n republicans+=1\r\n elif party == \"D\":\r\n democrats+=1\r\n total+=1\r\n\r\n if total == 0: return 0\r\n prob_dem = democrats/total\r\n prob_rep = republicans/total\r\n if prob_dem == 0: return -(prob_rep * math.log(prob_rep, 2))\r\n if prob_rep == 0: return -(prob_dem * math.log(prob_dem, 2))\r\n\r\n entropy = (-prob_dem * math.log(prob_dem, 2)) -(prob_rep * math.log(prob_rep, 2))\r\n return entropy", "def calculate_entropy():\n\tstat = {} # dictionary - chars and number of repetitions\n\tallchar = 0.0 # total number of characters\n\tentropy = 0.0 # initial entropy\n\n\tfor line in sys.stdin.readlines():\n\t\tline = re.sub(r'\\s', '', line)\n\t\tfor znak in line:\n\t\t\tif znak in stat:\n\t\t\t\tstat[znak] += 1\n\t\t\telse:\n\t\t\t\tstat[znak] = 1\n\t\t\tallchar += 1\n\n\tfor znak in stat:\n\t\tstat[znak] = 
stat[znak]/allchar\n\t\tentropy += stat[znak] * log(stat[znak], 2)\n\n\tentropy *= -1\n\treturn entropy", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))", "def entropy(message):\n message = letter_freq(message)\n n = sum(message.values())\n h = 0\n for n_i in message.values():\n p_i = n_i / n\n h += -p_i * log2(p_i)\n return h", "def H(self, data):\n entropy = 0\n\n if not data:\n return entropy\n\n for x in range(256):\n p_x = float(data.count(chr(x))) / len(data)\n if p_x > 0:\n entropy -= p_x * math.log(p_x, 2)\n\n return entropy", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def entropy(message):\n n = len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h", "def entropy(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n return (_fun.logbeta(a, b)\n - (a - 1)*mp.psi(0, a)\n - (b - 1)*mp.psi(0, b)\n + (a + b - 2)*mp.psi(0, a + b))", "def entropycell(self):\n cells = [0] * self.k\n for i in range(self.width):\n cells[int(self.config[self.t, i])] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(self.k):\n if(cells[i] != 0):\n probability = cells[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_cell = (self.average_cell * self.t + shannon) / (self.t + 1)", "def entropy(self):\n return -np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))", "def _entropies(self):\n H_C = fentropy(self.row_totals)\n H_K = fentropy(self.col_totals)\n H_actual = fentropy(self.itervalues())\n H_expected = H_C + H_K\n I_CK = H_expected - H_actual\n return H_C, H_K, I_CK", "def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])", "def entropy(*args):\n\n\n values = []\n leaf = -1\n\n for i, val in enumerate(args):\n if(val != 0):\n values.append(val * math.log(val, len(args)))\n if(val == 1):\n leaf = i\n \n return -sum(values), leaf", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def entropy(self, complete=True, dl=False, partition_dl=True, edges_dl=True,\n degree_dl=True, dense=False, multigraph=True, norm=False,\n dl_ent=False, **kwargs):\n\n S = 0\n if self.layers:\n if dl and partition_dl:\n S += self.total_state.entropy(dl=True, partition_dl=True,\n degree_dl=False, edges_dl=False,\n norm=False) - \\\n self.total_state.entropy(dl=False, norm=False)\n for state in self.states[1:]:\n S += state.entropy(complete=complete, dl=dl, partition_dl=False,\n edges_dl=False, degree_dl=degree_dl,\n dense=dense, multigraph=multigraph,\n norm=False, dl_ent=dl_ent, **kwargs)\n else:\n for l, state in enumerate(self.states):\n if l == 0:\n S += state.entropy(complete=complete, dl=dl,\n partition_dl=partition_dl,\n edges_dl=False, degree_dl=degree_dl,\n dense=dense, multigraph=multigraph,\n norm=False, dl_ent=dl_ent, **kwargs)\n S -= libcommunity.covariate_entropy(state.bg._Graph__graph,\n _prop(\"e\", state.bg,\n state.mrs))\n if 
multigraph:\n S -= libcommunity.entropy_parallel(state.g._Graph__graph,\n _prop(\"e\", state.g,\n state.eweight))\n else:\n S += libcommunity.covariate_entropy(state.bg._Graph__graph,\n _prop(\"e\", state.bg,\n state.mrs))\n if multigraph:\n S += libcommunity.entropy_parallel(state.g._Graph__graph,\n _prop(\"e\", state.g,\n state.eweight))\n\n if dl and edges_dl:\n bstate = self.get_block_state(b=zeros(self.B), layers=True)[0]\n S += bstate.entropy(dense=True, multigraph=True, dl=False,\n norm=False)\n\n if dl:\n S += self.__get_layer_entropy()\n\n if norm:\n S /= self.E\n return S" ]
[ "0.6509844", "0.6371908", "0.6213984", "0.6136312", "0.6065171", "0.6035214", "0.5988753", "0.59741235", "0.593793", "0.59257174", "0.5884737", "0.5825986", "0.58200336", "0.5820027", "0.5815513", "0.5708887", "0.5693141", "0.56926143", "0.5690234", "0.56868804", "0.56770515", "0.5664085", "0.5647388", "0.5619204", "0.56187755", "0.5611757", "0.5605174", "0.5605001", "0.5577826", "0.5576533" ]
0.7791232
1
r""" Exergy balance calculation method.
def exergy_balance(self, T0):
    self.E_P = np.nan
    self.E_F = np.nan
    self.E_bus = {
        "chemical": np.nan, "physical": np.nan, "massless": np.nan
    }
    self.E_D = np.nan
    self.epsilon = np.nan
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __balance__(self) -> float:\n\n with dataset.connect(database.get_db()) as db:\n # Find last bank transaction.\n statement = statement = f\"\"\"\n SELECT opening_balance, transaction_amount\n FROM bank\n WHERE author_id = {self.user.id}\n ORDER BY id DESC\n LIMIT 1\n \"\"\"\n result = db.query(statement)\n\n for row in result:\n balance = row[\"opening_balance\"] + row[\"transaction_amount\"]\n break\n else:\n # If there was no result for the user, default balance is given.\n balance = 500\n\n return float(balance)", "def balance(self):\n return self._rbal - self._lbal", "def balance(self):\n return sum(self.operations.select())\n 11", "def _balance_update(self):\n return_rate = self.df.loc[self.currentStep, \"return_Close\"]\n self.buy_amount += return_rate * self.buy_amount\n self.sell_amount -= return_rate * self.sell_amount", "def balance(self):\n #a couple of assumptions not clear in assignment\n #1) there is always an invalid transaction\n #2) there is only 1 invalid transaction\n closeBalance=0\n invalidTrans=0\n withdrawCount=0\n depositCount=0\n# print(self.numList)\n for i in range(len(self.numList)):\n addValue=0\n if self.numList[i]<0:\n if (-1*self.numList[i])>closeBalance:\n invalidTrans=self.numList[i]\n else:\n addValue=self.numList[i]\n withdrawCount+=1\n elif self.numList[i]>0:\n if i!=0:depositCount+=1\n addValue=self.numList[i]\n closeBalance+=addValue\n# print(i,addValue,closeBalance)\n print(\"Invalid transaction %.2f\" %invalidTrans)\n print(\"Closing balance = %.2f\" %closeBalance)\n print(\"Number of withdrawals = %d\" %withdrawCount)\n print(\"Number of deposits = %d\" %depositCount)", "def balance(self):\n if self.bf == -2:\n if self.left.bf <= 0:\n return self.leftleft()\n else:\n return self.leftright()\n elif self.bf == 2:\n if self.right.bf >= 0:\n return self.rightright()\n else:\n return self.rightleft()\n return self", "def double_declining_balance():\r\n cost = float(input(\"Please Enter The Cost Of Asset: \"))\r\n accdepreciation = float(input(\"Please Enter The Value Of Accumulated Depreciation: \"))\r\n life = float(input(\"Please Enter Estimated Useful Life Of Asset(Years): \"))\r\n rv = float(input(\"Please Enter Estimated Residual Value Of Asset: \"))\r\n n = 0\r\n a = (float(cost)-float(accdepreciation)) * (float(2)/float(life))\r\n bn = float(a)/float(12)\r\n print \">> Your Monthly Depreciation For First Year is\",bn\r\n while(n != (life-1)):\r\n bk = float(cost)\r\n a = ((float(cost)-float(accdepreciation)) * (float(2)/float(life)))\r\n cost -= float(a)\r\n bk -= float(a)\r\n n += 1\r\n vvv = float(bk)-float(rv)\r\n print \">> Your Depreciation For Year No.\",n,\"is\",a\r\n print \">> Your Book Value After\",n,\"Years is\",bk,\"\\n\"\r\n print \">> Your Depreciation For Year No.\",int(life),\"is\",vvv\r\n print \">> Your Book Value After\",int(life),\"Years is\",rv", "def do_balance(self,args):\n \"\"\"Can show total, available(available for trading), or reserved(reserved in open orders)\"\"\"\n \"\"\"usage: balance [available/reserved](optional)\"\"\"\n args = stripoffensive(args)\n if 'available' in args:\n btc,usd = available() \n elif 'reserved' in args:\n btc,usd = reserved()\n else:\n btc,usd = bal()\n word = args if args else \"total\"\n print 'Your %s balance is %.8f BTC and $%.2f USD ' % (word,btc,usd)\n if word == \"total\":\n last = D(bitstamp.ticker()['last'])\n print 'Account Value: $%.2f @ Last BTC Price of $%.2f' % (btc*last+usd,last)", "def balance(self) -> Decimal:\n withdrawals = self.withdrawal_requests.filter(\n 
status=WithdrawalStatus.open,\n )\n if len(withdrawals) == 0:\n return self.internal_balance\n else:\n withdrawal_total = sum(map(lambda w: w.amount, withdrawals))\n return self.internal_balance - withdrawal_total", "def test_update_balance(self):\n current_year_tuple = (0.1, 0.1, 0.8)\n iteration_balance = 90\n contribution = 10\n expected_result = 110\n test_balance = investment_growth.update_balance(iteration_balance, contribution, current_year_tuple)\n self.assertEqual(test_balance, expected_result)", "def balance(self, date=None):\r\n\r\n qs = self._entries()\r\n if date:\r\n qs = qs.filter(transaction__t_stamp__lt=date)\r\n r = qs.aggregate(b=Sum('amount'))\r\n b = r['b']\r\n\r\n flip = self._DEBIT_IN_DB()\r\n if self._positive_credit():\r\n flip *= -1\r\n\r\n if b == None:\r\n b = Decimal(\"0.00\")\r\n b *= flip\r\n\r\n #print \"returning balance %s for %s\" % (b, self)\r\n return b", "def elec_balance(index):\n t = index[0]\n return (\n pulp.lpSum([component_output[i, t] for i in index_elec_out])\n - pulp.lpSum([component_input[i, t] for i in index_elec_in])\n + elec_from_grid[t]\n - elec_to_grid[t]\n + pulp.lpSum([storage_disch[i, t] for i in elec_storage_names])\n - pulp.lpSum([storage_ch[i, t] for i in elec_storage_names])\n + elec_unserve[t]\n - elec_dump[t]\n == forecast[\"elec_load\"][t]\n )", "def get_balance(self):\n final_amount = 0\n for i in range(len(self.ledger)):\n final_amount += self.ledger[i]['amount']\n return final_amount", "def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()", "def get_balance(self):\r\n return self.balance", "def electricity_balance(dh: DataHandler):\n # creates DataFrame of 
all electricity flows in TWh with index = ['alltec','r']\n supply = dh.get(\"o_supply\").groupby([\"tec_supply\", \"r\"]).sum()\n supply.index.names = [\"alltec\", \"r\"]\n supply = pd.concat([supply], keys=[\"supply\"], names=[\"type\"])\n demand = dh.get(\"o_demand\").groupby([\"tec_demand\", \"r\"]).sum() * -1\n demand.index.names = [\"alltec\", \"r\"]\n demand = pd.concat([demand], keys=[\"demand\"], names=[\"type\"])\n imp = pd.concat(\n [\n pd.concat(\n [dh.get(\"o_import\").groupby(\"r\").sum()],\n keys=[\"import\"],\n names=[\"alltec\"],\n )\n ],\n keys=[\"demand\"],\n names=[\"type\"],\n )\n cur = pd.concat(\n [\n pd.concat(\n [dh.get(\"o_cur\").groupby(\"r\").sum().mul(-1)],\n keys=[\"cur\"],\n names=[\"alltec\"],\n )\n ],\n keys=[\"demand\"],\n names=[\"type\"],\n )\n\n elec_flow = pd.concat([supply, demand, imp, cur])\n elec_flow = elec_flow.div(1000)\n\n mI = pd.MultiIndex.from_product(\n [\n dh.merge_stored_sets(\"alltec\"),\n dh.merge_stored_sets(\"r\"),\n [\"supply\", \"demand\"],\n ],\n names=[\"alltec\", \"r\", \"type\"],\n )\n elec_flow = add_zeros(elec_flow, mI)\n\n return elec_flow", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def do_balance(self, args):\n \n balance = self.cur.execute(\"SELECT * FROM balance ORDER BY date DESC\").fetchone()[2]\n print(\"Your current balance is $%.2f\" % balance)", "def __get_balance(self):\n return self.__balance", "def calculate_value(self):\n base = self.balance\n bag = self.bag * self.state['price'] * (1-self.gap)\n value = base + bag\n self.value = value\n return value", "def withdraw(amt) :\r\n global bal \r\n bal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\t\"\"\"{1.OK amt >= 0\tpremise\r\n\t\t2.OK bal >= 0\tpremise\r\n\t\t3.OK bal == bal_in\tpremise\r\n\t}\"\"\"\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (bal == bal_in)\r\n\tif amt <= bal:\r\n\t\t#PREMISES FOR THEN-ARM: \r\n\t\t# (amt <= bal)\r\n\t\t# (bal == bal_in)\r\n\t\t\"\"\"{1.OK amt <= bal\tpremise\r\n\t\t\t4.OK bal == bal_in\tpremise\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal == bal_in)\r\n\t\tbal = bal - amt\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (bal == (bal_old - amt))\r\n\t\t# (bal_old == bal_in)\r\n\t\t\"\"\"{1.OK bal == bal_old - amt\tpremise\r\n\t\t\t2.OK amt <= bal_old\talgebra 1\r\n\t\t\t3.OK amt >= 0\talgebra 1\r\n\t\t\t4.OK bal_old >= 0\talgebra 1\r\n\t\t\t5.OK bal_old == bal_in\tpremise\r\n\t\t\t6.OK amt == bal_in - bal\talgebra 1 5\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (amt == (bal_in - bal))\r\n\t\tcash = amt\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (cash == amt)\r\n\t\t# (amt == (bal_in - bal))\r\n\t\t\"\"\"{1.OK amt == bal_in - bal\tpremise\r\n\t\t\t2.OK cash == amt\tpremise\r\n\t\t\t3.OK cash == bal_in - bal\t\tsubst 2 1\r\n\t\t\t4.OK bal >= 0\talgebra 1\r\n\t\t\t5.OK bal_in == bal + cash\talgebra 3\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal_in == (bal + cash))\r\n\telse :\r\n\t\t#PREMISES FOR ELSE-ARM: \r\n\t\t# not (amt <= bal)\r\n\t\t# (bal == bal_in)\r\n\t\t\"\"\"{1.OK not(amt <= bal)\tpremise\r\n\t\t\t4.OK bal == 
bal_in\tpremise\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal == bal_in)\r\n\t\tcash = 0\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (cash == 0)\r\n\t\t# (bal == bal_in)\r\n\t\tassert not (amt <= bal) # UNABLE TO VERIFY\r\n\t\t\"\"\"{1.OK cash == 0\tpremise\r\n\t\t\t2.OK bal == bal_in\tpremise\r\n\t\t\t3.?? not(amt <= bal)\tpremise\r\n\t\t\t4.OK bal >= 0\talgebra 3\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal >= 0)\r\n# ERROR: uneven indentation of commands\r\n # prove here that bal >= 0 and bal + cash == bal_in\r\n return cash\r\n #PREMISES FOR NEXT LINE: \r\n # (bal >= 0)\r\n # ((bal + cash) == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# ((bal_in == (bal + cash)) or ((bal >= 0) and ((bal + cash) == bal_in)))\r\n\tassert (bal >= 0) # UNABLE TO VERIFY\r", "def get_balance_eq(self):\n usd_balance = self.get_balance(\"btc\")[\"usd_balance\"]\n btc_balance = self.get_balance(\"btc\")[\"btc_balance\"]\n ltc_balance = self.get_balance(\"ltc\")[\"ltc_balance\"]\n eth_balance = self.get_balance(\"eth\")[\"eth_balance\"]\n eur_balance = self.get_balance(\"eur\")[\"eur_balance\"]\n return {'usd': str(round(float(usd_balance), 2)),\n 'btc': str(round(float(btc_balance), 2)),\n 'ltc': str(round(float(ltc_balance), 2)),\n 'eth': str(round(float(eth_balance), 2)),\n 'eur': str(round(float(eur_balance), 2))}", "def calculate(self):", "def get_balance(self) -> float:\n return self._balance", "def returnBalances(self):\n pass", "def total_balance(self) -> Decimal:\n return self.incomes_from_outside + self.expenses_to_outside", "def balance(self) -> float:\n\t\tbalance = 0\n\t\tfor transaction in self.transactions:\n\t\t\tsign = 1 if transaction.receiving_account == self.__number else -1\n\t\t\tbalance += sign*transaction.usd*transaction.completed\n\t\t# The bank has infinite money\n\t\tif self.name == Account.BANK:\n\t\t\tbalance = Decimal('Infinity')\n\t\treturn balance", "def hydrogen_balance(dh: DataHandler):\n supply = (\n dh.get(\"o_h2_gene\").fillna(0).groupby([\"tec_h2d\", \"r\"]).sum().astype(\"Float64\")\n )\n supply.index.names = [\"alltec\", \"r\"]\n\n demand = (\n dh.get(\"o_h2_usage\")\n .fillna(0)\n .groupby([\"tec_h2g\", \"r\"])\n .sum()\n .mul(-1)\n .astype(\"Float64\")\n )\n demand.index.names = [\"alltec\", \"r\"]\n\n imp = dh.get(\"o_h2_imports\")\n imp = add_zeros(imp, pd.Index(dh.merge_stored_sets(\"r\"), name=\"r\"))\n imp = pd.concat([imp], keys=[\"import\"], names=[\"alltec\"]).astype(\"Float64\")\n\n h2_bal = pd.concat([supply, demand, imp]).div(1000)\n\n mI = pd.MultiIndex.from_product(\n [dh.merge_stored_sets(\"tec_h2\"), dh.merge_stored_sets(\"r\")],\n names=[\"alltec\", \"r\"],\n )\n h2_bal = add_zeros(h2_bal, mI)\n\n return h2_bal", "def bmi_calculate(self):\r\n try:\r\n manager_obj = BMI_Manager(self.data)\r\n manager_obj.bmi_calculation()\r\n except Exception as ex:\r\n print(\"Exception in bmi_calculate function\")", "def get_balance(self):\n balance = 0\n for transaction in self.ledger:\n balance += transaction[\"amount\"]\n return balance" ]
[ "0.6611536", "0.6485161", "0.6480992", "0.62875473", "0.6287387", "0.62516195", "0.6235926", "0.62153035", "0.62097895", "0.6207586", "0.6202252", "0.6169626", "0.6166075", "0.6151702", "0.61431146", "0.6121842", "0.6106136", "0.60940135", "0.60891706", "0.60594755", "0.6057621", "0.60565126", "0.60298145", "0.60117376", "0.60103685", "0.60030955", "0.59832036", "0.5956507", "0.59556556", "0.5954567" ]
0.66281956
0
r""" Calculate the vector of residual values for fluid balance equations. Returns
def fluid_func(self):
    residual = []
    for i in range(self.num_i):
        for fluid, x in self.inl[0].fluid.val.items():
            residual += [x - self.outl[0].fluid.val[fluid]]
    return residual
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R", "def residuals(self) -> npt.NDArray[np.float64]:\n return self.data - self.theory", "def fluid_func(self):\n residual = []\n for fluid, x in self.inl[0].fluid.val.items():\n res = x * self.inl[0].m.val_SI\n for o in self.outl:\n res -= o.fluid.val[fluid] * o.m.val_SI\n residual += [res]\n return residual", "def _compute_residuals(self):\n residuls = self.I - self.E\n return residuls", "def get_residual(self) -> np.ndarray:\n return self._calculate_residual(self.coefficients)", "def _calculate_residual(original_values: List[Any], new_values: List[Any]) -> List[float]:\n residual = []\n for i in range(len(original_values)):\n residual.append(original_values[i] - new_values[i])\n return residual", "def residuals(self):\r\n return self.__residuals", "def residual(us):\n return self.h_S(z0, us) - h_P", "def residual(var, matrix, RHSvector):\n from fipy.tools.numerix import array, LINFnorm\n \n Lx = matrix * array(var)\n return LINFnorm(Lx - RHSvector)", "def residuals_(self):\n return self._residuals", "def _residual_lattice(self, params):\n model = np.sqrt(self.calc_q_square())\n data = np.absolute(self.q)\n return (model[self.mask] -data[self.mask])", "def residuals(self, b):\n x, y = self.xvals, self.yvals\n return self._numexpr(x, *b) - y", "def _residuals(params: List[float], xs: np.ndarray, ys: np.ndarray) -> float:\n return _model(params=params, xs=xs) - ys", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ", "def _raw_residuals(self):\n if self.model.assortativity == 'positive':\n traj = self._solution[::-1]\n else:\n traj = self._solution\n\n # compute the residuals\n xi = np.linspace(traj[0, 0], traj[-1, 0], 10 * traj.shape[0])\n resids_arr = self.ivp.compute_residual(traj[:, :3], xi, k=5, ext=2)\n\n # convert to a data frame\n resids_arr[:, 0] = xi\n col_names = ['x', r'$\\hat{\\mu}(x)$', r'$\\hat{\\theta}(x)$']\n df = pd.DataFrame(resids_arr, columns=col_names)\n\n return df.set_index('x')", "def residual_sensor_data(self):\r\n residual = []\r\n data_0_damage = self.sensor_data_original_shape_[0, :, :, :]\r\n\r\n for i in range(self.num_of_hm_stages):\r\n subtract_vector = self.sensor_data_original_shape_[i, :, :, :] - data_0_damage\r\n residual.append(subtract_vector)\r\n\r\n self.residual = np.asarray(residual)\r\n self.residual_flattened = self.residual.reshape(-1, self.signal_length, 1)\r\n\r\n return", "def residuals(data: DataVector, theory: TheoryVector) -> npt.NDArray[np.float64]:\n assert isinstance(data, DataVector)\n assert isinstance(theory, TheoryVector)\n return (data - theory).view(np.ndarray)", "def calculateElementResidual(self):\n import pdb\n\n for ci in range(self.nc):\n self.elementResidual[ci].fill(0.0)\n #\n self.ellamDiscretization.updateElementResidual(self.elementResidual)", "def get_residual(self, beta: ndarray) -> ndarray:\n return self.data.weight*(self.data.obs -\n self.fevar.mapping(beta))", "def energy_balance_func(self):\n residual = []\n T_in = T_mix_ph(self.inl[0].get_flow(), T0=self.inl[0].T.val_SI)\n for o in 
self.outl:\n residual += [T_in - T_mix_ph(o.get_flow(), T0=o.T.val_SI)]\n return residual", "def residual(S):\n rho = seawater.density(T, S, Pa)\n return (rho_1 - rho)", "def _get_residuals(self, model: Model) -> np.ndarray:\n try:\n # pyre-fixme[16]: `Model` has no attribute `model`.\n return model.model.resid.values\n except Exception:\n fcst = model.predict(steps=1, freq=\"D\", include_history=True)\n # pyre-fixme[16]: `None` has no attribute `merge`.\n # pyre-fixme[16]: `Optional` has no attribute `to_dataframe`.\n merge = fcst.merge(model.data.to_dataframe(), on=\"time\")\n for col in merge.columns:\n if col != \"time\" and (\"fcst\" not in col):\n return merge[col].values - merge[\"fcst\"].values\n raise ValueError(\"Couldn't find residual or forecast values in model\")", "def residuals_Arr(self, p, data, x):\n err = data - self.Arr(x,p)\n return err", "def residual(self,name):\n state = self.getstate(name)\n m = self.hit.vec \n x = state.vec\n res = m - self.hmatrix*x\n debug('kfnode.residual',(name,res))\n return res", "def _residual(self, x):\n h = x\n h = self.c1(h)\n h = self.activation(h)\n h = self.c2(h)\n h = F.avg_pool2d(h, 2)\n\n return h", "def _resid(self):\n # get the needed data\n sigma_sub_df = self.draws[\"sigma_sub\"]\n env_df = self.draws[self.me_map[\"env\"]]\n\n # if it is a squeeze type then we use the absolute value of the diff\n resid_df = (env_df - sigma_sub_df)[(sigma_sub_df <= env_df)].fillna(0)\n return resid_df", "def compute_residuals(r):\n global conv_residuals\n conv_residuals.append(r)\n return", "def mass_flow_func(self):\n residual = []\n for i in range(self.num_i):\n residual += [self.inl[i].m.val_SI - self.outl[i].m.val_SI]\n return residual", "def mass_flow_func(self):\n residual = []\n for i in range(self.num_i):\n residual += [self.inl[i].m.val_SI - self.outl[i].m.val_SI]\n return residual", "def residuals(p, r, theta):\n return r - f(theta, p)" ]
[ "0.74340457", "0.7347378", "0.7319442", "0.7217461", "0.69636905", "0.6963552", "0.6871364", "0.68401945", "0.681984", "0.6801746", "0.672087", "0.6656552", "0.6597367", "0.65881217", "0.65841734", "0.65810835", "0.65665925", "0.65098804", "0.64105034", "0.64079887", "0.6395862", "0.6337479", "0.63301075", "0.63069177", "0.6304865", "0.629369", "0.62907547", "0.62894905", "0.62894905", "0.62729454" ]
0.7524545
0
r""" Calculate partial derivatives for all fluid balance equations. Returns
def fluid_deriv(self):
    deriv = np.zeros((self.fluid_constraints['num_eq'],
                      2 * self.num_i + self.num_vars,
                      self.num_nw_vars))
    for i in range(self.num_i):
        for j in range(self.num_nw_fluids):
            deriv[i * self.num_nw_fluids + j, i, j + 3] = 1
            deriv[i * self.num_nw_fluids + j, self.num_i + i, j + 3] = -1
    return deriv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_partial_derivatives(self) -> List[Callable]:\n return [self.dfda, self.dfdb, self.dfdc]", "def get_partial_derivatives(self) -> List[Callable]:\n return [self.dfdm, self.dfdc]", "def get_partial_derivatives(self) -> List[Callable]:\n pass", "def get_partial_derivatives(self) -> List[Callable]:\n return [self.dfdmu, self.dfdsig]", "def derivatives(self, increment_filter):\n ######################################################################\n # derivatives fluid and mass balance are static\n k = self.num_nw_fluids * 2 + 2\n\n ######################################################################\n # derivatives for energy balance equation\n for i in range(2):\n self.jacobian[k, i, 0] = (\n self.outl[i].h.val_SI - self.inl[i].h.val_SI)\n self.jacobian[k, i, 2] = -self.inl[i].m.val_SI\n\n self.jacobian[k, 2, 2] = self.inl[0].m.val_SI\n self.jacobian[k, 3, 2] = self.inl[1].m.val_SI\n k += 1\n\n ######################################################################\n # derivatives for specified heat transfer\n if self.Q.is_set:\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n self.jacobian[k, 0, 2] = -self.inl[0].m.val_SI\n self.jacobian[k, 2, 2] = self.inl[0].m.val_SI\n k += 1\n\n ######################################################################\n # derivatives for specified heat transfer coefficient\n if self.kA.is_set:\n f = self.kA_func\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n for i in range(4):\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified heat transfer coefficient\n if self.kA_char.is_set:\n f = self.kA_char_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(f, 'm', 0)\n if not increment_filter[1, 0]:\n self.jacobian[k, 1, 0] = self.numeric_deriv(f, 'm', 1)\n for i in range(4):\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified upper terminal temperature difference\n if self.ttd_u.is_set:\n f = self.ttd_u_func\n for i in [0, 3]:\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified lower terminal temperature difference\n if self.ttd_l.is_set:\n f = self.ttd_l_func\n for i in [1, 2]:\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio at hot side\n if self.pr1.is_set:\n self.jacobian[k, 0, 1] = self.pr1.val\n self.jacobian[k, 2, 1] = -1\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio at cold side\n if self.pr2.is_set:\n self.jacobian[k, 1, 1] = self.pr2.val\n self.jacobian[k, 3, 1] = -1\n k += 1\n\n 
######################################################################\n # derivatives for specified zeta at hot side\n if self.zeta1.is_set:\n f = self.zeta_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(\n f, 'm', 0, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[0, 1]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(\n f, 'p', 0, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(\n f, 'h', 0, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[2, 1]:\n self.jacobian[k, 2, 1] = self.numeric_deriv(\n f, 'p', 2, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[2, 2]:\n self.jacobian[k, 2, 2] = self.numeric_deriv(\n f, 'h', 2, zeta='zeta1', inconn=0, outconn=0)\n k += 1\n\n ######################################################################\n # derivatives for specified zeta at cold side\n if self.zeta2.is_set:\n f = self.zeta_func\n if not increment_filter[1, 0]:\n self.jacobian[k, 1, 0] = self.numeric_deriv(\n f, 'm', 1, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(\n f, 'p', 1, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(\n f, 'h', 1, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[3, 1]:\n self.jacobian[k, 3, 1] = self.numeric_deriv(\n f, 'p', 3, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[3, 2]:\n self.jacobian[k, 3, 2] = self.numeric_deriv(\n f, 'h', 3, zeta='zeta2', inconn=1, outconn=1)\n k += 1\n\n ######################################################################\n # derivatives for additional equations\n self.additional_derivatives(increment_filter, k)", "def test_partial_derivative_f1(self):\r\n # Verified with Wolfram Alpha.\r\n\r\n # f2 > 0\r\n obs = self.estimator1._partial_derivative_f1(2, 3, 10, 42)\r\n assert_almost_equal(obs, 1.22672908818)\r\n\r\n # f2 == 0\r\n obs = self.estimator1._partial_derivative_f1(2, 0, 10, 42)\r\n assert_almost_equal(obs, 1.272173492918482)\r\n\r\n # f1 == 0, f2 == 0\r\n obs = self.estimator1._partial_derivative_f1(0, 0, 10, 42)\r\n assert_almost_equal(obs, 1.2961664362634027)", "def derivatives(self, increment_filter):\n ######################################################################\n # derivatives fluid and mass balance are static\n k = self.num_nw_fluids + 1\n\n ######################################################################\n # derivatives for specified heat transfer\n if self.Q.is_set:\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n self.jacobian[k, 0, 2] = -self.inl[0].m.val_SI\n self.jacobian[k, 1, 2] = self.inl[0].m.val_SI\n # custom variable Q\n if self.Q.is_var:\n self.jacobian[k, 2 + self.Q.var_pos, 0] = -1\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio\n if self.pr.is_set:\n self.jacobian[k, 0, 1] = self.pr.val\n self.jacobian[k, 1, 1] = -1\n # custom variable pr\n if self.pr.is_var:\n self.jacobian[k, 2 + self.pr.var_pos, 0] = (\n self.inl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # derivatives for specified zeta\n if self.zeta.is_set:\n f = self.zeta_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(\n f, 'm', 0, zeta='zeta')\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(\n f, 'p', 
0, zeta='zeta')\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(\n f, 'h', 0, zeta='zeta')\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(\n f, 'p', 1, zeta='zeta')\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(\n f, 'h', 1, zeta='zeta')\n # custom variable zeta\n if self.zeta.is_var:\n self.jacobian[k, 2 + self.zeta.var_pos, 0] = (\n self.numeric_deriv(f, 'zeta', 2, zeta='zeta'))\n k += 1\n\n ######################################################################\n # derivatives for specified hydro-group parameters\n if self.hydro_group.is_set:\n # hazen williams equation\n if self.hydro_group.method == 'HW':\n func = self.hw_func\n # darcy friction factor\n else:\n func = self.darcy_func\n\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(func, 'm', 0)\n if not increment_filter[0, 1]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(func, 'p', 0)\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(func, 'h', 0)\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(func, 'p', 1)\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(func, 'h', 1)\n # custom variables of hydro group\n for var in self.hydro_group.elements:\n if var.is_var:\n self.jacobian[k, 2 + var.var_pos, 0] = (\n self.numeric_deriv(func, self.vars[var], 2))\n k += 1\n\n ######################################################################\n # derivatives for additional equations\n self.additional_derivatives(increment_filter, k)", "def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None", "def get_fractional_degradation(bt):\n\n\n\n NomIch = 0.125 # Nominal charge current\n NomId = 0.25 # Nominal discharge current\n NomSoC = 0.5 # Nominal state of charge_mode\n NomDoD = 1.0 # Nominal depth of discharge\n B = 5 #Battery capacity\n qt = 5 * 0.5 # Amount of energy in the battery at the start\n # Determin charge of discharge\n if bt > 0:\n Id = bt/(B*1) # time interval differnece is 1\n Ich = NomIch\n else:\n Ich = bt/(B*1)\n Id = NomId\n\n #Calculate average State of Charge\n SoC = 100 * (qt - 0.5*bt)/B\n\n #Calculate Depth of Discharge\n DoD = 100 * bt /B\n\n # Functions\n nCL1 = (e * np.exp (f * Id) + g * np.exp(h * Id))/ (e * np.exp (f * NomId) + g * 
np.exp(h * NomId))\n nCL2 = (m * np.exp (n * Ich) + o * np.exp(p * Ich))/ (m* np.exp (n* NomIch) + o * np.exp(p * NomIch))\n nCL3 = get_CL4(DoD, SoC)/get_CL4(NomDoD, NomSoC)\n nCL = nCL1 * nCL2 * nCL3\n Fractional_D = (0.5/3650)/ nCL\n return Fractional_D", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n \n for nl in self.nlists: \n nl.separations()\n \n for force in self.forces:\n force.apply()\n\n # Controllers is the new implementation of forces\n for controller in self.controllers:\n controller.apply()", "def compute_partials(self, inputs, partials):\n I_bat = inputs['I_bat']\n SOC = inputs['SOC']\n\n self.dCh_dg, self.dCh_drho = KSfunction.derivatives(I_bat - self.Imax, self.rho)\n self.dDs_dg, self.dDs_drho = KSfunction.derivatives(self.Imin - I_bat, self.rho)\n self.dS0_dg, self.dS0_drho = KSfunction.derivatives(self.SOC0 - SOC, self.rho)\n self.dS1_dg, self.dS1_drho = KSfunction.derivatives(SOC - self.SOC1, self.rho)\n\n self.dCh_dg = self.dCh_dg.flatten()\n self.dCh_drho = self.dCh_drho.flatten()\n self.dDs_dg = self.dDs_dg.flatten()\n self.dDs_drho = self.dDs_drho.flatten()\n self.dS0_dg = self.dS0_dg.flatten()\n self.dS0_drho = self.dS0_drho.flatten()\n self.dS1_dg = self.dS1_dg.flatten()\n self.dS1_drho = self.dS1_drho.flatten()", "def func_deriv(x,remain):\n #df_x0 = -1*remain[0]/x[0]**2\n #df_x1 = -1*remain[2]/x[1]**2\n #df_x2 = -1*remain[1]/x[2]**2\n return np.array(-1*remain/x**2)", "def _centred_first_derivs(self, f):\n return ((f[2:, 1:-1] - f[0:-2, 1:-1]) / (2 * self._dx),\n (f[1:-1, 2:] - f[1:-1, 0:-2]) / (2 * self._dy))", "def dfr(self):\n return self.table[1, 0] / (self.table[1, 0] + self.table[1, 1])", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n self.udot[:] = 0.0\n\n t = time()\n for nl in self.nlists: \n nl.separations()\n #nl.apply_minimum_image()\n self.timing['pairsep time'] = (time() - t)\n\n t = time()\n if SPROPS:\n properties.spam_properties(self,self.nl_default \\\n ,self.h[0:self.n],self.hlr[0:self.n])\n self.timing['SPAM time'] = time() - t\n \n t = time()\n for force in self.forces:\n force.apply()\n self.timing['force time'] = time() - t\n \n if ADVECTIVE:\n self.rdot[:,:] = 0.0", "def _compute_f(self, p, dh, dv):\n return dh / (self.beta * p * dv)", "def derivatives(x_p, y_p):\r\n # set up the matrix equation\r\n n = x_p.shape[0]\r\n M = np.zeros( [n,n] )\r\n d = np.zeros( [n,1] )\r\n \r\n # fill in the constants where they can be\r\n for i in np.arange(1,n-1 ): # for all but the first and last row\r\n M[i,i-1 ] = ( x_p[i] - x_p[i-1] ) / 6.\r\n M[i,i] = ( x_p[i+1] - x_p[i-1] ) / 3.\r\n M[i,i+1] = ( x_p[i+1] - x_p[i] ) /6.\r\n d[i,0 ] = ( y_p[i+1] - y_p[i] ) / ( x_p[i+1] - x_p[i] ) - ( y_p[i] - y_p[i-1] ) / ( x_p[i] - x_p[i-1] )\r\n \r\n M[0,0],M[-1,-1] = 1.,1. 
# compactly sets the BCs\r\n \r\n LU = lu.LU_decomp(M) # solves the matrix equations\r\n return lu.FB_sub(LU.Low, LU.Upp, d) # find and return 2nd derivatives\r", "def partial_derivative(f, x, i, epsilon = 1e-10):\n x_ = np.copy(x).astype(np.float64)\n x_[i] = x_[i] + epsilon\n value = (f(x_) - f(x)) / epsilon\n\n return value", "def compute_partials(self, inputs, partials):\n SOC = inputs['SOC']\n temperature = inputs['temperature']\n P_bat = inputs['P_bat']\n\n # dI_dP\n dV_dvoc = IR * self.exponential\n dV_dT = - IR * self.voc * np.exp(alpha*(temperature[4, :] - T0)/T0) * alpha / T0\n dVoc_dSOC = np.exp(SOC[0, :]) / (np.e-1)\n\n self.dI_dP = 1.0 / self.V\n tmp = -P_bat/(self.V**2)\n self.dI_dT = tmp * dV_dT\n self.dI_dSOC = tmp * dV_dvoc * dVoc_dSOC", "def _gradients(self, partial):\r\n dL_dfhat, I_KW_i = self._shared_gradients_components()\r\n dlik_dthetaL, dlik_grad_dthetaL, dlik_hess_dthetaL = self.noise_model._laplace_gradients(self.f_hat, self.data, extra_data=self.extra_data)\r\n\r\n #len(dlik_dthetaL)\r\n num_params = len(self._get_param_names())\r\n # make space for one derivative for each likelihood parameter\r\n dL_dthetaL = np.zeros(num_params)\r\n for thetaL_i in range(num_params):\r\n #Explicit\r\n dL_dthetaL_exp = ( np.sum(dlik_dthetaL[:, thetaL_i])\r\n #- 0.5*np.trace(mdot(self.Ki_W_i, (self.K, np.diagflat(dlik_hess_dthetaL[thetaL_i]))))\r\n + np.dot(0.5*np.diag(self.Ki_W_i)[:,None].T, dlik_hess_dthetaL[:, thetaL_i])\r\n )\r\n\r\n #Implicit\r\n dfhat_dthetaL = mdot(I_KW_i, self.K, dlik_grad_dthetaL[:, thetaL_i])\r\n dL_dthetaL_imp = np.dot(dL_dfhat, dfhat_dthetaL)\r\n dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp\r\n\r\n return dL_dthetaL", "def derivatives(self, state):\n raise NotImplementedError()", "def FiniteDiff(u, dx, d):\n \n n = u.size\n ux = np.zeros(n, dtype=u.dtype)\n \n if d == 1:\n for i in range(1,n-1):\n ux[i] = (u[i+1]-u[i-1]) / (2*dx)\n \n ux[0] = (-3.0/2*u[0] + 2*u[1] - u[2]/2) / dx\n ux[n-1] = (3.0/2*u[n-1] - 2*u[n-2] + u[n-3]/2) / dx\n return ux\n \n if d == 2:\n for i in range(1,n-1):\n ux[i] = (u[i+1]-2*u[i]+u[i-1]) / dx**2\n \n ux[0] = (2*u[0] - 5*u[1] + 4*u[2] - u[3]) / dx**2\n ux[n-1] = (2*u[n-1] - 5*u[n-2] + 4*u[n-3] - u[n-4]) / dx**2\n return ux\n \n if d == 3:\n for i in range(2,n-2):\n ux[i] = (u[i+2]/2-u[i+1]+u[i-1]-u[i-2]/2) / dx**3\n \n ux[0] = (-2.5*u[0]+9*u[1]-12*u[2]+7*u[3]-1.5*u[4]) / dx**3\n ux[1] = (-2.5*u[1]+9*u[2]-12*u[3]+7*u[4]-1.5*u[5]) / dx**3\n ux[n-1] = (2.5*u[n-1]-9*u[n-2]+12*u[n-3]-7*u[n-4]+1.5*u[n-5]) / dx**3\n ux[n-2] = (2.5*u[n-2]-9*u[n-3]+12*u[n-4]-7*u[n-5]+1.5*u[n-6]) / dx**3\n return ux\n \n if d > 3:\n return FiniteDiff(FiniteDiff(u,dx,3), dx, d-3)", "def first_derivative(x, y, finitediff_scheme = 'central'):\n if finitediff_scheme == 'central':\n return np.gradient(y, x[1]-x[0])\n \n elif finitediff_scheme == 'five point stencil':\n y = [0, 0] + list(y) + [0, 0]\n numerator = np.array([y[i-2] - 8*y[i-1] + 8*y[i+1] - y[i+2] for i in range(2, len(y)-2)])\n denominator = 12*(x[1] - x[0]) \n return numerator/denominator", "def fluid_func(self):\n residual = []\n for i in range(self.num_i):\n for fluid, x in self.inl[0].fluid.val.items():\n residual += [x - self.outl[0].fluid.val[fluid]]\n return residual", "def bus_deriv(self, bus):\n deriv = np.zeros((1, 4, self.num_nw_vars))\n f = self.calc_bus_value\n deriv[0, 0, 0] = self.numeric_deriv(f, 'm', 0, bus=bus)\n deriv[0, 0, 2] = self.numeric_deriv(f, 'h', 0, bus=bus)\n deriv[0, 2, 2] = self.numeric_deriv(f, 'h', 2, bus=bus)\n return deriv", "def 
calc_derivatives(self, first=False, second=False, savebase=False):\n\n self.driver.calc_derivatives(first, second, savebase)", "def compute_dz(self):\n el_geom_w = self.compute_geom_weights()\n el_geom_grad = self.compute_geom_grads()\n\n # Sum of weights coeffs\n w_node_sum = np.bincount(np.ravel(self._triangles),\n weights=np.ravel(el_geom_w))\n\n # Sum of weighted df = (dfx, dfy)\n dfx_el_w = np.empty_like(el_geom_w)\n dfy_el_w = np.empty_like(el_geom_w)\n for iapex in range(3):\n dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]\n dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]\n dfx_node_sum = np.bincount(np.ravel(self._triangles),\n weights=np.ravel(dfx_el_w))\n dfy_node_sum = np.bincount(np.ravel(self._triangles),\n weights=np.ravel(dfy_el_w))\n\n # Estimation of df\n dfx_estim = dfx_node_sum/w_node_sum\n dfy_estim = dfy_node_sum/w_node_sum\n return np.vstack([dfx_estim, dfy_estim]).T", "def ddalf(x):\n return dalf_spl.derivatives(x)[1]", "def fluid_deriv(self, increment_filter, k):\n i = 0\n for fluid in self.nw_fluids:\n j = 0\n for o in self.outl:\n self.jacobian[k, j + 1, 0] = -o.fluid.val[fluid]\n self.jacobian[k, j + 1, i + 3] = -o.m.val_SI\n j += 1\n self.jacobian[k, 0, 0] = self.inl[0].fluid.val[fluid]\n self.jacobian[k, 0, i + 3] = self.inl[0].m.val_SI\n k += 1\n i += 1", "def _computeDerivative(self,angles, distances):\n slope=[]\n slope.append(0)\n for i in xrange(1,len(angles)):\n der = (distances[i]-distances[i-1])/(angles[i]-angles[i-1])\n slope.append(der)\n #slope.append(0)\n return slope" ]
[ "0.7008659", "0.68837386", "0.67990386", "0.6779742", "0.651725", "0.6399954", "0.6385995", "0.626156", "0.623674", "0.61964655", "0.61451423", "0.61306727", "0.6099219", "0.60581386", "0.6001761", "0.59012693", "0.58673", "0.5847264", "0.58267736", "0.57926023", "0.57854134", "0.577504", "0.57484466", "0.57427", "0.571561", "0.5712205", "0.5710193", "0.5709309", "0.5699218", "0.5678253" ]
0.6893844
1
r""" Equation for pressure equality. Returns
def pressure_equality_func(self): residual = [] for i in range(self.num_i): residual += [self.inl[i].p.val_SI - self.outl[i].p.val_SI] return residual
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pressure_equality_deriv(self):\n deriv = np.zeros((\n self.num_i,\n self.num_i + self.num_o + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n deriv[i, i, 1] = 1\n for j in range(self.num_o):\n deriv[j, j + i + 1, 1] = -1\n return deriv", "def get_eqn(p0, p1):\n m = (p0[1] - p1[1]) / (p0[0] - p1[0])\n return (m, p0[1] - m * p0[0])", "def pressure_equality_func_doc(self, label):\n indices = list(range(1, self.num_i + 1))\n if len(indices) > 1:\n indices = ', '.join(str(idx) for idx in indices)\n else:\n indices = str(indices[0])\n latex = (\n r'0=p_{\\mathrm{in,}i}-p_{\\mathrm{out,}i}'\n r'\\; \\forall i \\in [' + indices + r']')\n return generate_latex_eq(self, latex, label)", "def eq(self, y):\n return 1 - self.ne(y)", "def approx_eq(a, b):\n return abs(a-b) < approx_eq.eps", "def _correct_p(self, f0, f1):\n return self.p * np.exp(self.dbeta * (f0 + f1) / 2)", "def test_isentropic_pressure_p_increase():\n lev = [85000, 90000., 95000., 100000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 288.\n tmp[1, :] = 290.\n tmp[2, :] = 292.\n tmp[3, :] = 296.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 1000. * units.hPa\n assert_almost_equal(isentprs[0], trueprs, 3)", "def exner_function(pressure, reference_pressure=P0):\n return (pressure / reference_pressure)**kappa", "def get_exact(self):\n from sympy.polys.domains import QQ\n return QQ", "def test_q(self):\n assert np.allclose(self.stepper.q, self.ODE.exact(self.stepper.t), rtol=1e-3, atol=1e-5)", "def correct_venturi_pressure_differential(dP:pd.Series,\n P:pd.Series) -> pd.Series:\n \n denominator = 0.019969+ 0.131*P*1e-6-0.003967\n return 23.867*((dP+5.9175)/1491.69 - 0.0003967)/denominator", "def getPureComponentVaporPressure(self,Temperature):\n\t\tA = self.Antoine_params[0]\n\t\tB = self.Antoine_params[1]\n\t\tC = self.Antoine_params[2]\n\t\t\n\t\t# Antoine's Equation\n\t\tPmmHg = 10**(A - B / (C + Temperature - 273.15))\n\t\treturn PmmHg * 133.322 # to get Pa", "def pressure():\n global correct_pressure\n atmg = random()\n mmHgg = atmg * 760\n Kpag = atmg * 101.325\n barg = atmg * 1.01325\n psig = atmg * 14.696\n all_pressure = [(mmHgg,\" mmHg gage\"),\n (atmg,\" atm gage\"),\n (Kpag,\" Kpa gage\"),\n (psig,\" psi gage\"),\n (barg,\" bar gage\"),\n (mmHgg - 760,\" mmHg absolute\"),\n (atmg - 1,\" atm absolute\"),\n (Kpag - 101.325,\" Kpa absolute\"),\n (psig - 14.696,\" psi absolute\"),\n (barg - 1.01323,\" bar absolute\")]\n P1,P2 = tuple(sample(set(all_pressure), 2))\n\n print(\"\\n\\nThis is a Pressure question.\\n\")\n print(\"For an Pressure of {:0.2f}{}\\nPlease find its equivanlent value in{}\".format(*P1,P2[1]))\n guess = get_user_input(float)\n\n if check_answer(P2[0], guess, 0.2):\n print(\"correct\")\n correct_pressure += 1\n else:\n print(\"false the answer is {:0.2f}{}\".format(*P2))", "def eqnState(p,rho,u,gamma):\n e = p/(gamma-1)+rho*u**2/2\n return e", "def return_expression(b, p):\n # Properties of Gases and Liquids, Eq. 9-5.14\n # and Eq. 10-6.4\n ViscosityWilke.build_phi_ij(b, p)\n if not hasattr(b, \"_therm_cond_phase_comp\"):\n b._make_therm_cond_phase_comp() # pylint: disable=protected-access\n\n # Properties of Gases and Liquids, Eq. 
10-6.2\n return sum(\n [\n b.mole_frac_phase_comp[p, i]\n * b._therm_cond_phase_comp[p, i] # pylint: disable=protected-access\n / sum(\n [\n b.mole_frac_phase_comp[p, j] * b.visc_d_phi_ij[i, j]\n for j in b.components_in_phase(p)\n ]\n )\n for i in b.components_in_phase(p)\n ]\n )", "def test_isentropic_pressure():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmp[:, :, -1] = np.nan\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = np.ones((1, 5, 5)) * (1000. * units.hPa)\n trueprs[:, :, -1] = np.nan\n assert isentprs[0].shape == (1, 5, 5)\n assert_almost_equal(isentprs[0], trueprs, 3)", "def __eq__(self, frac):\n return self.equal == frac.equal", "def test_eq():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.sin(x) + fwd.cos(y)\n g = fwd.sin(x) + fwd.cos(y)\n h = fwd.sin(y) + fwd.cos(x)\n assert f == g\n assert f != h", "def point_on_curve(self, P):\n x, y = modp(self.p, P.x, P.y)\n lhs = y ** 2\n rhs = x ** 3 + x * self.a + self.b\n return lhs == rhs", "def liqpressure(temp):\n tau = temp/_TTP\n pres = 1.\n for (a,b) in _C_PMELT:\n pres += a * (1 - tau**b)\n pres *= _PTPE\n return pres", "def eq(w, x):\n return (-w[1]*x - w[0]) / w[2]", "def p(e, t):\n return b * e ** 2", "def eq_const(free_g, temp):\n\n return np.exp(-free_g / (R * temp))", "def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0", "def test_pressure(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.pressure[0], 223599111111.10834)", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def test_equivalent_potential_temperature():\n p = 1000 * units.mbar\n t = 293. * units.kelvin\n td = 280. * units.kelvin\n ept = equivalent_potential_temperature(p, t, td)\n assert_almost_equal(ept, 311.18586467284007 * units.kelvin, 3)", "def vaporPressure(temp: float) -> float:\n exponent = (17.27*temp)/(temp + 237.3)\n vp = 611*np.exp(exponent)\n\n return vp", "def test_eq(self):\n st_1 = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n st_2 = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n assert st_1 == st_2", "def _same(p1,p2,prec=0.0001):\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True" ]
[ "0.66087", "0.60439175", "0.6018254", "0.59146416", "0.5873855", "0.5870745", "0.5859539", "0.58267796", "0.58157915", "0.5770944", "0.5750991", "0.56742233", "0.56737494", "0.5672614", "0.5670559", "0.56237346", "0.56087416", "0.5586878", "0.5584568", "0.558083", "0.5577955", "0.5575006", "0.55577683", "0.55516607", "0.55462086", "0.5544628", "0.55433136", "0.5540476", "0.55361575", "0.5514403" ]
0.6142468
1
Return the list of logged messages. If the bitvector function includes `log_msg` calls in its ``eval``, this method returns the list of messages logged in the last evaluation with the format field objects applied. Otherwise, an exception is raised. See also `log_msg`.
def get_formatted_logged_msgs(cls): if cls._logger is None: raise ValueError("eval must be called before get_formatted_logged_msgs") list_msgs = [] for format_string, format_field_objects in cls._logger: list_msgs.append(format_string.format(*format_field_objects)) return list_msgs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetchLogs(self):\n return [record.msg for record in self.handler.buffer]", "def logged_messages(self):\n return self._logged_messages", "def get_log_messages(self, instance):\n return LogMessageSerializer(instance.log_messages, many=True).data", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances", "def assertInLogs(self, msg):\n in_logs = [record.msg\n for record in self.handler.buffer if msg in record.msg]\n if not in_logs:\n raise AssertionError(\"Expected `%s' is not in logs\" % msg)\n return in_logs", "def define_log_post_format_hooks(self):\n # TODO remove this once structlog supports hooks or handlers\n # these hooks accept a 'msg' and do not return anything\n return []", "def getLogs():", "def getLogs():", "def logs(self, **kwargs):\n matches = []\n for record in self.buffer:\n found_match = True\n for (key, value) in kwargs.items():\n if key == 'msg':\n # Regexp match\n if not re.search(value, str(record.get(key))):\n found_match = False\n break\n elif key == 'args':\n for (exp, act) in zip(value, record.get(key)):\n if not re.search(str(exp), str(act)):\n found_match = False\n break\n elif not value == record.get(key):\n found_match = False\n break\n if found_match:\n matches.append(record)\n return matches", "def recorded_messages(self):\n messages = []\n for time in sorted(self.reception_records):\n messages.extend(self.reception_records[time])\n return messages", "def messages(self):\n return list(iter(self))", "def messages_list(self, level=WARNING):\n result = []\n for message in self.messages:\n if message['level'] in self.equal_or_greater_levels[self.level_names[level]]:\n result.append(message)\n return result", "def logs(self, **kwargs):\n matches = []\n for record in self.buffer:\n found_match = True\n for (key, value) in kwargs.items():\n if key == 'msg':\n # Regexp match\n if not re.search(value, str(record.get(key))):\n found_match = False\n break\n elif not value == record.get(key):\n found_match = False\n break\n if found_match:\n matches.append(record)\n return matches", "def log_msg(cls, format_string, format_field_objects=None):\n if cls._logger is None:\n cls._logger = []\n assert isinstance(format_string, str)\n if format_field_objects is None:\n format_field_objects = []\n elif isinstance(format_field_objects, core.Term):\n format_field_objects = [format_field_objects]\n assert isinstance(format_string, collections.abc.Sequence)\n cls._logger.append((format_string, format_field_objects))", "def get_msgs(self):\n msgs = []\n while True:\n try:\n msgs.append(self.get_msg(block=False))\n except Empty:\n break\n return msgs", "def get_msgs(self):\n msgs = []\n while True:\n try:\n msgs.append(self.get_msg(block=False))\n except Empty:\n break\n return msgs", "def GetLogs(self):\n utcnow = datetime.datetime.utcnow()\n lower_filter = self.log_position.GetFilterLowerBound()\n upper_filter = self.log_position.GetFilterUpperBound(utcnow)\n new_filter = self.base_filters + [lower_filter, upper_filter]\n entries = logging_common.FetchLogs(\n log_filter=' AND '.join(new_filter),\n order_by='ASC',\n limit=self.LOG_BATCH_SIZE)\n return [entry for entry in entries if\n self.log_position.Update(entry.timestamp, entry.insertId)]", "def iter_log_messages(self, build_id=None, max_date=None,\n min_date=None, min_level=None):\n pass", "def __msgtolist(self) -> List[str]:\n return self.msg.splitlines()", "def list_messages(self):", "def get_log_messages_from_response(response):\n return [' 
'.join(line.split()[2:]) for line in response]", "def get_messages_log(orderid): \n data = order_obj.get_messages_log(orderid)\n return data", "def get_logs(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.LogList(self._results, runtime=self._runtime)", "def _get_logs(self):\n logstart = self.LOGSTART%(self.session.uuid, self.session.run_counter)\n logend = self.LOGEND%(self.session.uuid, self.session.run_counter)\n log = self.container.logs().decode('UTF-8')\n while log.find(logstart) == -1 or log.find(logend) == -1:\n log = self.container.logs().decode('UTF-8')\n cleaned_log = self._get_cleaned_logs(log, logstart, logend)\n self.session.run_counter = self.session.run_counter + 1\n self.session.save()\n return cleaned_log", "def _messages_list(self, queue):\n\n return queue.messages()", "def logs(self):\n return self.logger.logs()", "def after_rotate_logs(msg, config, checklist):\n return []", "def error_messages(self) -> List[str]:\n spatial_msgs = []\n temporal_msgs = []\n if self.spatial:\n spatial_msgs = [m for v, m in self.spatial_validations if not v(self.spatial)]\n if self.temporal:\n temporal_msgs = [m for v, m in self.temporal_validations if not v(self.temporal)]\n\n return spatial_msgs + temporal_msgs", "def logged_messages_tagged(self):\n return self._logged_messages_tagged", "def retrieve_error_messages(self):\n return self.errors_seen[:]" ]
[ "0.6443969", "0.6088012", "0.59338963", "0.5926019", "0.5836273", "0.58012664", "0.5783213", "0.5783213", "0.5766307", "0.5717272", "0.57159305", "0.5616483", "0.55946606", "0.5492802", "0.5484665", "0.5484665", "0.5435299", "0.54241", "0.53707105", "0.5340541", "0.5327934", "0.5311856", "0.5297458", "0.5289315", "0.52647537", "0.5214876", "0.52046686", "0.51877123", "0.5186773", "0.51826084" ]
0.7781618
0
Return the class name and the current number of rounds.
def get_name(cls): return f"{super().get_name()}_{cls.num_rounds}R"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def report_rounds(self):\n return print(f\"Total Rounds played: {sum([self.wins, self.draws, self.losses])}\")", "def vrepr(cls):\n return f\"{super().vrepr()}.set_num_rounds_and_return({cls.num_rounds})\"", "def count_one_round(self):\n\t\tself.round_count+=1\n\t\treturn self.round_count", "def name(self) -> str:\n return self.class_names[self.class_num]", "def rounds(self) -> Row:\n return rounds(self.stage)", "def num_class(self):\r\n return self._num_class", "def __repr__(self):\n\t\treturn '{}: {}, {}, {}'.format(self.name,\n\t\t\t\t\t\t\t\t\tself.roundNumber,\n\t\t\t\t\t\t\t\t\tself.pointsThisRound,\n\t\t\t\t\t\t\t\t\tself.pointsTotal)", "def class_name(self) -> str:\n return pulumi.get(self, \"class_name\")", "def name(self) -> str:\n return f\"{self._obj_name} count\"", "def rounds(self):\n if self.round_number > 0:\n for i in range(self.round_number):\n yield Round(i + 1)", "def get_num_classes(self):", "def rounds(self) -> List[Round]:\n return self._rounds", "def newRound():\r\n pass", "def init_round_numbers(self):\r\n for round_num in range(1, 13):\r\n lbl_round_num = tk.Label(self.master, text=str(round_num), font='courier 10 bold',\r\n fg='green', pady=2)\r\n lbl_round_num.grid(row=round_num+1, column=0)\r\n row = 14\r\n for trump in [\"C\", \"D\", \"H\", \"S\", \"NT\"]:\r\n lbl_round_num = tk.Label(self.master, text=\"13\"+trump, font='courier 10 bold',\r\n fg='green')\r\n lbl_round_num.grid(row=row, column=0)\r\n row += 1", "def round(self):\n return {\n 0: 'preflop',\n 3: 'flop',\n 4: 'turn',\n 5: 'river',\n }[len(self.community)]", "def get_round() -> int:\n return store.round", "def _getGameName(self):\n className = self.__class__.__name__\n gameName = className[0].lower() + className[1:]\n return gameName", "def getClassName(self):\n n = type(self).__name__\n return n", "def generate_class_name(self):\n self.class_name = self.profile_id_dict[self.profile_id]\n return self.class_name", "def f_get_class_name(self):\n return self.__class__.__name__", "def _get_class_presentation(self) -> str:\n return \"{}.{}.{}\".format(self.day, self.month, self.year[-2:])", "def className(self):\n namevalue = self.__class__.__name__\n return str(namevalue)", "def _increment_round_number():\n store.round += 1", "def class_name(self) -> str:\n return self.__class__.__name__", "def num_classes(self):\n\t\treturn 10", "def print_end_of_round_rankings(self):\n print('\\nFINAL SCORES')\n print('-'*12)\n for k, v in enumerate(self.ranking_dict):\n print(f\"{k+1} {v[0]}: {v[1]}\")\n print('\\n')", "def __str__(self):\n return f\"WINS: {self.wins}, LOSSES: {self.losses}, DRAWS: {self.draws}\"", "def class_num(self) -> int:\n return int(np.argmax(self.class_scores))", "def __init__(self, name, roundNumber = 0, pointsThisRound=0.0,pointsTotal = 0.0):\n\t\tself.name = name\n\t\tself.roundNumber = roundNumber\n\t\tself.pointsThisRound = pointsThisRound\n\t\tself.pointsTotal = pointsTotal", "def num(self, cls):\n try:\n return self.classes.index(cls) + 1\n except:\n raise Exception(\"Someone asked for \" + str(cls) + \", which is not here \" + str(self))" ]
[ "0.6289936", "0.6277111", "0.61140394", "0.59419346", "0.585802", "0.5823698", "0.58067185", "0.57185745", "0.5677853", "0.5667394", "0.56487215", "0.559757", "0.5528206", "0.54875326", "0.5471102", "0.5463627", "0.5463372", "0.5451482", "0.5443987", "0.54125917", "0.54034907", "0.5382465", "0.5350858", "0.53297555", "0.53026515", "0.5295861", "0.52953166", "0.5288599", "0.5269466", "0.52656466" ]
0.72071
0
Return the `SSA` object of the round-based bitvector function. This method calls `BvFunction.to_ssa` with the same argument list, and stores the round outputs in the `SSA` object if `add_round_outputs` calls were added in ``eval``.
def to_ssa(cls, input_names, id_prefix, decompose_sec_ops=False, **ssa_options): my_ssa = super().to_ssa(input_names, id_prefix, decompose_sec_ops=decompose_sec_ops, **ssa_options) if cls._rounds_outputs is not None: my_ssa._rounds_outputs = cls._rounds_outputs[:] return my_ssa
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_bvfunction(self):\n _input_widths = [v.width for v in self.input_vars]\n _output_widths = [v.width for v in self.output_vars]\n\n class MyBvFunction(BvFunction):\n input_widths = _input_widths\n output_widths = _output_widths\n _ssa = self\n\n @classmethod\n def vrepr(cls):\n return f\"{self.vrepr()}.to_bvfunction()\"\n\n @classmethod\n def to_ssa(cls, input_names, id_prefix, decompose_sec_ops=False, **ssa_options):\n old_names = [str(v) for v in cls._ssa.input_vars]\n if list(input_names) != old_names or decompose_sec_ops is True or \\\n ssa_options.get(\"replace_multiuse_vars\", False) != cls._ssa._replace_multiuse_vars_bool:\n raise ValueError(f\"the arguments of to_ssa(input_names={input_names}, \"\n f\"decompose_sec_ops={decompose_sec_ops}, {ssa_options})\"\n f\" are not the arguments used to generate {cls._ssa}\")\n return cls._ssa\n\n @classmethod\n def eval(cls, *input_vals):\n d = {}\n\n for var, val in zip(cls._ssa.input_vars, input_vals):\n d[var] = val\n\n for outvar, expr in cls._ssa.assignments.items():\n # avoiding expr.xreplace(d)\n # (only calls Operation.__new__ if there is some substitution)\n out_val = type(expr)(*[d.get(arg, arg) for arg in expr.args])\n d[outvar] = out_val\n\n output_vals = []\n for var in cls._ssa.output_vars:\n output_vals.append(d.get(var, var))\n\n return tuple(output_vals)\n\n return MyBvFunction", "def get_ssa(self):\n return copy.deepcopy(self.ssa)", "def _make_spsa_optimizer(self):\n def optimize(maxiter: int = 1000,\n tol = None,\n save_steps: int = 1,\n c0: float = 0.62,\n c1: float = 0.1,\n c2: float = 0.602,\n c3: float = 0.101,\n c4: float = 0):\n \"\"\"\n This method is heavily based on qiskits optimizers.spsa method, \n adapted here to worth with on quibs tn's without exact gradients \n\n Parameters\n ----------\n maxiter: Maximum number of iterations to perform.\n tol : None or float stops optim if tol is reached (default none - completes all steps)\n save_steps: Save intermediate info every save_steps step. It has a min. value of 1.\n last_avg: Averaged parameters over the last_avg iterations.\n If last_avg = 1, only the last iteration is considered. It has a min. value of 1.\n c0: The initial a. Step size to update parameters.\n c1: The initial c. The step size used to approximate gradient.\n c2: The alpha in the paper, and it is used to adjust a (c0) at each iteration.\n c3: The gamma in the paper, and it is used to adjust c (c1) at each iteration.\n c4: The parameter used to control a as well.\n \n Returns\n -------\n TYPE : updated object? 
(same return as TNOptimize)\n \"\"\"\n _spsa_vars = [c0, c1, c2, c3, c4]\n theta = self.vectorizer.vector\n nb_params = len(theta)\n use_exact_grads = 'grads' in self._method\n \n if save_steps:\n theta_vec = [theta]\n cost_vec = [self.vectorized_value_and_grad(theta)[0]]\n \n \n pbar = tqdm(total=maxiter, disable=not self.progbar)\n def callback(_):\n pbar.clear()\n pbar.update()\n val = round(self.loss, 5)\n pbar.set_description(str(val))\n\n if self.loss_target is not None:\n if self.loss < self.loss_target:\n # returning True doesn't terminate optimization\n pbar.close()\n raise KeyboardInterrupt\n \n for ii in range(maxiter):\n \n a_spsa = float(_spsa_vars[0]) / ((ii + 1 + _spsa_vars[4])**_spsa_vars[2])\n c_spsa = float(_spsa_vars[1]) / ((ii + 1)**_spsa_vars[3])\n delta = 2 * randint(0, 2, size=nb_params) - 1\n # plus and minus directions\n \n if use_exact_grads:\n raise NotImplementedError('Will use grad calc to project on to SP-direction')\n else:\n theta_plus = theta + c_spsa * delta\n theta_minus = theta - c_spsa * delta\n\n cost_plus = self.vectorized_value_and_grad(theta_plus)[0]\n cost_minus = self.vectorized_value_and_grad(theta_minus)[0]\n # derivative estimate\n g_spsa = (cost_plus - cost_minus) * delta / (2.0 * c_spsa)\n # updated theta\n theta = theta - a_spsa * g_spsa\n \n callback(ii)\n \n if tol is not None:\n if (cost_plus + cost_minus)/2 < tol:\n pbar.close()\n break\n \n if save_steps:\n theta_vec.append(theta)\n cost_vec.append(cost_plus/2+cost_minus/2)\n \n \n result_dict = {'hyper_parameters':_spsa_vars,\n 'maxiter':maxiter,\n 'theta_opt':theta,\n 'cost_opt':self.vectorized_value_and_grad(theta)[0],\n 'grad_opt':self.vectorized_value_and_grad(theta)[1]}\n if save_steps:\n result_dict['theta_history'] = theta_vec\n result_dict['cost_history'] = cost_vec\n self.result_dict = result_dict\n pbar.close()\n\n return self.inject_res_vector_and_return_tn()\n return optimize", "def get_ssa(self, name='main'):\n\n ssa = NetworkEnsemble()\n ssa.add_function(name, self.get_function())\n return ssa", "def get_function(self):\n return SSAFunction(self.get_graph())", "def crude_ssa(cfg):\r\n add_phi_def(cfg)\r\n version_tmp(cfg)\r\n update_uses(cfg)\r\n fill_phi_args(cfg)", "def Sbox_one_round(R,fk,sbox):\n s = R ^ fk\n i = s[(5,0)].ival\n j = s[(4,3,2,1)].ival\n return Bits(S(sbox,j+(i<<4)),4)[::-1]", "def generate_A_from_binning(t_binning):\n return generate_A_from_S(generate_S_from_binning(t_binning))", "def makeit_ssa(exprs):\n # Identify recurring LHSs\n seen = {}\n for i, e in enumerate(exprs):\n seen.setdefault(e.lhs, []).append(i)\n # Optimization: don't waste time reconstructing stuff if already in SSA form\n if all(len(i) == 1 for i in seen.values()):\n return exprs\n # SSA conversion\n c = 0\n mapper = {}\n processed = []\n for i, e in enumerate(exprs):\n where = seen[e.lhs]\n rhs = e.rhs.xreplace(mapper)\n if len(where) > 1:\n needssa = e.is_Scalar or where[-1] != i\n lhs = Symbol(name='ssa%d' % c, dtype=e.dtype) if needssa else e.lhs\n if e.is_Increment:\n # Turn AugmentedAssignment into Assignment\n processed.append(e.func(lhs, mapper[e.lhs] + rhs, is_Increment=False))\n else:\n processed.append(e.func(lhs, rhs))\n mapper[e.lhs] = lhs\n c += 1\n else:\n processed.append(e.func(e.lhs, rhs))\n return processed", "def __call__(self, *vFuncArgs):\n # return CRRAutility(self.func(*vFuncArgs), gam=self.CRRA)\n return CRRAutility(self.vFuncNvrs(*vFuncArgs), self.CRRA)", "def sqsignit(fun):\n\n def wrapper(self, x, *arg, **kwarg):\n return sqrtsigned(fun(self, sqsigned(x), 
*arg, **kwarg))\n\n return wrapper", "def itkSingleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def calculate_S(func, a, b, N):\n # Trapezoid width\n h = (b - a)/N\n\n # Every even slice\n new_part = func(a) + func(b)\n for i in range(2, N, 2):\n new_part += 2 * func(a + i*h) \n \n return 1/3. * new_part", "def sq_custom(f,T,a=0,b=0):\n fs=44100\n t=np.linspace(0,T,T*fs)\n A=np.floor(a*fs*T)\n D=np.floor(b*fs*T)\n S1=np.linspace(0,1,A)\n S2=np.ones(T*fs-A-D)\n S3=np.linspace(1,0,D)\n S0=signal.square(2 * np.pi * f * t)\n return(np.hstack((S1,S2,S3))*S0)", "def _make_cas_function():\n # Generate IR\n mod = lc.Module.new('generate-cas')\n llint = lc.Type.int()\n llintp = lc.Type.pointer(llint)\n fnty = lc.Type.function(llint, [llintp, llint, llint])\n fn = mod.add_function(fnty, name='.numba.parallel.ufunc.cas')\n ptr, old, repl = fn.args\n bb = fn.append_basic_block('')\n builder = lc.Builder.new(bb)\n outpack = builder.cmpxchg(ptr, old, repl, ordering='monotonic')\n out = builder.extract_value(outpack, 0)\n failed = builder.extract_value(outpack, 1)\n builder.ret(builder.select(failed, old, out))\n\n # Build & Link\n llmod = ll.parse_assembly(str(mod))\n\n target = ll.Target.from_triple(ll.get_process_triple())\n tm = target.create_target_machine()\n engine = ll.create_mcjit_compiler(llmod, tm)\n ptr = engine.get_function_address(fn.name)\n return engine, ptr", "def cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def TorchRound():\n class identity_quant(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input):\n out = torch.round(input)\n return out\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output\n\n return identity_quant().apply", "def generate_S_u(t):\n t_binning = unique_binning(t)\n return generate_S_from_binning(t_binning)", "def Sbox_one_round2(R,fk,log=False):\n RE = E(R)\n Z = Bits(0,32)\n s = RE ^ fk\n ri,ro = (0,0)\n for n in xrange(8):\n (nri,nro) = (ri+6,ro+4)\n x = s[ri:nri]\n #if n == 1: print x.ival\n i = x[(5,0)].ival\n j = x[(4,3,2,1)].ival\n Z[ro:nro] = Bits(S(n,j + (i << 4)),4)[::-1]\n (ri,ro) = (nri,nro)\n return Z", "def getSarsaLambda(self, shortMemory):\n # get temporal difference error\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(\n self.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(shortMemory[-1].action)\n\n # states\n netInput = []\n for memory in shortMemory:\n netInput.append(\n torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))\n\n netInput = torch.stack(netInput)\n\n # updates for every state in memory with respect to its eligibility\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility)\n\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n\n return netInput, labels", "def sym_sipexprs():\n u, v = symbols('u v')\n\n sipu = 0\n sipv = 0\n for m in range(8):\n for n in range(0, 8-m):\n ac = symbols('a_%d_%d' % (m, n))\n bc = symbols('b_%d_%d' % (m, n))\n sipu += ac*u**m*v**n\n sipv += bc*u**m*v**n\n sipu.expand()\n sipv.expand()\n return sipu, sipv", "def optimize(spv_bin):\n\n tmp_dir = utils.tempdir()\n tmp_in = tmp_dir.relpath(\"input.spv\")\n tmp_out = tmp_dir.relpath(\"output.spv\")\n with open(tmp_in, \"wb\") as out_file:\n out_file.write(bytes(spv_bin))\n\n sdk = os.environ.get(\"VULKAN_SDK\", None)\n cmd = os.path.join(sdk, \"bin/spirv-opt\") if sdk else 
\"spirv-opt\"\n args = [cmd, \"-O\", tmp_in, \"-o\", tmp_out]\n proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n (out, _) = proc.communicate()\n\n if proc.returncode != 0:\n msg = \"Opitmizationerror using spirv-opt:\\n\"\n msg += py_str(out)\n raise RuntimeError(msg)\n\n return bytearray(open(tmp_out, \"rb\").read())", "def payToSStx(addr):\n # Only pay to pubkey hash and pay to script hash are\n # supported.\n scriptType = PubKeyHashTy\n if isinstance(addr, addrlib.AddressPubKeyHash):\n if addr.sigType != crypto.STEcdsaSecp256k1:\n raise NotImplementedError(\n \"unable to generate payment script for \"\n \"unsupported digital signature algorithm\"\n )\n elif isinstance(addr, addrlib.AddressScriptHash):\n scriptType = ScriptHashTy\n else:\n raise NotImplementedError(\n \"unable to generate payment script for \"\n \"unsupported address type %s\" % type(addr)\n )\n\n if scriptType == PubKeyHashTy:\n return payToStakePKHScript(addr.scriptAddress(), opcode.OP_SSTX)\n return payToStakeSHScript(addr.scriptAddress(), opcode.OP_SSTX)", "def get_random_bvfunction(width, num_inputs, num_outputs, num_assignments, seed,\n external_variable_prefix=None, operation_set_index=0, num_rounds=None,\n extra_operations=None):\n assert num_inputs + num_assignments >= num_outputs\n\n import random\n import functools\n from cascada.bitvector.operation import (\n BvAnd, BvOr, BvXor, BvShl, BvLshr, RotateLeft, RotateRight, BvAdd,\n BvSub, BvNeg, BvNot, Concat, BvIdentity, make_partial_operation\n )\n from cascada.bitvector.secondaryop import BvIf, BvMaj\n\n PRNG = random.Random()\n PRNG.seed(seed)\n\n # SimpleReverse contains Concat and Extract\n # implemented as a class to later use issubclass()\n class SimpleReverse(object):\n def __new__(cls, bv):\n return functools.reduce(Concat, [bv[i] for i in range(bv.width)])\n\n if operation_set_index == 0:\n list_ops = (\n BvNot, BvXor, BvAnd, BvOr,\n BvAdd, BvSub, BvNeg,\n BvIdentity,\n BvIf, BvMaj,\n RotateLeft, RotateRight,\n BvShl, BvLshr,\n SimpleReverse\n # Concat, Extract, Ite,\n # BvComp, BvUlt, BvUle, BvUgt, BvUge,\n # BvMul, BvUdiv, BvUrem,\n # PopCount, Reverse, PopCountSum2, PopCountSum3, PopCountDiff, LeadingZeros,\n )\n elif operation_set_index == 1:\n list_ops = (\n BvNot, BvXor, BvAnd, BvOr,\n BvAdd, BvSub, BvNeg,\n BvIdentity,\n BvIf, BvMaj,\n RotateLeft, RotateRight,\n make_partial_operation(BvShl, (None, core.Constant(PRNG.randint(1, width - 1), width))),\n make_partial_operation(BvLshr, (None, core.Constant(PRNG.randint(1, width - 1), width))),\n )\n elif operation_set_index == 2:\n list_ops = (\n BvNot, BvXor, BvAnd, BvOr,\n BvAdd, BvSub,\n BvIdentity,\n RotateLeft, RotateRight,\n BvShl, BvLshr,\n SimpleReverse\n )\n elif operation_set_index == 3:\n list_ops = (\n BvNot, BvXor, BvAnd, BvOr,\n BvAdd, BvSub, BvNeg,\n BvIdentity,\n BvIf, BvMaj,\n RotateLeft, RotateRight,\n BvShl, BvLshr,\n SimpleReverse\n )\n else:\n raise ValueError(\"operation_set_index must be 0, 1, 2, or 3\")\n\n extra_operations_vrepr = None\n if extra_operations is not None:\n list_ops += tuple(extra_operations)\n extra_operations_vrepr = f\"({','.join(op.__name__ for op in extra_operations)},)\"\n\n while True: # outer loop to check RandomBvFunction does not return Constant\n list_of_lambda_assignments = []\n\n class RandomBvFunction(BvFunction):\n input_widths = [width] * num_inputs\n output_widths = [width] * num_outputs\n \n @classmethod\n def vrepr(cls):\n evp = external_variable_prefix.__repr__()\n return f\"get_random_bvfunction({width}, 
{num_inputs}, {num_outputs}, \" \\\n f\"{num_assignments}, {seed}, {evp}, {operation_set_index}, {num_rounds}, \" \\\n f\"{extra_operations_vrepr})\"\n\n if external_variable_prefix is not None:\n RandomBvFunction.round_keys = []\n\n def get_random_var_index():\n if len(list_of_lambda_assignments) >= num_inputs + 2:\n min_index = num_inputs\n else:\n min_index = 0\n return PRNG.randint(min_index, len(list_of_lambda_assignments) + num_inputs - 1)\n\n def get_random_var_indices(num_indices, my_unique_indices):\n indices = []\n while True:\n new_index = get_random_var_index()\n if not my_unique_indices or new_index not in indices:\n indices.append(new_index)\n if len(indices) == num_indices:\n break\n return indices\n\n while True:\n _op = list_ops[PRNG.randint(0, len(list_ops) - 1)]\n\n if _op in [RotateLeft, RotateRight]: # only operations with scalar inputs\n class Op(object): # need a class to store the randomness\n op = _op\n my_index = get_random_var_index()\n offset = PRNG.randint(1, width - 1) # != 0\n # kwargs required for external vars (see below)\n def __new__(cls, args, **kwargs): return cls.op(args[cls.my_index], cls.offset)\n\n elif _op == SimpleReverse:\n class Op(object):\n op = _op\n my_index = get_random_var_index()\n def __new__(cls, args, **kwargs): return cls.op(args[cls.my_index])\n\n elif _op in [BvShl, BvLshr]: # ensure 2nd operand ct\n class Op(object):\n op = _op\n my_index = get_random_var_index()\n offset = core.Constant(PRNG.randint(1, width - 1), width)\n def __new__(cls, args, **kwargs): return cls.op(args[cls.my_index], cls.offset)\n\n else:\n assert issubclass(_op, operation.Operation)\n assert _op.arity[1] == 0\n\n if _op.arity[0] >= 2 and PRNG.randint(0, 3) == 0:\n # 1 in 4 to have a ct or external var (each 50%)\n if external_variable_prefix is None or PRNG.randint(0, 1) == 0:\n extra_arg = core.Constant(PRNG.randint(1, 2**width - 2), width) # != 0, allones\n\n class Op(object):\n op = _op\n indices = get_random_var_indices(_op.arity[0] - 1, False)\n ct = extra_arg\n def __new__(cls, args, **kwargs): return cls.op(*([args[i] for i in cls.indices] + [cls.ct]))\n else:\n evi = PRNG.randint(0, len(RandomBvFunction.round_keys))\n if evi == len(RandomBvFunction.round_keys):\n RandomBvFunction.round_keys.append(core.Variable(f\"{external_variable_prefix}{evi}\", width))\n\n class Op(object):\n op = _op\n indices = get_random_var_indices(_op.arity[0] - 1, False)\n ev_index = evi\n # kwargs[\"round_keys\"] instead of RandomBvFunction.round_keys\n # to provide round_keys from cls in running time (in an argument of Op(\n # (otherwise round_keys cannot be changed)\n def __new__(cls, args, **kwargs): return cls.op(*([args[i] for i in cls.indices] +\n [kwargs[\"round_keys\"][cls.ev_index]]))\n else:\n unique_indices = _op in [BvAnd, BvOr, BvXor, BvSub]\n if unique_indices and len(list_of_lambda_assignments) + num_inputs == 1:\n # avoid using duplicated inputs for these operations\n continue\n\n class Op(object):\n op = _op\n indices = get_random_var_indices(_op.arity[0], unique_indices)\n def __new__(cls, args, **kwargs): return cls.op(*[args[i] for i in cls.indices])\n\n if external_variable_prefix is not None and len(RandomBvFunction.round_keys) == 0:\n # ensure at least 1 external variable\n _op = BvXor\n evi = 0\n RandomBvFunction.round_keys.append(core.Variable(f\"{external_variable_prefix}{evi}\", width))\n\n class Op(object):\n op = _op\n indices = get_random_var_indices(_op.arity[0] - 1, False)\n ev_index = evi\n def __new__(cls, args, **kwargs): return 
cls.op(*([args[i] for i in cls.indices] +\n [kwargs[\"round_keys\"][cls.ev_index]]))\n\n ## debugging\n # print(f\"{len(list_of_lambda_assignments)+1}/{num_assignments} | op: {Op.op}\")\n # if hasattr(Op, \"my_index\"): print(\"\\tmy_index:\", Op.my_index)\n # if hasattr(Op, \"indices\"): print(\"\\tindices:\", Op.indices)\n # if hasattr(Op, \"ct\"): print(\"\\tct:\", Op.ct)\n # if hasattr(Op, \"offset\"): print(\"\\toffset:\", Op.offset)\n # if hasattr(Op, \"ev_index\"): print(\"\\tev_index:\", Op.ev_index)\n\n list_of_lambda_assignments.append(Op)\n if len(list_of_lambda_assignments) == num_assignments:\n break\n\n list_of_lambda_assignments = tuple(list_of_lambda_assignments)\n\n @classmethod\n def eval_method(cls, *args):\n assert isinstance(args, collections.abc.Sequence)\n all_vars = list(args)\n round_keys = getattr(cls, \"round_keys\", None)\n ## debugging\n # print(f\"eval_method({args}):\")\n for index_assign, lambda_assign in enumerate(list_of_lambda_assignments):\n result = lambda_assign(all_vars, round_keys=round_keys)\n\n Op = lambda_assign\n ffo_arg0, ffo_arg1, ffo_arg2 = \"\", \"\", \"\"\n if hasattr(Op, \"my_index\"):\n ffo_arg0 = all_vars[Op.my_index]\n else:\n assert hasattr(Op, \"indices\")\n ffo_arg0 = all_vars[Op.indices[0]]\n if len(Op.indices) >= 2:\n ffo_arg1 = all_vars[Op.indices[1]]\n if len(Op.indices) >= 3:\n ffo_arg2 = all_vars[Op.indices[2]]\n ffo_extra_args = \"\"\n if hasattr(Op, \"ct\"):\n ffo_extra_args = Op.ct\n elif hasattr(Op, \"offset\"):\n ffo_extra_args = Op.offset\n elif hasattr(Op, \"ev_index\"):\n ffo_extra_args = round_keys[Op.ev_index]\n format_string = \"assignment {}/{}: {}({}, {}, {}, extra={}) = {}\"\n ffo = [index_assign, len(list_of_lambda_assignments) - 1,\n Op.op.__name__, ffo_arg0, ffo_arg1, ffo_arg2,\n ffo_extra_args, result]\n cls.log_msg(format_string, ffo)\n\n ## debugging\n # print(f\"\\t{result.width}-width op: {Op.op.__name__}\")\n # if hasattr(Op, \"my_index\"): print(\"\\t\\targ:\", all_vars[Op.my_index])\n # if hasattr(Op, \"indices\"): print(\"\\t\\targs:\", [all_vars[i] for i in Op.indices])\n # if hasattr(Op, \"ct\"): print(\"\\t\\tct:\", Op.ct)\n # if hasattr(Op, \"offset\"): print(\"\\t\\toffset:\", Op.offset)\n # if hasattr(Op, \"ev_index\"): print(\"\\t\\text_arg:\", round_keys[Op.ev_index])\n # print(f\"\\t\\tresult:\", result)\n all_vars.append(result)\n return all_vars[-num_outputs:]\n\n RandomBvFunction.eval = eval_method\n\n # check RandomBvFunction does not return Constant\n try:\n RandomBvFunction(*[core.Variable(f\"x{i}\", width) for i in range(num_inputs)],\n symbolic_inputs=True, simplify=False)\n except ValueError as e:\n if not str(e).startswith(\"if symbolic_inputs, expected no Constant values\"):\n raise e\n else:\n continue\n\n if num_rounds is None:\n return RandomBvFunction\n else:\n _num_rounds = num_rounds\n\n class RandomRBF(RandomBvFunction, RoundBasedFunction):\n num_rounds = _num_rounds\n\n @classmethod\n def eval(cls, *args):\n for index_round in range(cls.num_rounds):\n args = super().eval(*args)\n\n if index_round < cls.num_rounds - 1:\n if len(args) < len(cls.input_widths):\n args = args * ((len(cls.input_widths) // len(args)) + 1)\n args = args[:len(cls.input_widths)]\n\n cls.add_round_outputs(*args)\n\n format_string = \"round {}/{}: outputs = (\" + \"{}, \"*len(args) + \")\"\n ffo = [index_round, cls.num_rounds - 1] + list(args)\n cls.log_msg(format_string, ffo)\n return args\n\n @classmethod\n def set_num_rounds(cls, new_num_rounds):\n cls.num_rounds = new_num_rounds\n\n # check RandomRBF 
does not return Constant\n try:\n RandomRBF(*[core.Variable(f\"x{i}\", width) for i in range(num_inputs)],\n symbolic_inputs=True, simplify=False)\n except ValueError as e:\n if not str(e).startswith(\"if symbolic_inputs, expected no Constant values\"):\n raise e\n else:\n continue\n else:\n return RandomRBF", "def SqrtSwap():\n\n return Operator(np.array([[[[ 1.0, 0.0],\n [ 0.0, 0.5 * (1 + 1j)]],\n [[ 0.0, 0.0],\n [ 0.5 * (1 - 1j), 0.0]]],\n [[[ 0.0, 0.5 * (1 - 1j)],\n [ 0.0, 0.0]],\n [[ 0.5 * (1 + 1j), 0.0],\n [ 0.0, 1.0]]]]))", "def get_san(infp, outfp):\n\n return ...", "def scalar_conv(self, round_mode, dst, src):\n # check Scalar\n TikCheckUtil.check_type_match(\n dst, Scalar, 'scalar conv dst must be a scalar')\n TikCheckUtil.check_type_match(\n src, Scalar, 'scalar conv src must be a scalar')\n # check dtype\n dtype_str = DTYPE_MAP[src.dtype] + '2' + DTYPE_MAP[\n dst.dtype] + ROUND_MODE_MAP[round_mode]\n TikCheckUtil.check_equality(api_check_support(\"tik.\" +\n \"scalar_conv\",\n dtype_str), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dtype_str, \"scalar_conv\"))\n # code gen\n with self.new_scope():\n # f322s32z: convert f32 to s32, any number out of s32 range\n # will be +/- s32 max number.\n # round mode = Z. round to zero(c language trunc)\n if dtype_str in ('s322f32', 'f322f16', 'f162f32', 'f322s32z'):\n self.emit(\n tvm.call_extern(dst.dtype, \"reg_set\", dst.get(),\n dtype_convert(src, dst.dtype)), ONE_IR)\n else:\n self.emit(\n tvm.call_extern(\n dst.dtype, \"reg_set\", dst.get(),\n tvm.call_extern(src.dtype, 'conv_' + dtype_str,\n src.get())), ONE_IR)", "def symsqrt_v2(A, func='symeig'):\n if func == 'symeig':\n s, v = A.symeig(eigenvectors=True) # This is faster in GPU than CPU, fails gradcheck. See https://github.com/pytorch/pytorch/issues/30578\n elif func == 'svd':\n _, s, v = A.svd() # But this passes torch.autograd.gradcheck()\n else:\n raise ValueError()\n\n above_cutoff = s > s.max() * s.size(-1) * torch.finfo(s.dtype).eps\n\n ### This doesn't work for batched version\n\n ### This does but fails gradcheck because of inpalce\n\n ### This seems to be equivalent to above, work for batch, and pass inplace. CHECK!!!!\n s = torch.where(above_cutoff, s, torch.zeros_like(s))\n\n sol =torch.matmul(torch.matmul(v,torch.diag_embed(s.sqrt(),dim1=-2,dim2=-1)),v.transpose(-2,-1))\n\n return sol", "def binary_round(x):\n g = tf.get_default_graph()\n\n with ops.name_scope(\"BinaryRound\") as name:\n with g.gradient_override_map({\"Round\": \"Identity\"}):\n return tf.round(x, name=name)", "def sqrtx():\n return Operator([[(1.+1.j)/2,(1.-1.j)/2],[(1.-1.j)/2,(1.+1.j)/2]])" ]
[ "0.5443392", "0.4841223", "0.48364985", "0.48349917", "0.4701788", "0.4483575", "0.44465578", "0.44207594", "0.4366836", "0.43557042", "0.4316889", "0.43090743", "0.42799342", "0.4275801", "0.42707747", "0.4246866", "0.42396498", "0.41845697", "0.41824216", "0.4165671", "0.41649425", "0.41292188", "0.41040978", "0.4103091", "0.40979448", "0.40895852", "0.40773976", "0.40671706", "0.4062804", "0.40606064" ]
0.5147074
1
Set `RoundBasedFunction.num_rounds` and update `input_widths` and ``output_widths`` if necessary.
def set_num_rounds(cls, new_num_rounds): raise NotImplementedError("subclasses need to override this method")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_num_rounds_and_return(cls, new_num_rounds):\n cls.set_num_rounds(new_num_rounds)\n return cls", "def roundParameters(self, roundNum):\n ruleName = random.choice(self.state.ruleNames)\n return SupportedRules[ruleName].makeRoundParameters(self, roundNum)", "def _refresh_width(self):\n self._width = curses.tigetnum('cols')\n self._writer = formatter.DumbWriter(self._output, maxcol=self._width)", "def rounds(self):\n if self.round_number > 0:\n for i in range(self.round_number):\n yield Round(i + 1)", "def rounds(self, rounds: List[Round]):\n\n self._rounds = rounds", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def vrepr(cls):\n return f\"{super().vrepr()}.set_num_rounds_and_return({cls.num_rounds})\"", "def update_numeric_width(self, eval_dict):\n # If width is already a number, do nothing\n if isinstance(self.width, int):\n self.width_numeric = self.width\n return\n self.width_numeric = eval(self.width.replace(\"`\", \"\"), eval_dict)\n if not isinstance(self.width_numeric, int):\n logger.error(\"Could not evaluate width {} of wire {}\".format(self.width_numeric, self.name))", "def run_trt_precision_tests(self) -> None:\n\n def trt_converter_params_updater(params: trt.TrtConversionParams):\n for precision_mode in self._precision_modes:\n yield params._replace(\n precision_mode=precision_mode,\n use_calibration=(precision_mode == trt.TrtPrecisionMode.INT8))\n\n self._run_impl(\n test_name=\"precision_mode_test\",\n default_trt_converter_params=DEFAUL_TRT_CONVERT_PARAMS,\n trt_converter_params_updater=trt_converter_params_updater)", "def new_round(self, round_number: int):\n self.turn = 0\n self.round = round_number", "def round(self, round):\n\n self._round = round", "def init_round_numbers(self):\r\n for round_num in range(1, 13):\r\n lbl_round_num = tk.Label(self.master, text=str(round_num), font='courier 10 bold',\r\n fg='green', pady=2)\r\n lbl_round_num.grid(row=round_num+1, column=0)\r\n row = 14\r\n for trump in [\"C\", \"D\", \"H\", \"S\", \"NT\"]:\r\n lbl_round_num = tk.Label(self.master, text=\"13\"+trump, font='courier 10 bold',\r\n fg='green')\r\n lbl_round_num.grid(row=row, column=0)\r\n row += 1", "def update_print_width(self,val=None):\n if val is not None:\n self.print_w = val\n for i in self:\n classname=i.__class__.__name__\n if classname == 'int':\n print(i.__class__.__name__,file=out2)\n print(self.__class__.__name__,file=out2)\n print(i,file=out2)\n print(self.is_vector,file=out2)\n print(len(self),file=out2)\n i.update_print_width(val)\n else:\n for i in self:\n if type(i) == Matrix or type(i) == MatrixElement:\n i.update_print_width()\n if i.print_w > self.print_w:\n self.print_w = i.print_w\n for i in self:\n if type(i) == Matrix or type(i) == MatrixElement:\n i.update_print_width(self.print_w)", "def init_round_results(self):\r\n for player in range(0, 4):\r\n for round_num in range(0, 17):\r\n spin_bid = tk.Spinbox(self.master, from_=-1, to=min(round_num+1, 13), width=10)\r\n spin_bid.grid(row=round_num+2, column=player*3+1, padx=2)\r\n self.spin_bids[player].append(spin_bid)\r\n spin_trick = tk.Spinbox(self.master, from_=-1, to=min(round_num+1, 13), width=10)\r\n spin_trick.grid(row=round_num+2, column=player*3+2, padx=2)\r\n self.spin_tricks[player].append(spin_trick)\r\n lbl_score = tk.Label(self.master, text=\"0\", font='courier 10 
bold', fg='green',\r\n width=10, borderwidth=2, relief=\"groove\", anchor=\"e\")\r\n if round_num % 4 == player: # mark starting player in each round\r\n spin_bid.configure(bg='LightSteelBlue2')\r\n spin_trick.configure(bg='LightSteelBlue2')\r\n lbl_score.configure(bg='LightSteelBlue2')\r\n lbl_score.grid(row=round_num+2, column=player*3+3, sticky=tk.W+tk.E, padx=2)\r\n self.lbl_scores[player].append(lbl_score)", "def rounds(self) -> Row:\n return rounds(self.stage)", "def padding_width(self):\n ...", "def _increment_round_number():\n store.round += 1", "def setinputsizes(self, sizes):\n pass", "def SetAnalysisRound(self):\r\n if self.FilepathSwitchBox.currentText() == 'Tag':\r\n self.Tag_round_infor.append(self.AnalysisRoundBox.value())\r\n elif self.FilepathSwitchBox.currentText() == 'Lib':\r\n self.Lib_round_infor.append(self.AnalysisRoundBox.value())\r\n \r\n self.normalOutputWritten('Tag_round_infor: {}\\nLib_round_infor: {}\\n'.format(str(self.Tag_round_infor), str(self.Lib_round_infor)))", "def report_rounds(self):\n return print(f\"Total Rounds played: {sum([self.wins, self.draws, self.losses])}\")" ]
[ "0.5804964", "0.5515761", "0.5343894", "0.5200584", "0.5151792", "0.49409455", "0.49409455", "0.49409455", "0.49409455", "0.49409455", "0.49409455", "0.49409455", "0.49409455", "0.49409455", "0.49409455", "0.49409455", "0.4925701", "0.4892504", "0.48858312", "0.48690236", "0.48663366", "0.4854891", "0.48378652", "0.47014406", "0.4627937", "0.46260348", "0.46177056", "0.46155435", "0.45665684", "0.45527485" ]
0.66365224
0
Call `set_num_rounds` and return ``cls``.
def set_num_rounds_and_return(cls, new_num_rounds): cls.set_num_rounds(new_num_rounds) return cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_num_rounds(cls, new_num_rounds):\n raise NotImplementedError(\"subclasses need to override this method\")", "def vrepr(cls):\n return f\"{super().vrepr()}.set_num_rounds_and_return({cls.num_rounds})\"", "def newRound():\r\n pass", "def rounds(self):\n if self.round_number > 0:\n for i in range(self.round_number):\n yield Round(i + 1)", "def new_round(self, round_number: int):\n self.turn = 0\n self.round = round_number", "def NewRound(self, numLarge):\n self._init_numbers()\n \n self._set_lg_sm_numbers(numLarge)\n self._set_target_random()\n\n return self._rndNums, self._target", "def __new__(cls, conf):\n # Call is already for a subclass, so pass it through\n RunnerClass = cls\n return super(Runner, cls).__new__(RunnerClass)", "def __init__(self, rounds: List[Round]=None): # noqa: E501\n self.swagger_types = {\n 'rounds': List[Round]\n }\n\n self.attribute_map = {\n 'rounds': 'rounds'\n }\n\n self._rounds = rounds", "def __new__(cls, num: int, *args, **kwargs):\n if(verify_value(num)):\n return super().__new__(cls, *args, **kwargs)", "def rounds(self) -> Row:\n return rounds(self.stage)", "def round_next(cls):\n if cls.is_finished():\n logging.error('Grand challenge finished.')\n return None\n\n round = cls.get_current_round()\n cls.force_round_close(round)\n\n challenges = []\n if cls.is_final():\n # Only two players left in the game\n arb_win = cls.eligible(0)\n arb_lose = cls.eligible(1)\n challenges.append(GrandChallenge.create(arb_win[0], arb_lose[0], round.round_number + 1))\n else:\n # More than two players, create new challenges\n if round.round_number % 2 == 1:\n challenges += cls.play_round(1, round.round_number + 1)\n challenges += cls.play_round(0, round.round_number + 1)\n else:\n challenges += cls.play_round(1, round.round_number + 1)\n\n if challenges:\n # Update round number\n round.round_number += 1\n cls.set_current_round(round.round_number)\n logging.debug('Played round %s' % round.round_number)\n return round", "def __init__(self, max_iters, tries):\r\n\r\n if max_iters < 3:\r\n print 'K must be 3 or more.'\r\n self.max_iters = max_iters\r\n self.tries = tries", "def _increment_round_number():\n store.round += 1", "def __init__(self, retry_count):\n self.retry_count = retry_count", "def __new__(cls, size, reset_after):\n self = object.__new__(cls)\n self.reset_after = reset_after\n self.lock = ScarletLock(KOKORO, size)\n return self", "def __init__(self, timeout, tries):\r\n self._timeout = timeout\r\n self._tries = tries", "def rounds(self) -> List[Round]:\n return self._rounds", "def __init__(self, max_num_of_rounds_to_retain=100, num_of_last_check_rounds_consider=2):\n self.data = list()\n self.max_num_of_rounds_to_retain = max_num_of_rounds_to_retain\n self.num_of_last_check_rounds_consider = num_of_last_check_rounds_consider", "def get_name(cls):\n return f\"{super().get_name()}_{cls.num_rounds}R\"", "def test_timeout_retries(self):\n\n batch = Batch(Mock())\n self.check_instance(batch=batch)\n\n self.assertEqual(batch.timeout_retries, 0)\n self.check_instance(batch, timeout_retries=0)\n\n batch.timeout_retries = 10\n self.assertEqual(batch.timeout_retries, 10)\n self.check_instance(batch, timeout_retries=10)\n\n batch.timeout_retries = 0\n self.assertEqual(batch.timeout_retries, 0)\n self.check_instance(batch, timeout_retries=0)\n\n batch.timeout_retries = 1\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n\n # exceptions\n ## error messages\n value_error = \"'timeout_retries' must be positive, i.e. 
greater or equal that zero (>=0).\"\n type_error = f\"'timeout_retries' must be of type {int}.\"\n\n #######################################################################\n # test wrong value\n with self.assertRaises(ValueError) as error:\n batch.timeout_retries = -1\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n check_error_message(self, error, value_error)\n\n #######################################################################\n # test wrong type\n with self.assertRaises(TypeError) as error:\n batch.timeout_retries = True\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n check_error_message(self, error, type_error)\n\n with self.assertRaises(TypeError) as error:\n batch.timeout_retries = '2'\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n check_error_message(self, error, type_error)", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n r_dummy = cls(2, 4)\n r_dummy.update(**dictionary)\n return (r_dummy)\n\n if cls.__name__ == \"Square\":\n s_dummy = cls(3)\n s_dummy.update(**dictionary)\n return (s_dummy)", "def count_one_round(self):\n\t\tself.round_count+=1\n\t\treturn self.round_count", "def __init__(self, precision=0, random_generator=random.Random(), minimum_stochastic_distance=0):\n super(StochasticRound, self).__init__(precision=precision)\n self.random_generator = random_generator\n self.minimum_stochastic_distance = minimum_stochastic_distance", "def create(cls, **dictionary):\n if cls.__name__ == \"Square\":\n dummy = cls(1)\n if cls.__name__ == \"Rectangle\":\n dummy = cls(1, 1)\n dummy.update(**dictionary)\n return dummy", "def num_classes(self):\n\t\treturn 10", "def rounds(self, rounds: List[Round]):\n\n self._rounds = rounds", "def newRound(self, winningTeamNumber):\n self.counter[\"Team1\"] = 0\n self.counter[\"Team2\"] = 0\n self.wonRounds[\"Team{}\".format(winningTeamNumber)] += 1\n self.sidesChanged = not self.sidesChanged\n self.updateModel()\n if (self.isGameOver()):\n self.newGame(winningTeamNumber)", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n dummy = cls(1, 2)\n dummy.update(**dictionary)\n return dummy\n if cls.__name__ == \"Square\":\n dummy = cls(1)\n dummy.update(**dictionary)\n return dummy", "def simulate(self):\n self.round += 1", "def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n instance = super().__call__(*args, **kwargs)\n cls._instances[cls] = instance\n return cls._instances[cls]" ]
[ "0.7139906", "0.58930886", "0.5714936", "0.53902805", "0.5324967", "0.52531534", "0.5185726", "0.50927013", "0.4869179", "0.48575476", "0.485736", "0.48567307", "0.4828904", "0.48038808", "0.47967023", "0.4774418", "0.47364053", "0.47306034", "0.47172225", "0.47037777", "0.46852618", "0.4683242", "0.46768537", "0.46658134", "0.46610618", "0.46536246", "0.4648624", "0.4648249", "0.46479928", "0.46463877" ]
0.728035
0
Return the list of round outputs obtained in the last evaluation. See also `add_round_outputs`.
def get_rounds_outputs(cls): if cls._rounds_outputs is None: raise ValueError("eval must be called before get_rounds_outputs") return cls._rounds_outputs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_round_separators(self):\n if getattr(self, \"_rounds_outputs\", None) is None:\n return None\n if len(self._rounds_outputs) == 0:\n return None\n return self._rounds_outputs[:-1]", "def outputs(self):\n return self._outputs if self._outputs else [self.utop]", "def rounds(self) -> List[Round]:\n return self._rounds", "def _list_outputs(self):\n \n outputs = self._outputs().get()\n return outputs", "def get_outputs(self):\r\n # check to see if the outputs exist\r\n try:\r\n self.times\r\n except AttributeError:\r\n self.times = []\r\n try:\r\n self.outputs\r\n except AttributeError:\r\n self.outputs = []\r\n\r\n # append the time to the times list\r\n self.times.append(self.t)\r\n\r\n outlist = [] # initalize\r\n\r\n # run for the output functions\r\n for func in self.out_funcs:\r\n # get the function values\r\n fv = func()[0]\r\n\r\n # if the output is a list, use extend\r\n if type(fv) is list:\r\n outlist.extend(fv)\r\n\r\n # otherwise, use append\r\n else:\r\n outlist.append(fv)\r\n\r\n self.outputs.append(outlist)", "def outputs() -> List[str]:\n return Invocation.current.expanded_outputs", "def outputs(self):\r\n return self._outputs", "def get_outputs(self):\n return self.outputs", "def outputs(self):\n\n outputs = []\n for arg in self.arguments:\n if arg.OUT:\n outputs.append(arg)\n\n return outputs", "def outputs(self):\n return self._outputs", "def outputs(self):\n return self._outputs", "def get_outputs(self):\n return self.attributes[\"outputs\"]", "def retrieve_results(self):\n # return the results of the last calculation\n last_calc = self.ctx.calculations[-1]\n for name, port in self.spec().outputs.items():\n if port.required and name not in last_calc.outputs:\n self.report('the spec specifies the output {} as required '\n 'but was not an output of {}<{}>'.format(name, self._calculation.__name__,\n last_calc.pk))\n\n if name in last_calc.outputs:\n # node = last_calc.outputs[name]\n self.out(name, last_calc.outputs[name])\n # self.report(\"attaching the node {}<{}> as '{}'\".format(node.__class__.__name__, node.pk, name))\n return", "def outputs(self):\n return self.outputs", "def op_output_values(self):\n return self.solid_output_values", "def add_round_outputs(cls, *args):\n if len(args) == 1 and isinstance(args[0], collections.abc.Sequence):\n args = args[0]\n if not all(isinstance(bv, core.Term) for bv in args):\n raise ValueError(\"the arguments of add_round_outputs must be a bit-vectors\")\n if cls._rounds_outputs is None:\n cls._rounds_outputs = []\n cls._rounds_outputs.append(tuple(args))", "def expected_outputs(self):\n return self._expected_outputs", "def report_rounds(self):\n return print(f\"Total Rounds played: {sum([self.wins, self.draws, self.losses])}\")", "def get_outputs(self):\n raise NotImplementedError", "def getOperandsWritten(self):\n # pylint: disable=no-self-use\n return []", "def get_unnamed_outputs(self):\n return []", "def num_outputs(cls) -> list[int]:\n return [5] * 10", "def evaluate_outputs(self):\n raise NotImplementedError(\n 'evaluate_outputs called but not implemented in the derived class.'\n )", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def get_evaluations(self):\r\n return self.evaluations", "def get_outputs(self):\n outputs = []\n missing = []\n for i, name in enumerate(self.output_names[:]):\n try:\n value = self.proto.output_env.look_up(name).unwrapped\n except Exception:\n if 
self.optional_flags[i]:\n value = None\n missing.append((i, name))\n else:\n raise\n outputs.append(value)\n for i, name in reversed(missing):\n del outputs[i]\n del self.output_names[i]\n del self.optional_flags[i]\n if missing:\n return outputs, reversed(missing)\n return outputs", "def coin_outputs(self):\n return self._coin_outputs" ]
[ "0.6637656", "0.6438899", "0.62058717", "0.61304086", "0.61035067", "0.6090151", "0.6051942", "0.600966", "0.59717953", "0.59507537", "0.59507537", "0.59088373", "0.59074545", "0.5906354", "0.5867009", "0.57934654", "0.57923913", "0.5694699", "0.5679354", "0.5678499", "0.5628396", "0.56060326", "0.55963606", "0.5497236", "0.5497236", "0.5497236", "0.5497236", "0.54924893", "0.5483792", "0.5466489" ]
0.77957076
0
Check whether the last assignments are SSAReturn assignments of the output variables.
def _are_last_assignments_valid(assignments, output_vars, ignore_exception=True): assert isinstance(assignments, collections.OrderedDict) if len(assignments) == 0: return False last_assignments = [] for assign_outvar in reversed(assignments): last_assignments.append([assign_outvar, assignments[assign_outvar]]) if len(last_assignments) == len(output_vars): break last_assignments = list(reversed(last_assignments)) # proper order for i, (assign_outvar, expr) in enumerate(last_assignments): if not(assign_outvar == output_vars[i]) or \ not(isinstance(expr, SSAReturn)) or \ not(expr.args[0] not in output_vars): if not ignore_exception: last_assignments_vrepr = [(k.vrepr(), v.vrepr()) for (k, v) in last_assignments] raise ValueError("last assignments are not of the form " "output_var <- SSAReturn(non_output_var)" f"\noutput vars = {output_vars}" f"\nlast assignments {last_assignments_vrepr}" f"\n{assignments}") return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_return(\n ret_type: None | IntType | ReferenceType, returned: Sequence[BitString]\n) -> None:\n if len(returned) > 1:\n raise ValueError(\"code block returns multiple values\")\n\n if ret_type is None:\n if len(returned) != 0:\n raise ValueError(\n 'function has no return type, but its code block defines \"ret\"'\n )\n elif isinstance(ret_type, ReferenceType):\n if len(returned) == 0:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'does not define \"ret\"'\n )\n (ret_bits,) = returned\n if ret_type.type.width != ret_bits.width:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'defines \"ret\" with width {ret_bits.width}'\n )\n else: # returns a value\n if len(returned) == 0:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'does not assign to \"ret\"'\n )\n (ret_bits,) = returned\n if ret_type.width != ret_bits.width:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'defines \"ret\" with width {ret_bits.width}'\n )", "def _is_return(self, words):\n if words[0] == 'return':\n if len(words) != 1:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_RETURN command.\".format(self._file_line))\n return True\n else:\n return False", "def is_return(self, pick):\n return ('-' in pick.name and sorted(pick.name.split('-'), reverse=True)[0].startswith('ret'))", "def is_premature_return(node: Node, edge: Node, nodes: List[Node]) -> bool:\n if len(nodes) < 3:\n return False\n antepenultimate_node, final_return_node, terminal_node = nodes[-3:]\n assert isinstance(terminal_node, TerminalNode)\n if not isinstance(edge, ReturnNode) or edge != final_return_node:\n return False\n if not is_trivial_return_block(edge.block):\n # Only trivial return blocks can be used for premature returns,\n # hopefully.\n return False\n if not isinstance(node, BasicNode):\n # We only treat BasicNode's as being able to return early right now.\n # (Handling ConditionalNode's seems to cause assertion failures --\n # might need changes to build_flowgraph_between.)\n return False\n # The only node that is allowed to point to the return node is the node\n # before it in the flow graph list. 
(You'd think it would be the node\n # with index = return_node.index - 1, but that's not necessarily the\n # case -- some functions have a dead antepenultimate block with a\n # superfluous unreachable return.)\n return node != antepenultimate_node", "def is_return_element(se: Any) -> bool:\n\n def _is_output_field(o):\n if isinstance(o, tuple):\n for i in o:\n if not _is_output_field(i):\n return False\n return True\n return inspect.isclass(o) and issubclass(o, BaseField)\n\n if isinstance(se, Sequence):\n if len(se) != 1:\n return False\n return _is_output_field(se[0])\n return _is_output_field(se)", "def function_returns(self):\n shards = self.line.split()\n if len(shards) > 3:\n if self.line.endswith('returns') is False:\n if shards[-1] in 'return':\n return True", "def has_failed_outputs(self):\n return False", "def testHasReturns(self):\n concise = build_code([], [], [], concise=True)\n full = build_code([], [], [], concise=False)\n self.assertNotIn('return', concise)\n self.assertIn('return', full)", "def validate_state(retval):\n success = True\n for data in retval.itervalues():\n for result in data.itervalues():\n if not result.get('result'):\n success = False\n break\n return success", "def assignment_complete(self, assignment):\n # print(\"Entered assignment_complete Function\")\n for var in assignment:\n if assignment[var] is None:\n return False\n return self.consistent(assignment)\n\n # raise NotImplementedError", "def visit_Return(self, node):\n if node.value is None:\n value = KnownValue(None)\n method_return_type.check_no_return(node, self, self.current_function_name)\n else:\n value = self.visit(node.value)\n method_return_type.check_return_value(\n node, self, value, self.current_function_name\n )\n self.return_values.append(value)\n self._set_name_in_scope(LEAVES_SCOPE, node, UNRESOLVED_VALUE)\n if (\n # TODO check generator types properly\n not (self.is_generator and self.async_kind == AsyncFunctionKind.non_async)\n and self.expected_return_value is not None\n and not self.is_value_compatible(self.expected_return_value, value)\n ):\n self._show_error_if_checking(\n node,\n \"Declared return type %s is incompatible with actual return type %s\"\n % (self.expected_return_value, value),\n error_code=ErrorCode.incompatible_return_value,\n )\n elif self.expected_return_value == KnownValue(None) and value != KnownValue(\n None\n ):\n self._show_error_if_checking(\n node,\n \"Function declared as returning None may not return a value\",\n error_code=ErrorCode.incompatible_return_value,\n )", "def over(self):\n return self.result is not None", "def verify_output(self, output):\n return output == self.output", "def hasReturnTo(self):\n return self.getReturnTo() is not None", "def isOutput(parameter):\n try:\n if '@age' in parameter['gisprompt'].keys():\n return (parameter['gisprompt']['@age'] == 'new')\n else:\n return False\n except KeyError:\n return False", "def is_complete(self, variables):\n for var in variables:\n if not self.has_assignment_for(var):\n return False\n\n return True", "def evaluate_output(self, output: int) -> Callable[[str], bool]:\n assert 0 <= output < len(self.observations)\n return lambda out: self.observations(output).name == out", "def HasArrayOuts(self, function):\n if function.callback:\n for param in function.callback.params:\n if self._IsOrContainsArray(param.type_):\n return True\n return function.returns and self._IsOrContainsArray(function.returns)", "def return_last_function_assignment(topconstruct):\n items = query([\n 
is_layering([syntax.ASSIGNMENT]),\n at_indent_0_from_function_program,\n last_program_step\n ], TreeItem(topconstruct))\n for item in items:\n # special case of returning a PY_TUPLE\n if item.construct.args[0].construct == syntax.PY_TUPLE:\n # if the first element of the tuple is a no var (_) replace it by a ret\n retprop = item.construct.args[0].args[0][0]\n if retprop.construct == syntax.PY_NOVAR:\n retprop = syntax.Construct(syntax.VAR_NAME, \"_ret\")\n retprop.resolution = RESOLUTION_NAKED\n item.construct.args[0].args[0][0] = retprop\n item.append_construct(\n syntax.Construct(syntax.FUNCTION_RETURN, retprop))\n\n else:\n assigned_varname = item.construct.args[0].args[0]\n # does not work for paths\n if assigned_varname.construct == syntax.VAR_NAME:\n assigned_string = assigned_varname.args[0]\n prop = syntax.Construct(syntax.VAR_NAME, assigned_string)\n prop.resolution = assigned_varname.resolution\n item.append_construct(\n syntax.Construct(syntax.FUNCTION_RETURN, prop))", "def verify_ret(self, ret, expected_ret):\n assert ret == expected_ret, (\n \"Function should return: \"\n + ret_vals_dictionary[expected_ret]\n + \".\\nInstead returned: \"\n + ret_vals_dictionary[ret]\n )", "def verify_ret(self, d_stmt, table):\n d_expr = d_stmt.find_first(\"p_expr\")\n self.exprs = [DanaExpr.factory(d_expr, table)] if d_expr else []\n\n expected = DanaType(table.function.type.base)\n ret_type = self.exprs[0].type if d_expr else DanaType(\"void\")\n ret_type.check_type(d_stmt.linespan, expected)", "def _analyse_stmt_Return(self, statement: ast.Return, *, next: CFNode) -> CFNode:\n if statement.value is None:\n nodes = dict(next=self._context[_LEAVE])\n else:\n nodes = dict(next=self._context[_RETURN])\n value_is_constant, _ = self._expression_as_constant(statement.value)\n if not value_is_constant:\n nodes.update(error=self._raise)\n return self._ast_node(statement, **nodes)", "def has_result(self):\n return len(self.__analysis_items) > 0", "def _is_function_output(node):\r\n return node.clients == [('output', 1)]", "def _return(*args):\r\n to_return = ()\r\n\r\n for arg in args:\r\n cond, value = arg\r\n if cond:\r\n to_return += (value,)\r\n\r\n if len(to_return) == 1:\r\n return to_return[0]\r\n return to_return", "def is_valid_output(output) -> bool:\n log.info(f\"Output validation: {output}\")\n\n try:\n float(output)\n except ValueError as value_error:\n log.error(value_error)\n return False\n\n log.info(\"Output successfully validated\")\n return True", "def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True", "def returned(self):\n if self._returned is None:\n node = _parse_text(self.extracted)\n self._returned = usefunction._returns_last(node)\n return self._returned", "def logging_outputs_can_be_summed() -> bool:\n return False", "def backtrack(csp):\n\n # Base case\n if (is_complete(csp)):\n return True\n\n # Get first unassigned variable\n var = select_unassigned_variable(csp)\n\n # Iterate through domain\n for value in order_domain_values(csp, var):\n\n # Inference\n if is_consistent(csp, var, value):\n\n # Set rollback point\n csp.variables.begin_transaction()\n var.assign(value)\n\n # Explore this assignment\n if (inference(csp, var)):\n # GGWP\n if backtrack(csp):\n return True\n # Nope\n csp.variables.rollback()\n return False" ]
[ "0.5993794", "0.5920944", "0.5595559", "0.5568826", "0.55436", "0.54635245", "0.5395138", "0.5378417", "0.53685534", "0.5290771", "0.52233475", "0.5216341", "0.5209409", "0.5198429", "0.5192773", "0.51814836", "0.5171536", "0.51640624", "0.51598394", "0.5133761", "0.51216966", "0.51125884", "0.50822425", "0.5079356", "0.50698006", "0.5042705", "0.50046426", "0.50016356", "0.49983844", "0.49760705" ]
0.751274
0
Return an executable string representation. This method returns a string so that ``eval(self.vrepr())`` returns a new `SSA` object with the same content. >>> from cascada.bitvector.core import Variable >>> from cascada.bitvector.operation import BvXor, BvIdentity >>> from cascada.bitvector.ssa import BvFunction >>> z = Variable("z", 8)
def vrepr(self): return "{}(input_vars={}, output_vars={}, assignments={}{})".format( type(self).__name__, f"[{', '.join([v.vrepr() for v in self.input_vars])}]", f"[{', '.join([v.vrepr() for v in self.output_vars])}]", f"[{', '.join([f'({v.vrepr()}, {e.vrepr()})' for v, e in self.assignments.items()])}]", "" if not self._replace_multiuse_vars_bool else ", replace_multiuse_vars=True", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _repr_(self):\n s = 'An equation '\n have_A = not self.A().is_zero()\n if have_A:\n s += repr(self.A()) + ' x '\n if self.b()>=0:\n if have_A:\n s += '+'\n else:\n s += '-'\n if have_A:\n s += ' '\n s += repr(abs(self.b())) + ' == 0'\n return s", "def _repr_(self):\n s = 'An equation '\n have_A = not self.A().is_zero()\n if have_A:\n s += repr(self.A()) + ' x '\n if self.b()>=0:\n if have_A:\n s += '+'\n else:\n s += '-'\n if have_A:\n s += ' '\n s += repr(abs(self.b())) + ' == 0'\n return s", "def vsstrrepr(expr, **settings):\n p = VectorStrReprPrinter(settings)\n return p.doprint(expr)", "def __repr__(self):\n\n rep = \"\"\n rep += str(self.literal)+\"\\n\"\n rep += str(self.bindings)+\"\\n\"\n rep += str(self.facts)+\"\\n\"\n return (rep)", "def __repr__(self):\n s = \"\"\n for v in self.V():\n s += f\"{v.__repr__()}\\n\"\n \n return s", "def __str__(self):\n return self.V.__str__()", "def _repr_(self):\n return 'A vertex at ' + repr(self._representation_vector);", "def _repr_(self):\n return 'A vertex at ' + repr(self.vector());", "def __str__(self):\n s = \"\"\n for v in self.vectors:\n s += str(v) + \"\\n\"\n return s", "def __repr__(self):\n ret = \"\"\n if is_relation(self.root):\n ret += self.root + '('\n for index, obj in enumerate(self.arguments):\n ret += str(obj)\n if index != len(self.arguments)-1:\n ret += ','\n ret += ')'\n elif is_equality(self.root):\n ret = str(self.first) + self.root + str(self.second)\n elif is_quantifier(self.root):\n ret = self.root + str(self.variable) + '[' + str(self.predicate) + ']'\n elif is_unary(self.root):\n ret = self.root + str(self.first)\n elif is_binary(self.root):\n ret = '(' + str(self.first) + self.root + str(self.second) + ')'\n return ret\n # Task 7.2", "def __str__(self):\n return str((self.instruction_pointer, self.program,))", "def __str__(self):\n out = self.san\n if self.comment != \"\":\n out += \" {\" + self.comment.replace('\\n', ' ') + \"}\"\n if len(self.nags) > 0:\n for n in self.nags:\n out += \" \" + n\n for v in self.variations:\n out += \" (\" + str(v).strip(' ') + \")\"\n return out", "def to_qasm_str(self):\n return qasm(self)", "def asString(self):\n\n res = []\n for v in list(self.vars.values()):\n res.append(v.asString())\n res.append('')\n for e in list(self.enums.values()):\n res.append(e.asString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.defAsString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.dataAsString())\n\n return '\\n'.join(res)", "def __str__(self):\n datastr = self.f_val_to_str()\n return_string = \"%s %s\" % (self.f_get_class_name(), self.v_full_name)\n if self.v_comment:\n return_string += \" (`%s`)\" % self.v_comment\n if datastr:\n return_string += \": \" + datastr\n\n return return_string", "def __repr__(self) -> str:\n argument_dict = {\n \"T_e\": self.T_e,\n \"n_e\": self.n_e,\n \"particle\": self.particle,\n \"Z\": self.Z,\n }\n\n return code_repr.call_string(PlasmaBlob, (), argument_dict)", "def __repr__(self):\n modulename = str(type(self).__module__)\n\n ichars = len(str(int(self.max())))\n slen = ichars + casas\n fstr = \"{{:>{}.{}g}}\".format(slen, casas)\n\n if modulename == \"__main__\":\n s = str(type(self).__name__)\n else:\n s = modulename + '.' 
+ str(type(self).__name__)\n\n s += '(['\n s += ', '.join([fstr.format(x) for x in self.elem])\n s += '])'\n\n return s", "def jv_to_eval_string(jv):\n kind_flags = jv['kind_flags']\n pad_ = jv['pad_']\n offset = jv['offset']\n size = jv['size']\n ptr = jv['u']['ptr']\n return ('((jv){0x%x, 0x%x, 0x%x, 0x%x, (void*)0x%x})' %\n (kind_flags, pad_, offset, size, ptr))", "def __str__(self):\n # special cases\n if self.is_nan() :\n return \"nan\"\n elif self.coeff == 1 :\n if self.expt == 1 :\n return \"x\"\n else :\n return \"x^\" + str(self.expt)\n elif self.coeff == -1 :\n if self.expt == 1 :\n return \"-x\"\n else :\n return \"-x^\" + str(self.expt)\n \n # str_builder\n if self.expt == 0 :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator)\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator))\n elif self.expt == 1 :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator) + \"*x\"\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator)) + \"*x\"\n else :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator) + \"*x^\" + str(self.expt)\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator)) + \"*x^\" + str(self.expt)", "def __repr__(self):\n klass = self.__class__.__qualname__\n cmd = \" \".join(sys.argv)\n cmd_ = f\"$ {cmd}\"\n args = pformat(self.get_args())\n repr_ = \"\\n\".join((klass, cmd_, args))\n return f\"<{repr_}>\"", "def vsprint(expr, **settings):\n\n string_printer = VectorStrPrinter(settings)\n return string_printer.doprint(expr)", "def vrepr(self):\n fa_vrepr = ', '.join([str(a) if not isinstance(a, core.Term) else a.vrepr()\n for a in self.__class__.op.fixed_args])\n fa_vrepr = f\"({fa_vrepr})\"\n\n if len(self.input_prop) == 1:\n id_vrepr = self.input_prop[0].vrepr()\n else:\n id_vrepr = f\"[{', '.join([d.vrepr() for d in self.input_prop])}]\"\n\n return \"{}({}, {})({})\".format(\n make_partial_op_model.__name__,\n self.__class__.__bases__[0].__name__,\n fa_vrepr,\n id_vrepr\n )", "def __repr__(self):\n return pformat(vars(self))", "def vrepr(self):\n if len(self.input_prop) == 1:\n id_vrepr = self.input_prop[0].vrepr()\n else:\n id_vrepr = f\"[{', '.join([d.vrepr() for d in self.input_prop])}]\"\n return f\"{self.__class__.__name__}({id_vrepr})\"", "def __repr__(self):\n def r(a):\n rr = re.sub('\\s','',repr(a))\n rr = re.sub('\\.(?=\\D)','',rr)\n rr = re.sub('(?<=\\D)0\\.','.',rr)\n if rr.startswith('array(['):\n rr = rr[6:-1]\n return rr\n return 'MSTP(shape=%s,\\n ra=%s,\\n ri=%s,\\n dt=%s,\\n pal=%s)' % (\n r(self.z.shape), r(self.ra), r(self.ri), r(self.dt), r(self.pal))", "def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'", "def __str__(self):\n return \"v(\" + \",\".join([str(round(c, digits)) for c in self.components]) + \")\"", "def __repr__(self):\n answer = \"%s(cmd=%s\" % (self.__class__.__name__, repr(self.program_name))\n for parameter in self.parameters:\n if parameter.is_set:\n if isinstance(parameter, _Switch):\n answer += \", %s=True\" % 
parameter.names[-1]\n else:\n answer += \", %s=%s\" \\\n % (parameter.names[-1], repr(parameter.value))\n answer += \")\"\n return answer", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())" ]
[ "0.61188376", "0.61188376", "0.60854304", "0.6069777", "0.6012829", "0.5928467", "0.5817199", "0.58145636", "0.5814443", "0.5811863", "0.57879734", "0.5767751", "0.57263815", "0.5706116", "0.5697784", "0.5687782", "0.5685565", "0.56687975", "0.5661975", "0.56314254", "0.56243896", "0.5595853", "0.5576897", "0.5574739", "0.5542617", "0.55358595", "0.55356747", "0.553465", "0.55320144", "0.55320144" ]
0.6500199
0
Return two strings (header and body) of a C function evaluating the SSA. This method returns two strings (the function header and the function body) of a function in the C programming language that computes the SSA. The C function is of ``void`` type and its list of arguments consists of the input variables, the external variables (if any) and the output variables. The output variables are defined as pointers to store the results. See also `crepr`. >>> from cascada.bitvector.core import Variable >>> from cascada.bitvector.operation import BvXor, BvIdentity >>> from cascada.bitvector.ssa import BvFunction >>> z = Variable("z", 8)
def get_C_code(self, C_function_name): from cascada.bitvector.printing import BvCCodePrinter width2type = BvCCodePrinter._width2C_type # in C, * binds to the declarator, not the type specifier input_vars_c = ', '.join(["{} {}".format(width2type(v.width), v.name) for v in self.input_vars]) output_vars_c = ', '.join(["{} *{}".format(width2type(v.width), v.name) for v in self.output_vars]) if self.external_vars: external_vars_c = ', '.join(["{} {}".format(width2type(v.width), v.name) for v in self.external_vars]) external_vars_c = external_vars_c + ", " else: external_vars_c = "" aux = f"void {C_function_name}({input_vars_c}, {external_vars_c}{output_vars_c})" header = f"{aux};" body = f"#include <stdint.h>\n{aux}{{" # stdint for uint_* outvar2outvar_c = {v: core.Variable("*" + v.name, v.width, allowed_symbols="*") for v in self.output_vars} def primary_assignment2C_code(my_var, my_expr): assert isinstance(my_expr, (core.Constant, core.Variable, operation.PrimaryOperation)) if my_var in self.output_vars: return f"*{my_var} = {my_expr.crepr()};" else: return f"{width2type(my_var.width)} {my_var} = {my_expr.crepr()};" for var, expr in self.assignments.items(): expr = expr.xreplace(outvar2outvar_c) if isinstance(expr, operation.SecondaryOperation): expr = expr.doit(eval_sec_ops=True) body += f"\n\t{primary_assignment2C_code(var, expr)}" body += "\n};" return header, body
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_func_body(func_name, arg_dict, return_type):\n body = \"\"\n arg_list = \"\"\n\n # the following are pointers to scalar outputs\n # Note: pBufferSize was renamed pBufferSizeInBytes in v6.5\n scalar_ptr_outputs = ['nnzTotalDevHostPtr',\n 'pBufferSize',\n 'pBufferSizeInBytes',\n 'resultDevHostPtr']\n\n is_creator = 'cusparseCreate' in func_name\n is_getter = 'cusparseGet' in func_name\n\n if return_type == 'cusparseStatus_t' and not (is_creator or is_getter):\n is_return = False\n else:\n is_return = True\n\n # else:\n return_str = ''\n for k, v in arg_dict.items():\n\n \"\"\"\n set some flags based on the name/type of the argument\n will use these flags to determine whether and how to call ffi.new or\n ffi.cast on each variable\n \"\"\"\n is_ptr = '*' in v\n is_cusparse_type = '_t' in v\n is_cusparse_ptr = is_ptr and is_cusparse_type\n is_output_scalar = k in scalar_ptr_outputs\n if k in ['alpha', 'beta']:\n is_scalar = True\n else:\n is_scalar = False\n if is_getter:\n is_gpu_array = False\n else:\n is_gpu_array = is_ptr and (not is_cusparse_ptr) and (not is_scalar)\n if 'Complex' in v:\n is_complex = True\n else:\n is_complex = False\n\n # convert variable to appropriate type for the FFI\n if is_output_scalar:\n # for scalar outputs make a new pointer\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_getter and is_ptr and (return_type == 'cusparseStatus_t'):\n # any pointers in cusparseGet* are new outputs to be created\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n elif is_gpu_array:\n # pass pointer to GPU array data (use either .ptr or .gpudata)\n body += \"%s = ffi.cast('%s', %s.ptr)\\n\" % (k, v, k)\n elif is_cusparse_ptr:\n if is_creator:\n # generate custom cusparse type\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n else:\n # cast to the custom cusparse type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_ptr and is_scalar:\n # create new pointer, with value initialized to scalar\n if is_complex:\n # complex case is a bit tricky. 
requires ffi.buffer\n body += \"%sffi = ffi.new('%s')\\n\" % (k, v)\n if 'cusparseC' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex64(%s).tostring()\\n\" % (k, k)\n elif 'cusparseZ' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex128(%s).tostring()\\n\" % (k, k)\n else:\n body += \"%s = ffi.new('%s', %s)\\n\" % (k, v, k)\n elif is_ptr or v == 'cudaStream_t':\n # case non-scalar pointer to appropriate type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n else:\n # don't need explicit cast for plain int, float, etc\n pass\n\n # build the list of arguments to pass to the API\n if is_ptr and is_scalar and is_complex:\n # take into account modified argument name for complex scalars\n arg_list += \"%sffi, \" % k\n else:\n arg_list += \"%s, \" % k\n\n # add the function call and optionally return the result\n last_key = k\n arg_list = arg_list[:-2] # remove trailing \", \"\n if is_getter and return_type != 'cusparseStatus_t':\n body += \"return ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n else:\n # check cusparseStatus_t state before returning\n call_str = \"status = ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n body += split_line(call_str, break_pattern=', ', nmax=76)\n body += \"cusparseCheckStatus(status)\\n\"\n if is_return:\n # len(arg_dict) == 2) is to avoid return for cusparseGetLevelInfo\n if is_creator or (is_getter and (len(arg_dict) == 2)):\n body += \"return %s[0]\\n\" % last_key\n else:\n body += \"#TODO: return the appropriate result\"\n body += '\\n\\n'\n return reindent(body, numSpaces=4, lstrip=False)", "def compile_function_to_c(self, function, signatures):\n\n code = []\n code += self.emit_prologue(function)\n labels = Labels()\n for instruction in function.instructions:\n (new_code, new_labels) = self.emit_instruction(instruction, labels, signatures)\n code += new_code\n labels = new_labels\n code += self.emit_epilogue(function)\n return '\\n'.join(code)", "def to_bvfunction(self):\n _input_widths = [v.width for v in self.input_vars]\n _output_widths = [v.width for v in self.output_vars]\n\n class MyBvFunction(BvFunction):\n input_widths = _input_widths\n output_widths = _output_widths\n _ssa = self\n\n @classmethod\n def vrepr(cls):\n return f\"{self.vrepr()}.to_bvfunction()\"\n\n @classmethod\n def to_ssa(cls, input_names, id_prefix, decompose_sec_ops=False, **ssa_options):\n old_names = [str(v) for v in cls._ssa.input_vars]\n if list(input_names) != old_names or decompose_sec_ops is True or \\\n ssa_options.get(\"replace_multiuse_vars\", False) != cls._ssa._replace_multiuse_vars_bool:\n raise ValueError(f\"the arguments of to_ssa(input_names={input_names}, \"\n f\"decompose_sec_ops={decompose_sec_ops}, {ssa_options})\"\n f\" are not the arguments used to generate {cls._ssa}\")\n return cls._ssa\n\n @classmethod\n def eval(cls, *input_vals):\n d = {}\n\n for var, val in zip(cls._ssa.input_vars, input_vals):\n d[var] = val\n\n for outvar, expr in cls._ssa.assignments.items():\n # avoiding expr.xreplace(d)\n # (only calls Operation.__new__ if there is some substitution)\n out_val = type(expr)(*[d.get(arg, arg) for arg in expr.args])\n d[outvar] = out_val\n\n output_vals = []\n for var in cls._ssa.output_vars:\n output_vals.append(d.get(var, var))\n\n return tuple(output_vals)\n\n return MyBvFunction", "def get_function(instruction_structure, total_instructions, file_name):\n\n state = [\"LCL\", \"ARG\", \"THIS\", \"THAT\"]\n instruction = instruction_structure[0]\n\n if instruction == \"function\":\n function_name = 
instruction_structure[1]\n vars_count = int(instruction_structure[2])\n \n bytecode = []\n \n # Start a function block\n bytecode.extend([f\"({function_name})\"])\n\n for _ in range(vars_count):\n bytecode.extend(VirtualMachineLibrary.get_memory(\"push constant 0\", file_name)) \n\n elif instruction == \"call\": \n function_name = instruction_structure[1]\n args_count = instruction_structure[2]\n \n bytecode = []\n \n return_label = \":\".join([file_name, function_name, str(total_instructions), \"RETURN\"])\n\n # Push return address\n bytecode.extend([f\"@{return_label}\"])\n bytecode.extend([\"D=A\", \"@SP\", \"A=M\", \"M=D\"])\n bytecode.extend(VirtualMachineLibrary._get_primary(\"sp++\"))\n\n # Save state\n for address in state:\n bytecode.extend([f\"@{address}\", \"D=M\", \"@R13\", \"M=D\"])\n bytecode.extend(VirtualMachineLibrary._get_primary(\"*a=*b\", a=\"SP\", b=\"R13\", treat_b_as_pointer=False))\n bytecode.extend(VirtualMachineLibrary._get_primary(\"sp++\"))\n\n # Set ARG to point to new base address (sp - 5 - args_count)\n bytecode.extend([\"@SP\", \"D=M\", \"@5\", \"D=D-A\", f\"@{args_count}\", \"D=D-A\", \"@ARG\", \"M=D\"])\n \n # Set LCL to point to current SP\n bytecode.extend([\"@SP\", \"D=M\", \"@LCL\", \"M=D\"])\n \n # Jump to function_name\n bytecode.extend([f\"@{function_name}\", \"0;JMP\"])\n \n # Set return label\n bytecode.extend([f\"({return_label})\"])\n\n bytecode = bytecode\n\n else:\n bytecode = []\n\n # Set R13 to point to callee\"s LCL\n bytecode.extend([\"@LCL\", \"D=M\", \"@R13\", \"M=D\"])\n\n # Set R14 to return address\n bytecode.extend([\"@R13\", \"D=M\", \"@5\", \"D=D-A\", \"A=D\", \"D=M\", \"@R14\", \"M=D\"])\n\n # Set first callee\"s argument to be return value\n bytecode.extend(VirtualMachineLibrary._get_primary(\"sp--\"))\n bytecode.extend(VirtualMachineLibrary._get_primary(\"*a=*b\", a=\"ARG\", b=\"SP\"))\n\n # Reposition SP to be after first callee\"s argument\n bytecode.extend([\"@ARG\", \"D=M+1\", \"@SP\", \"M=D\"])\n \n # Restore registers\n for index, address in enumerate(reversed(state)):\n bytecode.extend([\"@R13\", \"D=M\", f\"@{int(index) + 1}\", \"D=D-A\", \"A=D\", \"D=M\", f\"@{address}\", \"M=D\"])\n \n # Return jump\n bytecode.extend([\"@R14\", \"A=M\", \"0;JMP\"])\n \n return bytecode", "def _gen_code(self):\r\n #TODO: maybe generate one C function only to save compile time? 
Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. #\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // _K_code\r\n // Code for computing the 
covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // _dKdiag_dtheta_code\r\n // Code for 
computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. 
\r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r", "def get_fitness_function_c_code(self):\n s = \"\"\"FLOAT fitness_function(FLOAT phi, FLOAT chi, FLOAT omega)\n {\n FLOAT fitness = absolute(phi);\n return fitness;\n }\n \"\"\" \n return s", "def get_fitness_function_c_code(self):\n s = \"\"\"FLOAT fitness_function(FLOAT phi, FLOAT chi, FLOAT omega)\n {\n FLOAT fitness = absolute(phi);\n return fitness;\n }\n \"\"\" \n return s", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def gen_code(self, filename, func_name):\n\n assert self.bits is not None\n\n vd_list = []\n n_vars = 0\n for tree in self.trees:\n vd_list.append(tree.gen_code(n_vars))\n n_vars += len(vd_list[-1])\n\n # checks the type by the suffix\n\n is_v = filename.split(\".\")[-1] == \"v\"\n\n assert self.inputs\n\n f = open(filename, \"w\")\n\n i_bits = np.sum(self.bits[:-1])\n o_bits = self.bits[-1]\n o_sign = self.is_neg[-1]\n\n if is_v:\n f.write(\"module {}(input [{}:0] i, output [{}:0] o);\\n\".format(\n func_name, i_bits-1, o_bits-1))\n else:\n f.write(\"#include<ac_int.h>\\n\\n\")\n f.write(\"void {}(ac_int<{},false> i, ac_int<{},{}> &o)\\n\".format(\n func_name, i_bits, o_bits, o_sign))\n f.write(\"{\\n\")\n\n\n # write function headline\n s_in_line = []\n\n i_bits = self.bits[0]\n i_sign = self.is_neg[0]\n\n if is_v:\n i_datatype = \" wire {}[{}:0] \".format(\n \"signed \" if i_sign else \"\", i_bits-1)\n else:\n i_datatype = \" ac_int<{},{}> \".format(i_bits, i_sign)\n\n len_s = len(i_datatype)\n\n for i in range(self.inputs):\n if is_v:\n s = (\n \"i_\" + str(i) + \" = \" + \"i[\" + str(i_bits*(i+1)-1) + \":\" +\n str(i_bits*i) + \"]\"\n )\n else:\n s = (\n \"i_\" + str(i) + \" = \" + \"i.slc<\" + str(i_bits) + \">(\" +\n str(i_bits*i) + \")\"\n )\n if (\n len_s + len(s) + 2 > 70 or i_bits != self.bits[i] or\n i_sign != self.is_neg[i]\n ):\n f.write(i_datatype + \", \".join(s_in_line) + \";\\n\")\n\n s_in_line = []\n if is_v:\n i_datatype = \" wire {}[{}:0] \".format(\n \"signed \" if i_sign else \"\", i_bits-1)\n else:\n i_datatype = \" 
ac_int<{},{}> \".format(i_bits, i_sign)\n\n len_s = len(i_datatype)\n\n s_in_line.append(s)\n len_s += len(s) + 2\n\n if s_in_line:\n f.write(i_datatype + \", \".join(s_in_line) + \";\\n\")\n\n if is_v:\n o_datatype = \" wire {}[{}:0] \".format(\n \"signed \" if o_sign else \"\", o_bits)\n else:\n o_datatype = \" ac_int<{},{}> \".format(o_bits, o_sign)\n\n o_list = []\n for i in range(len(vd_list)):\n for v in vd_list[i]:\n if is_v:\n f.write(o_datatype + v + \" = \" + vd_list[i][v] + \";\\n\")\n else:\n f.write(o_datatype + v + \" = \" + vd_list[i][v] + \";\\n\")\n f.write(\"\\n\")\n o_list.append(v)\n\n assert len(o_list) <= 3\n\n if is_v:\n f.write(\" assign \")\n else:\n f.write(\" \")\n\n if len(o_list) == 1:\n f.write(\"o = \" + o_list[0] + \";\")\n elif len(o_list) == 2:\n cond = \"( \" + o_list[0] + \" == \" + o_list[1] + \" ) \"\n n1 = o_list[0]\n n0 = \"( ( \" + \" + \".join(o_list) + \" ) >> 1 )\"\n f.write(\"o = \" + cond + \"? \" + n1 + \": \" + n0)\n elif len(o_list) == 3:\n cond = (\n \"( \" +\n \"( \" + \" == \".join(o_list[0:2]) + \" )?\" + o_list[0] + \":\" +\n \"( \" + \" == \".join(o_list[1:]) + \" )?\" + o_list[1] + \":\" +\n \"( \" + \" == \".join([o_list[0], o_list[2]]) + \" )?\" + o_list[0] +\n \":\" + \"( \" + \" < \".join(o_list[0:2]) + \" ) ?\" +\n \"( ( \" + \" < \".join(o_list[1:]) + \" ) ?\" + o_list[1] + \":\" +\n o_list[2] + \" ) : \" +\n \"( ( \" + \" < \".join([o_list[0], o_list[2]]) + \" ) ?\" + o_list[0] +\n \":\" + o_list[2] + \" )\"\n )\n f.write(\"o = \" + cond + \";\\n\")\n if is_v:\n f.write(\"endmodule\")\n else:\n f.write(\"}\")\n\n f.close()", "def compile_function(self, function, arguments):", "def compile_functions_to_c(self, functions, externs=[]):\n # Mangle main function: real main is provided by libv\n for function in functions:\n if function.signature.name == \"main\":\n function.signature.name = \"vizh_main\"\n\n signature_list = externs + [function.signature for function in functions]\n \n # We need size_t and libv functions\n code = ['#include <stddef.h>',\n '#include \"libv.h\"']\n\n # First output forward declarations for all functions and externs\n code += [f'{str(signature)};' for signature in signature_list]\n\n signature_list += libv_decls\n signatures = {signature.name: signature for signature in signature_list}\n\n errors = []\n for function in functions:\n try:\n code.append(self.compile_function_to_c(function, signatures))\n except CompilerError as err:\n errors.append((function.signature.name,err))\n\n if len(errors) > 0:\n messages = [f'Error while compiling {func_name}: {err}' for func_name, err in errors]\n raise CompilerError('\\n'.join(messages))\n \n return '\\n'.join(code)", "def vjp_assemble_eval(\n fenics_function: Callable, fenics_templates: FenicsVariable, *args: np.array\n) -> Tuple[np.array, Callable]:\n\n numpy_output, ufl_form, fenics_inputs = assemble_eval(\n fenics_function, fenics_templates, *args\n )\n\n def vjp_fun(g):\n return tuple(\n vjp if vjp is not None else jax.ad_util.zeros_like_jaxval(args[i])\n for i, vjp in enumerate(vjp_assemble_impl(g, ufl_form, fenics_inputs))\n )\n\n return numpy_output, vjp_fun", "def make_cpp_func_bodies(self):\n\t\tfor name, body in self.func_bodies.iteritems():\n\t\t\tt = Lexer(body).get_tokens()\t\t\t\n\t\t\tS = [] #Stack\n\t\t\tx = 0\n\t\t\twhile x < len(t):\n\t\t\t\tif t[x] == '(': #function call begins\n\t\t\t\t\tx += 1\n\t\t\t\t\tS.append(self.FUNCS_DICT.get(t[x], t[x]) + '(')\n\t\t\t\telif t[x] == ')': #function call ends\n\t\t\t\t\tacc = ''\n\t\t\t\t\twhile 
S[-1][-1] != '(':\n\t\t\t\t\t\t#pop off params until function call is reached\n\t\t\t\t\t\tacc = S.pop() + ',' + acc\n\t\t\t\t\t# [:-1] to strip off comma at the end\n\t\t\t\t\tS.append(S.pop() + acc[:-1] + ')') #S.pop() gives function\n\t\t\t\telse:\n\t\t\t\t\tS.append(self.convert_atom(t[x]))\n\t\t\t\tx += 1\n\t\t\tself.cpp_func_bodies[name] = S[0]", "def fortran_c_wrapper(self) -> str:\n result = banner('//')\n result += self._fc_includes()\n result += self._fc_using_statements()\n result += self._fc_function_definitions()\n return result", "def fortran_function(self) -> str:\n if self.f_override is not None:\n return indent(\n self.f_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\n \"$F_PREFIX$\", self.f_prefix),\n 4*' ')\n\n result = ''\n\n # declaration\n func_name = '{}_{}_{}'.format(\n self.f_prefix, self.class_name, self.name)\n in_parameters = self._f_in_parameters()\n return_type, out_parameters = self._f_out_parameters()\n if self.may_throw:\n out_parameters.append(('integer, optional', 'err_code'))\n out_parameters.append(('character(:), allocatable, optional',\n 'err_msg'))\n\n all_parameters = in_parameters + out_parameters\n arg_list = ', &\\n'.join([par_name for _, par_name in all_parameters])\n arg_ilist = indent(arg_list, 8*' ')\n if return_type != '':\n result += 'function {}( &\\n{})\\n'.format(func_name, arg_ilist)\n else:\n result += 'subroutine {}( &\\n{})\\n'.format(func_name, arg_ilist)\n\n # parameter declarations\n result += ' implicit none\\n'\n for par_type, par_name in in_parameters:\n result += ' {}, intent(in) :: {}\\n'.format(\n par_type, par_name)\n for par_type, par_name in out_parameters:\n result += ' {}, intent(out) :: {}\\n'.format(par_type, par_name)\n if return_type != '':\n result += ' {} :: {}\\n'.format(return_type, func_name)\n result += '\\n'\n\n # variable declarations\n c_return_type, fi_out_parameters = self._fi_out_parameters()\n if c_return_type:\n result += ' {} :: ret_val\\n'.format(c_return_type)\n for par_type, par_name in fi_out_parameters:\n result += ' {} :: {}\\n'.format(par_type, par_name)\n for par_type, par_name in self.ret_type.f_aux_variables():\n result += ' {} :: {}\\n'.format(par_type, par_name)\n if self.may_throw:\n result += ' integer (c_int) :: err_code_v\\n'\n result += ' type (c_ptr) :: err_msg_v\\n'\n result += ' integer (c_size_t) :: err_msg_len_v\\n'\n result += ' character (c_char), dimension(:), pointer :: err_msg_f\\n'\n result += ' character(:), allocatable :: err_msg_p\\n'\n result += ' integer (c_size_t) :: err_msg_i\\n'\n if c_return_type or fi_out_parameters or self.may_throw:\n result += '\\n'\n\n # convert input\n args = [param.f_chain_arg() for param in self.params]\n args += [par_name for _, par_name in fi_out_parameters]\n if self.may_throw:\n args += ['err_code_v', 'err_msg_v', 'err_msg_len_v']\n arg_str = ', &\\n'.join([8*' ' + arg for arg in args])\n\n # call C function\n fc_func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n chain_call = self.fc_chain_call(\n ns_prefix=self.c_prefix, class_name=self.class_name,\n fc_func_name=fc_func_name, fc_args=arg_str)\n result_name = ''\n if return_type != '':\n result_name = func_name\n elif out_parameters:\n result_name = out_parameters[0][1]\n result += self.ret_type.f_call_c('ret_val', chain_call)\n\n # handle errors if necessary\n if self.may_throw:\n # Note: I tried to factor this out into a function, but Fortran\n # makes that near-impossible. 
Since we're generating anyway, it's\n # not really duplication, so leave it as is.\n result += indent(dedent(f\"\"\"\\\n if (err_code_v .ne. 0) then\n if (present(err_code)) then\n err_code = err_code_v\n if (present(err_msg)) then\n call c_f_pointer(err_msg_v, err_msg_f, (/err_msg_len_v/))\n allocate (character(err_msg_len_v) :: err_msg)\n do err_msg_i = 1, err_msg_len_v\n err_msg(err_msg_i:err_msg_i) = err_msg_f(err_msg_i)\n end do\n end if\n {dedent(\n self.ret_type.f_return_dummy_result(result_name))}\n return\n else\n call c_f_pointer(err_msg_v, err_msg_f, (/err_msg_len_v/))\n allocate (character(err_msg_len_v) :: err_msg_p)\n do err_msg_i = 1, err_msg_len_v\n err_msg_p(err_msg_i:err_msg_i) = err_msg_f(err_msg_i)\n end do\n print *, err_msg_p\n stop\n end if\n else\n if (present(err_code)) then\n err_code = 0\n end if\n end if\n\n \"\"\"), 4*' ')\n\n # convert and return result\n result += self.ret_type.f_return_result(result_name, 'ret_val')\n\n # end\n if return_type != '':\n result += 'end function {}\\n\\n'.format(func_name)\n else:\n result += 'end subroutine {}\\n\\n'.format(func_name)\n return indent(result, 4*' ')", "def to_code(self, ipt_args_in_construct: str, variable_name: str, output_var: str, code_fragment):", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def compile(self, args):\n if args not in self._compileinfos:\n cres = compile_with_dppl(self.py_func, None, args, debug=self.debug)\n func = cres.library.get_function(cres.fndesc.llvm_func_name)\n cres.target_context.mark_ocl_device(func)\n first_definition = not self._compileinfos\n self._compileinfos[args] = cres\n libs = [cres.library]\n\n if first_definition:\n # First definition\n cres.target_context.insert_user_function(self, cres.fndesc,\n libs)\n else:\n cres.target_context.add_user_function(self, cres.fndesc, libs)\n\n else:\n cres = self._compileinfos[args]\n\n return cres.signature", "def assemble_eval(\n fenics_function: Callable,\n fenics_templates: Iterable[FenicsVariable],\n *args: np.array,\n) -> Tuple[np.array, ufl.Form, Tuple[FenicsVariable]]:\n\n check_input(fenics_templates, *args)\n fenics_inputs = convert_all_to_fenics(fenics_templates, *args)\n\n out = fenics_function(*fenics_inputs)\n if not 
isinstance(out, tuple):\n raise ValueError(\n \"FEniCS function output should be in the form (assembly_output, ufl_form).\"\n )\n\n assembly_output, ufl_form = out\n\n if isinstance(assembly_output, tuple):\n raise ValueError(\n \"Only single solution output from FEniCS function is supported.\"\n )\n\n if not isinstance(assembly_output, float):\n raise ValueError(\n f\"FEniCS function output should be in the form (assembly_output, ufl_form). Got {type(assembly_output)} instead of float\"\n )\n\n if not isinstance(ufl_form, ufl.Form):\n raise ValueError(\n f\"FEniCS function output should be in the form (assembly_output, ufl_form). Got {type(ufl_form)} instead of ufl.Form\"\n )\n\n numpy_output = np.asarray(assembly_output)\n return numpy_output, ufl_form, fenics_inputs", "def _make_cas_function():\n # Generate IR\n mod = lc.Module.new('generate-cas')\n llint = lc.Type.int()\n llintp = lc.Type.pointer(llint)\n fnty = lc.Type.function(llint, [llintp, llint, llint])\n fn = mod.add_function(fnty, name='.numba.parallel.ufunc.cas')\n ptr, old, repl = fn.args\n bb = fn.append_basic_block('')\n builder = lc.Builder.new(bb)\n outpack = builder.cmpxchg(ptr, old, repl, ordering='monotonic')\n out = builder.extract_value(outpack, 0)\n failed = builder.extract_value(outpack, 1)\n builder.ret(builder.select(failed, old, out))\n\n # Build & Link\n llmod = ll.parse_assembly(str(mod))\n\n target = ll.Target.from_triple(ll.get_process_triple())\n tm = target.create_target_machine()\n engine = ll.create_mcjit_compiler(llmod, tm)\n ptr = engine.get_function_address(fn.name)\n return engine, ptr", "def _fc_function_definitions(self) -> str:\n result = 'extern \"C\" {\\n\\n'\n for namespace in self.namespaces:\n for member in namespace.members:\n result += member.fortran_c_wrapper()\n\n result += '}\\n\\n'\n return result", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitCallbackPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n\n if self.signature.ret_arg:\n output_fp_ptr = builder.bitcast(self.fn.args[self.signature.ret_arg],\n ll.PointerType(self.fp_type))\n for i, val in enumerate(ret):\n index = ll.Constant(ll.IntType(32), i)\n output_array_ptr = builder.gep(output_fp_ptr, [index])\n builder.store(val, output_array_ptr)\n builder.ret(ll.Constant(ll.IntType(32), 0)) # return success\n else:\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod", "def decompile_and_get(ea):\n decompiler_plugin = \"hexarm\"\n if not idaapi.init_hexrays_plugin() and not (load_plugin(decompiler_plugin) and idaapi.init_hexrays_plugin()):\n raise Exception(\"Could not load Hex-Rays!\")\n f = get_func(ea)\n if f is None:\n return None, None, None\n\n cfunc = decompile(f);\n if cfunc is None:\n # Failed to decompile\n return None, None, None\n\n visitor = CAstVisitor(cfunc)\n visitor.apply_to(cfunc.body, None)\n pseudo_hash = visitor.primes_hash\n\n cmts = idaapi.restore_user_cmts(cfunc.entry_ea)\n pseudo_comments = {}\n if cmts is not None:\n for tl, cmt in cmts.iteritems():\n pseudo_comments[tl.ea - self.get_base_address()] = [str(cmt), tl.itp]\n\n sv = cfunc.get_pseudocode()\n pseudo = []\n first_line = None\n for sline in sv:\n line = tag_remove(sline.line)\n if line.startswith(\"//\"):\n continue\n\n if first_line is None:\n first_line = line\n else:\n pseudo.append(line)\n pseudo_text = \"\\n\".join(pseudo)\n if pseudo_text:\n pseudo_text = 
get_cmp_pseudo_lines(pseudo_text)\n return pseudo_text, pseudo_hash, pseudo_comments", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod", "def _(self, node: FunctionDef):\n body_nodes = []\n for n in node.body:\n curr_piece = self.visit(n)\n if len(curr_piece) > 0:\n body_nodes.append(curr_piece)\n\n func_body = \" \".join(body_nodes)\n\n return f\"( {node.name} {func_body} )\"", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def fortran_c_wrapper(self) -> str:\n if self.fc_override is not None:\n return self.fc_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\"$F_PREFIX$\", self.f_prefix)\n\n result = ''\n\n # declaration\n in_parameters = self._fc_in_parameters()\n return_type, out_parameters = self._fc_out_parameters()\n if self.may_throw:\n out_parameters.append('int * err_code')\n out_parameters.append('char ** err_msg')\n out_parameters.append('std::size_t * err_msg_len')\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n par_str = ', '.join(in_parameters + out_parameters)\n result += '{} {}({}) {{\\n'.format(return_type, func_name, par_str)\n\n # convert input\n for par in self.params:\n result += '{}'.format(par.fc_convert_input())\n\n # call C++ function and return result\n if self.may_throw:\n result += ' try {\\n'\n result += ' *err_code = 0;\\n'\n result += indent(self._fc_cpp_call(), 4*' ')\n result += 
indent(self._fc_return(), 4*' ')\n result += ' }\\n'\n for exception, code in error_codes.items():\n if code != 0:\n catch = ''\n catch += 'catch (std::{} const & e) {{\\n'.format(exception)\n catch += ' *err_code = {};\\n'.format(code)\n catch += ' static std::string msg;\\n'\n catch += ' msg = e.what();\\n'\n catch += ' *err_msg = const_cast<char*>(msg.data());\\n'\n catch += ' *err_msg_len = msg.size();\\n'\n catch += '}\\n'\n result += indent(catch, 4*' ')\n result += self._fc_return_default()\n else:\n result += self._fc_cpp_call()\n result += self._fc_return()\n result += '}\\n\\n'\n return result", "def render_function(self, name):\n func = self.functions[name]\n text = self.sections['.text']\n a = func.range.start - text.addr\n z = func.range.stop - text.addr\n data = text.data()[a:z]\n return self.render_disassembly(data, func.addr)", "def make_func_code(params):\n class FuncCode(object):\n __slots__ = ('co_varnames', 'co_argcount')\n fc = FuncCode()\n fc.co_varnames = params\n fc.co_argcount = len(params)\n return fc", "def _compile_C_code(header, body, return_unloaded=False, verbose=False):\n import importlib\n import tempfile\n import uuid\n\n import cffi\n\n module_name = \"module_\" + uuid.uuid4().hex\n\n if \"__uint128\" in header:\n raise ValueError(\"_compile_C_code does not support bit-vector widths \"\n \"larger than 64 bits (cffi does not support __uint128)\")\n\n ffibuilder = cffi.FFI()\n ffibuilder.cdef(header)\n ffibuilder.set_source(module_name, body)\n\n tmpdir = tempfile.TemporaryDirectory()\n lib_path = ffibuilder.compile(tmpdir=tmpdir.name, verbose=verbose)\n\n if return_unloaded:\n return lib_path, module_name, tmpdir\n\n # dynamic import\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n spec = importlib.util.spec_from_file_location(module_name, lib_path)\n pymod_parent = importlib.util.module_from_spec(spec)\n # sys.modules[module_name] = module\n spec.loader.exec_module(pymod_parent)\n\n pymod = pymod_parent\n\n return pymod, tmpdir" ]
[ "0.6069087", "0.5914765", "0.57788587", "0.562777", "0.56116414", "0.56074697", "0.56074697", "0.5592773", "0.5562327", "0.55166125", "0.5506797", "0.5430662", "0.53873694", "0.5343953", "0.5333583", "0.5295312", "0.5277339", "0.527095", "0.5228903", "0.522377", "0.51906025", "0.5187608", "0.51738465", "0.51652247", "0.5156414", "0.5152597", "0.51462895", "0.50954133", "0.5076326", "0.507102" ]
0.67547506
0
Return a `BvFunction` that evaluates ``self``. The returned `BvFunction` stores ``self`` in the ``_ssa`` attribute. Moreover, the method `BvFunction.to_ssa` of the returned `BvFunction` raises an exception unless it is called with the same arguments that were used to create ``_ssa`` (in this case ``_ssa`` is returned).

>>> from cascada.bitvector.ssa import BvFunction
def to_bvfunction(self):
    _input_widths = [v.width for v in self.input_vars]
    _output_widths = [v.width for v in self.output_vars]

    class MyBvFunction(BvFunction):
        input_widths = _input_widths
        output_widths = _output_widths
        _ssa = self

        @classmethod
        def vrepr(cls):
            return f"{self.vrepr()}.to_bvfunction()"

        @classmethod
        def to_ssa(cls, input_names, id_prefix, decompose_sec_ops=False, **ssa_options):
            old_names = [str(v) for v in cls._ssa.input_vars]
            if list(input_names) != old_names or decompose_sec_ops is True or \
                    ssa_options.get("replace_multiuse_vars", False) != cls._ssa._replace_multiuse_vars_bool:
                raise ValueError(f"the arguments of to_ssa(input_names={input_names}, "
                                 f"decompose_sec_ops={decompose_sec_ops}, {ssa_options})"
                                 f" are not the arguments used to generate {cls._ssa}")
            return cls._ssa

        @classmethod
        def eval(cls, *input_vals):
            d = {}
            for var, val in zip(cls._ssa.input_vars, input_vals):
                d[var] = val
            for outvar, expr in cls._ssa.assignments.items():
                # avoiding expr.xreplace(d)
                # (only calls Operation.__new__ if there is some substitution)
                out_val = type(expr)(*[d.get(arg, arg) for arg in expr.args])
                d[outvar] = out_val
            output_vals = []
            for var in cls._ssa.output_vars:
                output_vals.append(d.get(var, var))
            return tuple(output_vals)

    return MyBvFunction
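As a quick illustration of the round trip described in this record, here is a minimal, unverified sketch; the toy function MyFoo, its widths and the id prefix "a" are made up, and default to_ssa options are assumed, while BvFunction, to_ssa and to_bvfunction are the names used in the query and code above.

from cascada.bitvector.ssa import BvFunction

class MyFoo(BvFunction):
    # hypothetical toy function, not part of the library
    input_widths = [8, 8]
    output_widths = [8]

    @classmethod
    def eval(cls, x, y):
        return ((x ^ y) + x,)

ssa = MyFoo.to_ssa(["x", "y"], "a")   # SSA form of MyFoo
MyFooFromSSA = ssa.to_bvfunction()    # wrap the SSA back into a BvFunction
# per the docstring above, calling to_ssa on the wrapper with the same
# arguments used to build `ssa` returns the stored _ssa object
assert MyFooFromSSA.to_ssa(["x", "y"], "a") is ssa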
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_function(self):\n return SSAFunction(self.get_graph())", "def get_ssa(self, name='main'):\n\n ssa = NetworkEnsemble()\n ssa.add_function(name, self.get_function())\n return ssa", "def Avv_func(f):\n\n def Avv(x, v):\n def F(s):\n return f(x + v * s)\n\n return jacfwd(jacfwd(F))(0.0)\n\n return Avv", "def get_ssa(self):\n return copy.deepcopy(self.ssa)", "def as_vector(self):\n if self.is_vector():\n return self\n else:\n assert self.is_scalar()\n return BSplineFunc(self.kvs, self.coeffs[..., np.newaxis])", "def csb(self):\n\n def myfunc(incident_energy):\n return XrayLibWrap_Energy(self.Z, \"csb\", incident_energy)\n\n return myfunc", "def gen_dual_func(self):\n if 0 in self.sig:\n # We are degenerate, use the right complement\n return self.right_complement_func\n else:\n Iinv = self.pseudoScalar.inv().value\n gmt_func = self.gmt_func\n @numba.njit\n def dual_func(Xval):\n return gmt_func(Xval, Iinv)\n return dual_func", "def beta_sf_wrapper(q, a, b):\n if a / (a + b) < c.low_score:\n return c.low_score\n return beta.sf(q, a, b)", "def apply_matrix(self, A):\n assert self.is_vector(), 'Can only apply matrices to vector-valued functions'\n C = np.matmul(A, self.coeffs[..., None])\n assert C.shape[-1] == 1 # this should have created a new singleton axis\n return BSplineFunc(self.kvs, np.squeeze(C, axis=-1))", "def apply(self, applicative):\n def state_applier(state, state_f=self, state_v=applicative):\n func, func_state = state_f(state)\n result, res_state = state_v(func_state)\n return (func(result), res_state)\n return State(state_applier)", "def compute_vectorized_jacobian(self, a):\n if self.forward_activation == 'tanh':\n return 1. - torch.tanh(a)**2\n elif self.forward_activation == 'relu':\n J = torch.ones_like(a)\n J[a < 0.] = 0.\n return J\n elif self.forward_activation == 'leakyrelu':\n J = torch.ones_like(a)\n J[a < 0.] 
= 0.2\n return J\n elif self.forward_activation == 'linear':\n return torch.ones_like(a)\n elif self.forward_activation == 'sigmoid':\n s = torch.sigmoid(a)\n return s * (1 - s)\n else:\n raise ValueError('The provided forward activation {} is not '\n 'supported'.format(self.forward_activation))", "def gen_vee_func(self):\n dual_func = self.dual_func\n omt_func = self.omt_func\n @numba.njit\n def vee(aval, bval):\n return dual_func(omt_func(dual_func(aval), dual_func(bval)))\n return vee", "def from_functional_optim(cls, functional_optim):\n opt_hook_state_inst = cls.__new__(cls) # Does not call __init__\n opt_hook_state_inst.functional_optimizer = functional_optim\n opt_hook_state_inst._check_valid_functional_optim()\n return opt_hook_state_inst", "def intern_F(self):\n if self.A is None:\n def Fx(x,y):\n if self.hx is None:\n fx = self.gradf(x)\n self.Fz = fx, None, None\n return fx, None, None\n else:\n vec_prod = np.zeros(len(x))\n fy = np.zeros(len(y))\n for i in range(len(y)):\n gh = self.gradh[i+1](x,i+1)\n vec_prod += y[i] * gh\n if self.optimized:\n fy[i] = -self.hx[i+1](x, i+1, gh)\n else:\n fy[i] = -self.hx[i+1](x, i+1)\n fx = self.gradf(x)+ vec_prod\n self.Fz = fx, fy, None\n return fx, fy, None\n else:\n def Fx(x,y,u):\n if self.hx is None:\n fx = self.gradf(x)\n fu = self.b-self.A@x\n self.Fz = fx, None, fu\n return fx, None, fu\n else:\n vec_prod = np.zeros(len(x))\n fy = np.zeros(len(y))\n for i in range(len(y)):\n gh = self.gradh[i+1](x,i+1)\n vec_prod += y[i] * gh\n if self.optimized:\n fy[i] = -self.hx[i+1](x, i+1, gh)\n else:\n fy[i] = -self.hx[i+1](x, i+1)\n fx = self.gradf(x)+ vec_prod\n fu = self.b-self.A@x\n self.Fz = fx, fy, fu\n return fx, fy, fu\n return Fx", "def eval(self) -> Callable:\n if len(self.closure_vals) > 0:\n func_fake_closure = eval_function_def_as_closure(\n self.tree,\n list(self.closure_vals),\n globals_=self.globals,\n flags=self._compiler_flags,\n )\n\n func = FunctionType(\n func_fake_closure.__code__,\n self.globals,\n func_fake_closure.__name__,\n func_fake_closure.__defaults__,\n tuple(self.closure_vals.values()),\n )\n\n for attr in (\"__kwdefaults__\", \"__annotations__\"):\n if hasattr(func_fake_closure, attr):\n setattr(func, attr, getattr(func_fake_closure, attr))\n else:\n func = eval_function_def(self.tree, globals_=self.globals, flags=self._compiler_flags)\n\n # A regular function contains a file name and a line number\n # pointing to the location of its source.\n # I we wanted to trick ``inspect.getsource()`` into working with\n # this newly generated function, we could create a temporary file and write it there.\n # But it leads to other complications, and is unnecessary at this stage.\n # So we just save the source into an attribute for ``Function.from_object()``\n # to discover if we ever want to create a new ``Function`` object\n # out of this function.\n vars(func)[SOURCE_ATTRIBUTE] = self.get_source()\n\n return func", "def closure(self):\n cdef StdVectorFst result = self.copy()\n result.set_closure()\n return result", "def costFun(self, S, x):", "def convert(self):\n return _libsbml.SBMLFunctionDefinitionConverter_convert(self)", "def erfinv(F):\n def compute(value):\n if isinstance(value, Number):\n if sc is not None:\n return sc.erfinv(value)\n else:\n raise ValueError('Numbers are not supported as input if scipy is not installed')\n return F.npx.erfinv(value)\n return compute", "def representsBinaryFunction(self, *args):\n return _libsbml.ASTBasePlugin_representsBinaryFunction(self, *args)", "def sf(x, a, b):\n with 
mp.extradps(5):\n a, b = _validate_a_b(a, b)\n if x < 0:\n return mp.one\n if x > 1:\n return mp.zero\n return mp.betainc(a, b, x1=x, x2=1, regularized=True)", "def function(self) -> Callable:\n\n if self._function is None:\n # Create the underlying continuous function.\n\n if self._domain.size != 0 and self._range.size != 0:\n self._function = self._extrapolator(\n self._interpolator(\n self._domain, self._range, **self._interpolator_kwargs\n ),\n **self._extrapolator_kwargs,\n )\n else:\n\n def _undefined_function(\n *args: Any, **kwargs: Any # noqa: ARG001\n ):\n \"\"\"\n Raise a :class:`ValueError` exception.\n\n Other Parameters\n ----------------\n args\n Arguments.\n kwargs\n Keywords arguments.\n\n Raises\n ------\n ValueError\n \"\"\"\n\n raise ValueError(\n \"Underlying signal interpolator function does not \"\n 'exists, please ensure that both \"domain\" and \"range\" '\n \"variables are defined!\"\n )\n\n self._function = cast(Callable, _undefined_function)\n\n return cast(Callable, self._function)", "def get_function(self):\n return subs(self.f.get_function(), self.sub_pre, self.sub_post)", "def _symbolic_jacobian(self):\n return self._symbolic_system.jacobian([V[0], V[1]])", "def scikit_signal_factory(signal_function: callable):\n return FunctionTransformer(signal_function)", "def eval(self, x):\n\n if self.eval_f is None:\n self.eval_f = theano.function(\n inputs=[self.input],\n outputs=[self.y],\n givens=[(self.m, self.bm), (self.v, self.bv)]\n )\n\n return self.eval_f(x.astype(dtype))", "def gradFun(self, S, x):", "def ms_function(fn=None, input_signature=None, hash_args=None, jit_config=None):\n\n logger.warning(\"'mindspore.ms_function' will be deprecated and removed in a future version. \"\n \"Please use 'mindspore.jit' instead.\")\n return jit(fn=fn, input_signature=input_signature, hash_args=hash_args, jit_config=jit_config)", "def __init__(self):\n GinacFunction.__init__(self, \"beta\", nargs=2,\n conversions=dict(maxima='beta',\n mathematica='Beta',\n sympy='beta'))", "def get_bprop_sparse_to_dense(self):\n\n def bprop(indices, values, dense_shape, out, dout):\n return zeros_like(indices), dout, zeros_like(dense_shape)\n\n return bprop" ]
[ "0.6440057", "0.57623315", "0.54604423", "0.52078617", "0.50013286", "0.47485492", "0.46421528", "0.46381408", "0.46227366", "0.46155635", "0.46057883", "0.4594721", "0.4532908", "0.45137742", "0.45113122", "0.45107594", "0.45080462", "0.45050156", "0.44994885", "0.4497978", "0.4489537", "0.44654378", "0.4464963", "0.44318962", "0.44299966", "0.44164127", "0.44074762", "0.4396872", "0.43955612", "0.43885425" ]
0.75143814
0
Split into multiple `SSA` objects given the list of variable separators.
def split(self, var_separators):
    assert len(var_separators) >= 1
    _listify = lambda s: list(s) if isinstance(s, collections.abc.Sequence) else [s]

    input_vars_set = set(self.input_vars)
    external_vars_set = set(self.external_vars)

    # remove input and external vars from var_separators
    var_separators = [[x for x in _listify(vs_list) if x not in input_vars_set | external_vars_set]
                      for vs_list in var_separators]

    assert tuple(var_separators[-1]) != tuple(self.output_vars)

    for i in range(len(var_separators)):
        if len(var_separators[i]) == 0:
            raise ValueError(f"no non-input/non-external vars in var_separators[{i}]")
        for j in range(len(var_separators[i])):
            var = var_separators[i][j]
            if j == len(var_separators[i]) - 1 and self.singleuse_var2multiuse_var.get(var, None) in external_vars_set:
                is_last_copy = self.multiuse_var2singleuse_vars[self.singleuse_var2multiuse_var[var]][-1] == var
                if not is_last_copy:
                    raise ValueError(f"split does not support copies of external variables ({var}) in var_separators")
            if var not in self.assignments:
                assert var in self._old_outvar_assignments, \
                    f"var {var} is not in SSA\n - {self}\nand not in old SSA\n - {self._old_outvar_assignments}"
                old_index = self._old_outvar_assignments.index(var)
                for new_var_index in reversed(range(old_index)):
                    new_var = self._old_outvar_assignments[new_var_index]
                    if new_var in self.assignments:
                        # it doesn't matter if new_var in var_separators[i]
                        # (duplicates removed later)
                        warnings.warn(f"var_separators[{i}][{j}] = {var} replaced by {new_var}")
                        var_separators[i][j] = new_var
                        break
                else:
                    raise ValueError(f"var_separators[{i}][{j}] = {var} not in assignments\n{self}")

        # removing duplicates
        var_separators[i] = set(var_separators[i])
        for prev_i in range(0, i):
            var_separators[i].difference_update(var_separators[prev_i])
        if len(var_separators[i]) == 0:
            raise ValueError(f"var_separators[{i}] only contains variables from "
                             f"var_separators[j] with j < {i}")

    # var_terminators is var_separators w/ output vars (entries required to be sets)
    var_terminators = var_separators + [set(self.output_vars)]

    class SubSSA(object):
        replace_multiuse_vars = self._replace_multiuse_vars_bool

        def __init__(self_subssa):
            self_subssa.input_vars = []
            self_subssa.output_vars = []
            self_subssa.assignments = collections.OrderedDict()
            self_subssa.external_vars = []

        def __str__(self_subssa):
            msg = "SubSSA:"
            msg += f"\n - input_vars: {self_subssa.input_vars}"
            msg += f"\n - output_vars: {self_subssa.output_vars}"
            msg += f"\n - external_vars: {self_subssa.external_vars}"
            msg += f"\n - assignments: {self_subssa.assignments}"
            return msg

    sub_ssa_list = [SubSSA() for _ in range(len(var_terminators))]
    sub_ssa_list[0].input_vars = self.input_vars[:]
    sub_ssa_list[-1].output_vars = self.output_vars[:]

    # fill assignments and external_vars.
    # when var_terminators[i] is empty we move to the next sub_ssa
    i = 0
    for assign_out_var, assign_expr in self.assignments.items():
        sub_ssa_list[i].assignments[assign_out_var] = assign_expr
        var_terminators[i].discard(assign_out_var)
        for v in assign_expr.args:
            assert isinstance(v, core.Variable)
            if v in external_vars_set:
                sub_ssa_list[i].external_vars.append(v)
        # # debugging
        # print(f" - added {(assign_out_var, assign_expr)} to sub_ssa_list[{i}] and added"
        #       f"{[v for v in assign_expr.args if v in external_vars_set]} to "
        #       f"sub_ssa_list[{i}].external_vars")
        if len(var_terminators[i]) == 0:
            i += 1
    assert i == len(sub_ssa_list)

    def get_input_vars(my_sub_ssa):
        vars_not_defined = []  # input vars
        vars_defined = set(my_sub_ssa.external_vars)
        for assign_out_var, assign_expr in my_sub_ssa.assignments.items():
            for arg in assign_expr.args:
                if isinstance(arg, core.Variable) and arg not in vars_defined:
                    if arg not in vars_not_defined:  # avoid duplicates
                        # print(f" - var {arg} added to {vars_not_defined} from {(assign_out_var, assign_expr)}")
                        vars_not_defined.append(arg)
            vars_defined.add(assign_out_var)
        assert len(vars_defined.intersection(set(vars_not_defined))) == 0
        # input vars also contain output vars not defined
        assert len(my_sub_ssa.output_vars) > 0
        vars_defined |= set(vars_not_defined)  # ows input_vars contain duplicates
        for output_var in my_sub_ssa.output_vars:
            if output_var not in vars_defined:
                # output_var can still be used in some assignment expression
                vars_not_defined.append(output_var)
        return vars_not_defined

    def check_output_vars(my_sub_ssa, my_i):
        # check all output vars are in input_vars or assignments
        aux_set = set(my_sub_ssa.input_vars) | set(my_sub_ssa.assignments)
        assert all(v in aux_set for v in my_sub_ssa.output_vars), \
            f"sub_ssa_list[{my_i}] | output_vars={my_sub_ssa.output_vars} not in " \
            f"input_vars={my_sub_ssa.input_vars} or assignments={my_sub_ssa.assignments}"

    # fill sub_ssa[i].input_vars from get_input_vars_from_sub_ssa(sub_ssa[i])
    # fill sub_ssa[i-1].output_vars from sub_ssa[i].input_vars
    for i in reversed(range(len(sub_ssa_list))):
        if i == 0:
            assert len(sub_ssa_list[i].input_vars) > 0
        else:
            # copy vars to sub_ssa_list to avoid sharing the vars between multiple objects
            sub_ssa_list[i].input_vars = get_input_vars(sub_ssa_list[i])[:]
            sub_ssa_list[i-1].output_vars = sub_ssa_list[i].input_vars[:]
        check_output_vars(sub_ssa_list[i], i)

    for i in range(len(sub_ssa_list)):
        # after calling SSA(), the suffix of output_vars might change
        # (some ``_`` or ``_out`` might be added)
        # but better not to use the new output_vars for the input vars
        # of the next sub_ssa (to avoid variables with suffix ``_out_out_out``)
        if i >= 1:
            # check that the base name (except the suffix) has not changed
            for v, prev_v in zip(sub_ssa_list[i].input_vars, sub_ssa_list[i-1].output_vars):
                assert v.name == prev_v.name[:len(v.name)]
        check_output_vars(sub_ssa_list[i], i)
        sub_ssa_list[i] = SSA(input_vars=sub_ssa_list[i].input_vars,
                              output_vars=sub_ssa_list[i].output_vars,
                              assignments=sub_ssa_list[i].assignments,
                              replace_multiuse_vars=SubSSA.replace_multiuse_vars)
        for ev in sub_ssa_list[i].external_vars:
            if ev not in external_vars_set:
                raise ValueError(f"sub_ssa_list[{i}].external_vars {sub_ssa_list[i].external_vars} "
                                 f"not a subset of parent SSA external_vars {external_vars_set}")

    return sub_ssa_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_split(text, seps):\n if not seps: # split by whitespaces\n return text.split()\n else: # split by separators in `seps`\n\n ##### Topics on Stack Overflow\n # http://stackoverflow.com/questions/1059559/python-strings-split-with-multiple-separators\n\n ## Method 1: use `re.split()` (from gimel)\n return re.split(r'[%s]' % seps, text)\n\n ## Method 2: DIY (from pprzemek)\n '''\n res = [text]\n for sep in seps:\n text, res = res, []\n for s in text:\n res += s.split(sep)\n return res\n '''", "def splitAtSeparators(expressions):\n splitExpressions = []\n wordStart = 0\n for index, expression in enumerate(expressions):\n if expression.variant == TestExpression.Variant.Separator:\n splitExpressions.append(expressions[wordStart:index])\n wordStart = index + 1\n splitExpressions.append(expressions[wordStart:])\n return splitExpressions", "def tsplit(s, sep):\n stack = [s]\n for char in sep:\n pieces = []\n for substr in stack:\n pieces.extend(substr.split(char))\n stack = pieces\n return stack", "def split(a):\r\n compos = [-1] # compos stores the positions of the relevant commas in the argument string\r\n compos.extend(t[2][1] for t in generate_tokens(StringIO(a).readline) if t[1] == ',')\r\n compos.append(len(a))\r\n return [ a[compos[i]+1:compos[i+1]] for i in xrange(len(compos)-1)]", "def batch_declarations(declarations):\n return declarations.split(\"\\n\\n\")", "def separate_semicolon(s):\n return s.split(';')", "def split_string(source,splitlist):\n\tspaces = \" \" * len(splitlist)\n\ttranstable = string.maketrans(splitlist, spaces)\n\tsource = string.translate(source, transtable)\n\treturn source.split()", "def base_parsing(lines):\n lines = [l.strip() for l in lines]\n return [ tuple(line.split(sep='-')) for line in lines ]", "def partitionall(s, sep=None):\n ls = s.split(sep)\n nls = [sep] * (len(ls) * 2 - 1)\n nls[::2] = ls\n return nls", "def Split(S):\n # for each char do\n # if c splits S into s1 and s2\n # then return {s1, s2}\n \n # return S", "def test_split(self):\n\n p1 = \"std::vector<char, std::allocator<char> >\"\n p2 = \"std::vector<int, std::allocator<int> >\"\n args_list = [\n \"const std::basic_string<char> &\", \"const int &\", \"const double &\"]\n\n for arg in args_list:\n\n li = [p1]\n name, args = declarations.templates.split(\n \"myClass0a<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass0a\")\n self.assertEqual(args, li)\n\n li = [p1, p2]\n name, args = declarations.templates.split(\n \"myClass0b<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass0b\")\n self.assertEqual(args, li)\n\n li = [p1, p2, p2]\n name, args = declarations.templates.split(\n \"myClass0c<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass0c\")\n self.assertEqual(args, li)\n\n li = [p1 + \" (\" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass1<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass1\")\n self.assertEqual(args, li)\n\n li = [p1 + \" (\" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass2<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass2\")\n self.assertEqual(args, li)\n\n li = [p2 + \" (\" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass3<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass3\")\n self.assertEqual(args, li)\n\n li = [p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass4<\" + \", \".join(li) + \">\")\n 
self.assertEqual(name, \"myClass4\")\n self.assertEqual(args, li)\n\n li = [\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\",\n p1]\n name, args = declarations.templates.split(\n \"myClass5<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass5\")\n self.assertEqual(args, li)\n\n li = [\n p1,\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass6<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass6\")\n self.assertEqual(args, li)\n\n li = [\n p2 + \" (\" + arg + \")\",\n p1,\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass7<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass7\")\n self.assertEqual(args, li)\n\n li = [\n p1,\n p2 + \" (\" + arg + \")\",\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass8<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass8\")\n self.assertEqual(args, li)\n\n li = [\n p2 + \" (\" + arg + \")\",\n p1 + \" (\" + arg + \", \" + arg + \")\",\n p1]\n name, args = declarations.templates.split(\n \"myClass9<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass9\")\n self.assertEqual(args, li)\n\n li = [\n p2 + \" (\" + arg + \")\",\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\",\n p1,\n p2]\n name, args = declarations.templates.split(\n \"myClass10<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass10\")\n self.assertEqual(args, li)", "def tokenize(source_code):\n delimiters = '();'\n for delimiter in delimiters:\n source_code = source_code.replace(delimiter, ' '+delimiter+' ')\n return source_code.split()", "def split(self, splits, catchall=False):\r\n raise NotImplementedError()", "def split_tokens(s):\n parts = []\n parens = 0\n for opar, cpar, unquoted, quoted, residue in TOKENS_FINDALL(s):\n if unquoted:\n parts.append(unquoted)\n elif quoted:\n parts.append(quoted[1:-1])\n elif opar:\n parens += 1\n parts.append(opar)\n elif cpar:\n parens -= 1\n parts.append(cpar)\n elif residue == '$':\n if not parens:\n raise ValueError(\"'$' outside parenthesis in %r\" % (s))\n else:\n raise ValueError(residue, s)\n if parens:\n raise ValueError(\"Unbalanced parenthesis in %r\" % (s))\n return parts", "def split_by_comma_and_whitespace(s):\r\n return re.split(r'[\\s,]+', s)", "def split_into_tokens(dataset, delimiter=\"\"):\n pass", "def variable_parser(var_list, prefix):\r\n ret_list = []\r\n for var in var_list:\r\n varname = var.name\r\n varprefix = varname.split('/')[0]\r\n if varprefix == prefix:\r\n ret_list.append(var)\r\n return ret_list", "def vars(svars):\n return np.array([pm.var(var) for var in svars.split()])", "def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans", "def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans", "def variable_parser(var_list, prefix):\r\n ret_list = []\r\n for var in var_list:\r\n varname = var.name\r\n varprefix = varname.split('/')[0]\r\n if varprefix == prefix:\r\n ret_list.append(var)\r\n elif prefix in varname:\r\n ret_list.append(var)\r\n return ret_list", "def get_list_slice_vars(list_):\n\n # Dump unwanted portions\n array, list_ = list_.split(\"[\")\n list_ = list_.split(\"]\", 1)[0]\n\n # Split at ':'\n variables = 
list_.split(\":\")\n var_count = len(variables)\n\n step = \"\"\n\n # If step provided\n if var_count == 3:\n\n # If provided, store provided values\n start, stop, step = variables\n else:\n\n # Else store start, stop with default step\n start, stop = variables\n\n # If values are not provided by user, fall back to defaults\n\n # Set start default to 0\n if not start:\n start = \"0\"\n\n # Set stop default to array length\n if not stop:\n stop = \"Array.length\"\n\n # Set step default to 1\n if not step:\n step = \"1\"\n\n # Return stripped array with extracted values\n return array, start, stop, step", "def parse_variables(self, text, separator=None):\n\n def splitter(x, separator=None):\n if len(x) > 1:\n y = x.split(separator)\n return (y[0], y[-1])\n return (None, None)\n\n return dict(splitter(x, separator=separator) for x in text.split(\"\\n\"))", "def parsePresetStrings(ps_list):\n\n return [parsePreset(ps) for ps in ps_list]", "def split_special_areas(\n data: str,\n area_delimiter: Iterable[str],\n):\n return re.split(\n \"({})\".format(\"|\".join(area_delimiter)),\n data,\n flags=re.MULTILINE\n )", "def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result", "def tokenize(source_code):\n return source_code.replace('(',' ( ').replace(')',' ) ').split()", "def split(a, sep=None, maxsplit=None):\n # This will return an array of lists of different sizes, so we\n # leave it as an object array\n return _vec_string(\n a, object_, 'split', [sep] + _clean_args(maxsplit))", "def _split_parameters(self, parameters):\n if not parameters:\n return []\n return [parameter.strip() for parameter in parameters.split(', ')]", "def split(self, s):\r\n l = [self._split(x) for x in _SPLIT_RE.split(s)]\r\n return [item for sublist in l for item in sublist]" ]
[ "0.58263", "0.5815856", "0.57184464", "0.5663677", "0.5640506", "0.5351031", "0.53371066", "0.530127", "0.52853507", "0.526529", "0.5252852", "0.52451015", "0.52377874", "0.5202907", "0.52014166", "0.5190723", "0.5180124", "0.5155712", "0.51366144", "0.51366144", "0.51365817", "0.5092034", "0.50832486", "0.5079246", "0.50629264", "0.5037259", "0.50280035", "0.5027042", "0.5022395", "0.5020664" ]
0.623241
0
Return the round separators if the SSA was obtained from a `RoundBasedFunction`. If the `SSA` object was obtained from `RoundBasedFunction.to_ssa` of a `RoundBasedFunction` including `add_round_outputs` calls in its ``eval``, this method returns a list with the round outputs delimiting the rounds. Otherwise, ``None`` is returned. In the first case, this list contains ``num_rounds - 1`` entries, where the ``i``-th entry is the list of outputs of the ``i``-th round. In particular, the outputs of the last round are not included in this list. The list returned by this method is meant to be used as the argument of `split` to get the `SSA` object of each round.

>>> from cascada.bitvector.ssa import RoundBasedFunction
def get_round_separators(self):
    if getattr(self, "_rounds_outputs", None) is None:
        return None
    if len(self._rounds_outputs) == 0:
        return None
    return self._rounds_outputs[:-1]
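To connect this with split, a hedged sketch follows; the toy ToyCipher, its num_rounds attribute and the round update are assumptions, while RoundBasedFunction, add_round_outputs, get_round_separators and split are the names appearing in this record and the previous one.

from cascada.bitvector.ssa import RoundBasedFunction

class ToyCipher(RoundBasedFunction):
    # hypothetical 2-round toy function
    input_widths = [8, 8]
    output_widths = [8, 8]
    num_rounds = 2  # assumed class attribute of RoundBasedFunction

    @classmethod
    def eval(cls, x, y):
        for _ in range(cls.num_rounds):
            x, y = y, x ^ y
            cls.add_round_outputs(x, y)  # register this round's outputs
        return x, y

ssa = ToyCipher.to_ssa(["x", "y"], "a")
separators = ssa.get_round_separators()  # outputs of every round except the last
if separators is not None:
    per_round_ssas = ssa.split(separators)  # one SSA object per round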
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rounds(self) -> List[Round]:\n return self._rounds", "def get_rounds_outputs(cls):\n if cls._rounds_outputs is None:\n raise ValueError(\"eval must be called before get_rounds_outputs\")\n return cls._rounds_outputs", "def roundParameters(self, roundNum):\n ruleName = random.choice(self.state.ruleNames)\n return SupportedRules[ruleName].makeRoundParameters(self, roundNum)", "def rounds(self):\n if self.round_number > 0:\n for i in range(self.round_number):\n yield Round(i + 1)", "def parallel_build_rounds(self):\n rounds_list = []\n n = len(self._sorted_savings)\n for i in range(1, n + 1):\n patient_a, patient_b = self.get_patients_pair_from_arg(self._arg_sorted_savings[n - i])\n patient_a_somewhere = self.search_rounds_for_patient(patient_a, rounds_list, True, True, True)\n patient_b_somewhere = self.search_rounds_for_patient(patient_b, rounds_list, True, True, True)\n patient_a_right = self.search_rounds_for_patient(patient_a, rounds_list, False, False, True)\n patient_b_left = self.search_rounds_for_patient(patient_b, rounds_list, True, False, False)\n if patient_a != patient_b and patient_a_somewhere is None and patient_b_somewhere is None:\n new_round = Round([patient_a, patient_b], problem=self._problem)\n self.add_round_if_possible(new_round, rounds_list, self._problem)\n elif patient_a_right is not None and patient_b_somewhere is None:\n merged_round = Round([patient for patient in patient_a_right.patients_list] + [patient_b],\n self._problem)\n self.add_merged_round_if_possible(merged_round, patient_a_right, rounds_list, self._problem)\n elif patient_b_left is not None and patient_a_somewhere is None:\n merged_round = Round([patient_a] + [patient for patient in patient_b_left.patients_list], self._problem)\n self.add_merged_round_if_possible(merged_round, patient_b_left, rounds_list, self._problem)\n elif patient_a_right is not None and patient_b_left is not None:\n if patient_a_right is not patient_b_left and patient_a_right.can_merge_right(patient_b_left):\n self.merge_rounds_if_possible(patient_a_right, patient_b_left, rounds_list, self._problem)\n return rounds_list", "def season_rounds(cls, season):\r\n\t\t\r\n\t\tfolder_name = cls.season_folder(season)\r\n\t\tround_list = os.listdir(f'Data/{folder_name}')\r\n\r\n\t\tall_rounds = []\r\n\r\n\t\tfor round_file in round_list:\r\n\t\t\twith open(f'Data/{folder_name}/{round_file}', 'r', encoding='utf-8') as f:\r\n\t\t\t\tround_info = f.read().splitlines()\r\n\r\n\t\t\tround_number = round_file[:-4]\r\n\t\t\tfull_round_name = f\"{season} R{round_number}\"\r\n\r\n\t\t\tround_date = int(round_info[0])\r\n\t\t\tlookup_ind = DATES.month_diff(round_date, DATES.MIN_DATE)\r\n\r\n\t\t\t# If the round isn't actually counted for TWOW Glicko\r\n\t\t\tif full_round_name not in cls.ROUNDS[lookup_ind].keys():\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tcontestant_count = len(round_info) - 1\r\n\r\n\t\t\tstrength = cls.ROUNDS[lookup_ind][full_round_name][0]\r\n\r\n\t\t\tall_rounds.append([\r\n\t\t\t\tround_number,\r\n\t\t\t\tround_date,\r\n\t\t\t\tcontestant_count,\r\n\t\t\t\tstrength\r\n\t\t\t])\r\n\t\t\r\n\t\treturn all_rounds", "def rounds(self) -> Row:\n return rounds(self.stage)", "def get_splits_from_resampling_strategy(self) -> List[Tuple[List[int], Optional[List[int]]]]:\n splits = []\n if isinstance(self.resampling_strategy, HoldoutValTypes):\n val_share = DEFAULT_RESAMPLING_PARAMETERS[self.resampling_strategy].get(\n 'val_share', None)\n if self.resampling_strategy_args is not None:\n val_share = 
self.resampling_strategy_args.get('val_share', val_share)\n splits.append(\n self.create_holdout_val_split(\n holdout_val_type=self.resampling_strategy,\n val_share=val_share,\n )\n )\n elif isinstance(self.resampling_strategy, CrossValTypes):\n num_splits = DEFAULT_RESAMPLING_PARAMETERS[self.resampling_strategy].get(\n 'num_splits', None)\n if self.resampling_strategy_args is not None:\n num_splits = self.resampling_strategy_args.get('num_splits', num_splits)\n # Create the split if it was not created before\n splits.extend(\n self.create_cross_val_splits(\n cross_val_type=self.resampling_strategy,\n num_splits=cast(int, num_splits),\n )\n )\n elif isinstance(self.resampling_strategy, NoResamplingStrategyTypes):\n splits.append((self.no_resampling_validators[self.resampling_strategy.name](self.random_state,\n self._get_indices()), None))\n else:\n raise ValueError(f\"Unsupported resampling strategy={self.resampling_strategy}\")\n return splits", "def snr_list(self):\n return [self.snr_one_iteration(q) for q in range(self.N_itr)]", "def mostVisited(self, n: int, rounds):\n start, end = rounds[0], rounds[-1]\n if start <= end:\n return list(range(start, end+1))\n else:\n return list(range(1, end+1)) + list(range(start, n+1))", "def round(self, decimals=0, out=None):\n if out is not None and not isinstance(out, tuple):\n out = (out,)\n return self.__array_ufunc__(\n np.round, \"__call__\", self, decimals=decimals, out=out\n )", "def sround(*values):\n last_decimals = _get_sround_decimals (values[-1])\n other_decimals = int(-numpy.floor(numpy.log10(values[-1])))\n l = []\n for value in values[:-1]:\n if value<0:\n l.append(-numpy.around (-value, other_decimals))\n else:\n l.append(numpy.around (value, other_decimals))\n value = values[-1]\n if value<0:\n l.append(-numpy.around (-value, last_decimals))\n else:\n l.append(numpy.around (value, last_decimals))\n\n if len (l)==1:\n return l[0]\n return l", "def splits(self) -> List[int]:\n if self._splits is None:\n self.RefreshStats()\n return self._splits", "def play_round(self):\n print('='*10) # Round separation display\n print(f'Round {self.round_num}:')\n for player in self.players:\n\n # Player separation display:\n if player != self.players[0]:\n print('-' * 5)\n\n self.play_turn(player)\n \n # Return if exit conditions are met\n if (self.exit_flag) or (self.winner is not None) or (self.board.full()):\n return\n self.round_num += 1", "def get_sandwichers(self):\n # pairs = [(square - 1, square + 1), (square - game[\"row_width\"], square + game[\"row_width\"])]\n results = []\n neighbors = self.get_neighbors()\n pairs = ((neighbors[0], neighbors[2]), (neighbors[1], neighbors[3]))\n for pair in pairs:\n if self.owner != pair[0].owner and pair[0].occupied and pair[0].owner == pair[1].owner \\\n and (not self.game.rules[\"trapping\"] or True not in [pair[0].trapped, pair[1].trapped]):\n results.extend([*pair])\n return results", "def getTurns(self, turns):\n\t\t\tiGameSpeed = gc.getGame().getGameSpeedType()\n\t\t\tif iGameSpeed == 1: return turns # normal\n\t\t\telif iGameSpeed == 0: # epic\n\t\t\t\t\tif turns == 3: return 5 # getTurns(6) must be a multiple of getTurns(3) for turn divisors in Stability.py\n\t\t\t\t\telif turns == 6: return 10\n\t\t\t\t\telse: return turns*3/2\n\t\t\t#elif iGameSpeed == 0: return turns*3 # marathon\n\t\t\t#elif iGameSpeed == 3: return turns*2/3 # quick\n\t\t\treturn turns", "def round_sequence(seq, nb_digits=5, roundf=round):\n def round_float(x):\n if isinstance(x, float):\n return roundf(x, nb_digits)\n elif 
isinstance(x, list) or isinstance(x, tuple):\n return round_sequence(x, nb_digits, roundf)\n else:\n return x\n rounded = map(round_float, seq)\n if isinstance(seq, tuple):\n rounded = tuple(rounded)\n return rounded", "def add_round_outputs(cls, *args):\n if len(args) == 1 and isinstance(args[0], collections.abc.Sequence):\n args = args[0]\n if not all(isinstance(bv, core.Term) for bv in args):\n raise ValueError(\"the arguments of add_round_outputs must be a bit-vectors\")\n if cls._rounds_outputs is None:\n cls._rounds_outputs = []\n cls._rounds_outputs.append(tuple(args))", "def applyRoundResults(seeds, results):\n\tnGames = len(results)\n\treturn [seeds[2*i] * results[i] + seeds[2*i+1] * (1 - results[i]) for i in range(nGames)]", "def splitnstrip(self, string, symbol=\":\", maxsplit=1):\n if symbol not in string:\n return None\n\n return self.stripall(string.split(symbol, maxsplit))", "def round_info(cls, season, round_n):\r\n\r\n\t\tfolder_name = cls.season_folder(season)\r\n\r\n\t\t# If the round does not exist in the folder for that season\r\n\t\tif f\"{round_n}.txt\" not in os.listdir(f'Data/{folder_name}'):\r\n\t\t\treturn False\r\n\r\n\t\twith open(f'Data/{folder_name}/{round_n}.txt', 'r', encoding='utf-8') as f:\r\n\t\t\tround_info = f.read().splitlines()\r\n\r\n\t\tround_date = int(round_info[0])\r\n\t\tlookup_ind = DATES.month_diff(round_date, DATES.MIN_DATE)\r\n\r\n\t\tfull_round_name = f\"{season} R{round_n}\"\r\n\r\n\t\tM_ROUNDS = cls.ROUNDS[lookup_ind]\r\n\t\tM_HISTORY = cls.HISTORY[lookup_ind]\r\n\r\n\t\t# If that round was not counted for TWOW Glicko\r\n\t\tif full_round_name not in M_ROUNDS.keys():\r\n\t\t\treturn False\r\n\r\n\t\tstrength = M_ROUNDS[full_round_name][0]\r\n\r\n\t\trankings = round_info[1:]\r\n\t\tRM_change = []\r\n\r\n\t\tfor ind, name in enumerate(rankings):\r\n\t\t\tname = name.strip()\r\n\r\n\t\t\t# If this name does not map to any players\r\n\t\t\tif not (name := cls.true_name(name)):\r\n\t\t\t\trankings[ind] = False\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\t# If the name does map to any players but is not in the history this month\r\n\t\t\tif name not in M_HISTORY:\r\n\t\t\t\trankings[ind] = False\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\t# If name is in history but this round is not\r\n\t\t\tif full_round_name not in M_HISTORY[name]:\r\n\t\t\t\trankings[ind] = False\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tRM_change.append(M_HISTORY[name][full_round_name][0])\r\n\r\n\t\t\trankings[ind] = name\r\n\t\t\r\n\t\t# Remove entries that were changed into False\r\n\t\trankings = [cont for cont in rankings if cont]\r\n\t\t\r\n\t\treturn [round_date, rankings, RM_change, strength]", "def get_round_list(tournament):\n database = TinyDB('db.json')\n rounds_table = database.table('round')\n # retrieving the list of identifiers of players following a tournament\n id_list = tournament['Liste des Tours']\n round_list = []\n for round_id in id_list:\n # getting the rounds\n round_db = rounds_table.get(doc_id=round_id)\n round_list.append(round_db)\n return round_list", "def split_trials(self) -> tuple[list[Trial], list[Trial]]:\n\n trials: list[Trial] = []\n for trial in self.registry:\n if trial.status != \"completed\":\n trial = self.strategy.infer(trial)\n\n if trial is not None:\n trials.append(trial)\n # NOTE: This assumes that all trials have an objective. 
Making assumption explicit.\n assert all(trial.objective is not None for trial in trials)\n sorted_trials = sorted(trials, key=lambda trial: trial.objective.value) # type: ignore\n\n split_index = int(numpy.ceil(self.gamma * len(sorted_trials)))\n\n below = sorted_trials[:split_index]\n above = sorted_trials[split_index:]\n\n return below, above", "def get_tourney_rounds(self, conference, year):\n ts_dict = self.get_tourney_slots()\n seed_dict = self.get_tourney_seeds()\n tr_dict = self.get_tourney_results()\n \n round_1 = list()\n round_2 = list()\n round_3 = list()\n round_4 = list()\n winner = list()\n \n round1_winners = list()\n for seed, team in seed_dict[year].items():\n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n round1_winners.append(seed[1:])\n #removes duplicates because I did this part weirdly... HEHEH\n round1_winners = list(set(round1_winners))\n\n win_counter = defaultdict(int)\n for seed, team in seed_dict[year].items(): \n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n win_counter[winning] += 1\n \n for slot, matchup in ts_dict[year].items():\n \n if conference in slot and \"R1\" in slot: \n round_1.append(\"{}-{}\".format(matchup[1:3], matchup[-2:]))\n round_1 = sorted(round_1)\n #for match in round_1:\n for winner1 in round1_winners:\n if winner1 in round_1[0]:\n for winner2 in round1_winners:\n if winner2 in round_1[-1]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[1]:\n for winner2 in round1_winners:\n if winner2 in round_1[-2]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[2]:\n for winner2 in round1_winners:\n if winner2 in round_1[-3]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[3]:\n for winner2 in round1_winners:\n if winner2 in round_1[-4]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n round_2 = sorted(round_2)\n\n round2_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 1:\n round2_winners.append(seed[1:])\n \n for winner1 in round2_winners:\n if winner1 in round_2[0]:\n for winner2 in round2_winners:\n if winner2 in round_2[-1]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_2[1]:\n for winner2 in round2_winners:\n if winner2 in round_2[-2]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n round_3 = sorted(round_3)\n\n round3_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 2:\n round3_winners.append(seed[1:])\n\n for winner1 in round3_winners:\n if winner1 in round_3[0]:\n for winner2 in round3_winners:\n if winner2 in round_3[-1]:\n round_4.append(\"{}-{}\".format(winner1, winner2))\n round_4 = sorted(round_4)\n\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 3:\n winner.append(seed[1:])\n\n conferences = {\"W\": \"East\", \"X\": \"Midwest\", \"Y\": \"South\", \"Z\": \"West\"}\n\n #print(\"CONFERENCE: {}, YEAR: {}\".format(conferences[conference], year))\n #print(\"ROUND1:\", round_1)\n #print(\"ROUND2:\", round_2)\n #print(\"ROUND3:\", round_3)\n #print(\"ROUND4:\", round_4)\n #print(\"WINNER:\", winner)\n\n #clearing out the tourney results dictionary\n #tr_dict.clear()\n\n return round_1, round_2, round_3, round_4, winner", "def squares(s):\n\n \"*** YOUR CODE HERE ***\"\n return [int(x**(1/2)) for x in 
s if x**(1/2) == round(x**(1/2))]", "def get_moves(self, pgn_data):\n result = []\n\n try:\n exp = '(?P<num>\\d+)\\.(?P<white>\\w+) (?P<black>[\\d|\\w|-]+)'\n result = re.findall(exp, pgn_data)\n\n if result[-1][2] == '1-0' or \\\n result[-1][2] == '0-1' or \\\n result[-1][2] == '1/2-1/2':\n last_item = result[-1]\n del result[-1]\n result.append((last_item[0], last_item[1], ''))\n except:\n pass\n\n return result", "def split(self, smiles):\n splitted_smiles = []\n for j, k in enumerate(smiles):\n if j == 0:\n if k.isupper() and smiles[j + 1].islower() and smiles[j + 1] != \"c\":\n splitted_smiles.append(k + smiles[j + 1])\n else:\n splitted_smiles.append(k)\n elif j != 0 and j < len(smiles) - 1:\n if k.isupper() and smiles[j + 1].islower() and smiles[j + 1] != \"c\":\n splitted_smiles.append(k + smiles[j + 1])\n elif k.islower() and smiles[j - 1].isupper() and k != \"c\":\n pass\n else:\n splitted_smiles.append(k)\n\n elif j == len(smiles) - 1:\n if k.islower() and smiles[j - 1].isupper() and k != \"c\":\n pass\n else:\n splitted_smiles.append(k)\n return splitted_smiles", "def ring_winners(b, players):\n winners = []\n winrank = ''\n s = [evaluator.evaluate(b, p) for p in players]\n for i, rank in enumerate(s):\n if rank == min(s):\n winners.append(i)\n winrank = evaluator.class_to_string(evaluator.get_rank_class(rank))\n return [winners, winrank]", "def get_result(self, roundNum):\n return self.results[roundNum]", "def _split(self, outputs: O, n: int) -> Tuple[O, O]:\n raise NotImplementedError" ]
[ "0.5567383", "0.5464221", "0.5120953", "0.511039", "0.5052884", "0.48159716", "0.46500137", "0.46258867", "0.45040205", "0.44882062", "0.4462644", "0.4399613", "0.43700054", "0.43601322", "0.43520933", "0.42661065", "0.42626894", "0.4245764", "0.42430812", "0.42364082", "0.42309847", "0.42292666", "0.42232183", "0.41862497", "0.4175823", "0.41754085", "0.41680542", "0.41482255", "0.4146382", "0.41451567" ]
0.6987893
0
Compile a C function given its C code as two strings (function header and body).
def _compile_C_code(header, body, return_unloaded=False, verbose=False):
    import importlib
    import tempfile
    import uuid

    import cffi

    module_name = "module_" + uuid.uuid4().hex

    if "__uint128" in header:
        raise ValueError("_compile_C_code does not support bit-vector widths "
                         "larger than 64 bits (cffi does not support __uint128)")

    ffibuilder = cffi.FFI()
    ffibuilder.cdef(header)
    ffibuilder.set_source(module_name, body)

    tmpdir = tempfile.TemporaryDirectory()
    lib_path = ffibuilder.compile(tmpdir=tmpdir.name, verbose=verbose)

    if return_unloaded:
        return lib_path, module_name, tmpdir

    # dynamic import
    # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
    spec = importlib.util.spec_from_file_location(module_name, lib_path)
    pymod_parent = importlib.util.module_from_spec(spec)
    # sys.modules[module_name] = module
    spec.loader.exec_module(pymod_parent)

    pymod = pymod_parent

    return pymod, tmpdir
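A hedged sketch of driving the helper above with a hand-written header/body pair; the xor8 function is made up for illustration, and the ffi/lib attributes of the compiled module follow standard cffi out-of-line usage rather than anything stated in this record.

header = "void xor8(uint8_t x, uint8_t y, uint8_t *z);"
body = """
#include <stdint.h>
void xor8(uint8_t x, uint8_t y, uint8_t *z){
    *z = x ^ y;
}
"""
pymod, tmpdir = _compile_C_code(header, body)
out = pymod.ffi.new("uint8_t *")
pymod.lib.xor8(0x0F, 0xF0, out)
assert out[0] == 0xFF
# keep a reference to tmpdir while pymod is in use: the temporary directory
# holding the compiled library is deleted once tmpdir is garbage collected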
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compile(self, filename, source):\n \n if source and source[-1] != '\\n':\n source = source + '\\n'\n code = __builtin__.compile(source, filename.cStr(), 'exec')\n\n # try to cache the compiled code\n pycFilename = Filename(filename)\n pycFilename.setExtension(pycExtension)\n try:\n f = open(pycFilename, 'wb')\n except IOError:\n pass\n else:\n f.write('\\0\\0\\0\\0')\n f.write(struct.pack('<I', self.timestamp))\n f.write(marshal.dumps(code))\n f.flush()\n f.seek(0, 0)\n f.write(imp.get_magic())\n f.close()\n\n return code", "def compile_function_to_c(self, function, signatures):\n\n code = []\n code += self.emit_prologue(function)\n labels = Labels()\n for instruction in function.instructions:\n (new_code, new_labels) = self.emit_instruction(instruction, labels, signatures)\n code += new_code\n labels = new_labels\n code += self.emit_epilogue(function)\n return '\\n'.join(code)", "def compile_function(self, function, arguments):", "def compile(self, args):\n if args not in self._compileinfos:\n cres = compile_with_dppl(self.py_func, None, args, debug=self.debug)\n func = cres.library.get_function(cres.fndesc.llvm_func_name)\n cres.target_context.mark_ocl_device(func)\n first_definition = not self._compileinfos\n self._compileinfos[args] = cres\n libs = [cres.library]\n\n if first_definition:\n # First definition\n cres.target_context.insert_user_function(self, cres.fndesc,\n libs)\n else:\n cres.target_context.add_user_function(self, cres.fndesc, libs)\n\n else:\n cres = self._compileinfos[args]\n\n return cres.signature", "def cross_compile(*args, **kwargs):\n return compile(*args, **kwargs)", "def _compile(self, source: str, filename: str) -> CodeType:\n return compile(source, filename, \"exec\") # type: ignore", "def get_C_code(self, C_function_name):\n from cascada.bitvector.printing import BvCCodePrinter\n\n width2type = BvCCodePrinter._width2C_type\n\n # in C, * binds to the declarator, not the type specifier\n input_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.input_vars])\n output_vars_c = ', '.join([\"{} *{}\".format(width2type(v.width), v.name) for v in self.output_vars])\n if self.external_vars:\n external_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.external_vars])\n external_vars_c = external_vars_c + \", \"\n else:\n external_vars_c = \"\"\n\n aux = f\"void {C_function_name}({input_vars_c}, {external_vars_c}{output_vars_c})\"\n header = f\"{aux};\"\n body = f\"#include <stdint.h>\\n{aux}{{\" # stdint for uint_*\n\n outvar2outvar_c = {v: core.Variable(\"*\" + v.name, v.width, allowed_symbols=\"*\") for v in self.output_vars}\n\n def primary_assignment2C_code(my_var, my_expr):\n assert isinstance(my_expr, (core.Constant, core.Variable, operation.PrimaryOperation))\n if my_var in self.output_vars:\n return f\"*{my_var} = {my_expr.crepr()};\"\n else:\n return f\"{width2type(my_var.width)} {my_var} = {my_expr.crepr()};\"\n\n for var, expr in self.assignments.items():\n expr = expr.xreplace(outvar2outvar_c)\n if isinstance(expr, operation.SecondaryOperation):\n expr = expr.doit(eval_sec_ops=True)\n body += f\"\\n\\t{primary_assignment2C_code(var, expr)}\"\n body += \"\\n};\"\n\n return header, body", "def compile_functions_to_c(self, functions, externs=[]):\n # Mangle main function: real main is provided by libv\n for function in functions:\n if function.signature.name == \"main\":\n function.signature.name = \"vizh_main\"\n\n signature_list = externs + [function.signature for function in functions]\n \n 
# We need size_t and libv functions\n code = ['#include <stddef.h>',\n '#include \"libv.h\"']\n\n # First output forward declarations for all functions and externs\n code += [f'{str(signature)};' for signature in signature_list]\n\n signature_list += libv_decls\n signatures = {signature.name: signature for signature in signature_list}\n\n errors = []\n for function in functions:\n try:\n code.append(self.compile_function_to_c(function, signatures))\n except CompilerError as err:\n errors.append((function.signature.name,err))\n\n if len(errors) > 0:\n messages = [f'Error while compiling {func_name}: {err}' for func_name, err in errors]\n raise CompilerError('\\n'.join(messages))\n \n return '\\n'.join(code)", "def make_func_code(params):\n class FuncCode(object):\n __slots__ = ('co_varnames', 'co_argcount')\n fc = FuncCode()\n fc.co_varnames = params\n fc.co_argcount = len(params)\n return fc", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def compile(c_file: File) -> File:\n os.system(\"gcc -c {c_file}\".format(c_file=c_file.path))\n return File(c_file.path.replace('.c', '.o'))", "def compile_c(self):\n if(self.input == \"\"):\n stderr = subprocess.run(\n [\"gcc\", self.id+\".c\", \"-o\", self.id+\"_c\"], stderr=subprocess.PIPE).stderr.decode('utf-8')\n if(len(stderr) == 0):\n self.status = 1\n stdout = subprocess.run(\n [\"./\"+self.id+\"_c\"], stdout=subprocess.PIPE).stdout.decode('utf-8')\n self.output = stdout\n else:\n self.status = 0\n self.output = stderr\n else:\n pass", "def compile(expression):", "def compileCode(pretext, codetext, filename):\n\n try:\n if codetext:\n co = compile(codetext, filename, \"exec\")\n o = [ pretext, co, codetext ]\n else:\n o = [ pretext, None, codetext ]\n except:\n o = [ pretext, None, codetext ]\n\n print >> sys.stderr, \\\n \"Error compiling template in the following code:\"\n print >> sys.stderr, codetext\n\n try:\n etype, value, tb = sys.exc_info()\n print_exception(etype, value, tb, None, sys.stderr)\n finally:\n etype = value = tb = None\n if not opts.ignore_errors:\n errors = 1\n\n print >> sys.stderr\n return o", "def make_cpp_func_bodies(self):\n\t\tfor name, body in self.func_bodies.iteritems():\n\t\t\tt = Lexer(body).get_tokens()\t\t\t\n\t\t\tS = [] #Stack\n\t\t\tx = 0\n\t\t\twhile x < len(t):\n\t\t\t\tif t[x] == '(': #function call begins\n\t\t\t\t\tx += 1\n\t\t\t\t\tS.append(self.FUNCS_DICT.get(t[x], t[x]) + '(')\n\t\t\t\telif t[x] == ')': 
#function call ends\n\t\t\t\t\tacc = ''\n\t\t\t\t\twhile S[-1][-1] != '(':\n\t\t\t\t\t\t#pop off params until function call is reached\n\t\t\t\t\t\tacc = S.pop() + ',' + acc\n\t\t\t\t\t# [:-1] to strip off comma at the end\n\t\t\t\t\tS.append(S.pop() + acc[:-1] + ')') #S.pop() gives function\n\t\t\t\telse:\n\t\t\t\t\tS.append(self.convert_atom(t[x]))\n\t\t\t\tx += 1\n\t\t\tself.cpp_func_bodies[name] = S[0]", "def compile_code(name, code, context=None):\n if context is None:\n context = {} # pragma: no cover\n try:\n obj = compile(code, \"\", \"exec\")\n except SyntaxError as e: # pragma: no cover\n raise SyntaxError(f\"Unable to compile\\n{code}\") from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l) # pylint: disable=W0122\n return context_l[name]", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def gen_c_code(self, comp, dest, jump):\r\n return '111' + self.comp(comp) + self.dest(dest) + self.jump(jump)", "def _llvm_jit_code(args, expr, signature, callback_type):\n if callback_type is None:\n jit = LLVMJitCode(signature)\n else:\n jit = LLVMJitCodeCallback(signature)\n\n jit._create_args(args)\n jit._create_function_base()\n jit._create_param_dict(args)\n strmod = jit._create_function(expr)\n if False:\n print(\"LLVM IR\")\n print(strmod)\n fptr = jit._compile_function(strmod)\n return fptr", "def get_code(self, fullname):\n source_path = self.get_filename(fullname)\n source_bytes = self.get_data(source_path)\n return compile(source_bytes, source_path, 'exec',\n dont_inherit=True)", "def compile_helper(debug=0):\n cmd = ['gcc', '-x', 'c', '-fPIC', '--shared', '-lpthread', '-lxclib',\n '-o', '/tmp/libcygnet4k.so', '-']\n if debug:\n cmd[1:1] = ['-g', 
'-O0', '-DDEBUG=%d' % debug]\n else:\n cmd[1:1] = ['-O3']\n\n process = subprocess.Popen(cmd, stdin=subprocess.PIPE, bufsize=-1)\n process.stdin.write(code)\n process.stdin.close()\n process.wait()", "def _build_code_from_func(self, func: Callable) -> str:\n with open(assets.paths.PARTIAL_MAIN_FILEPATH, 'r') as f:\n main_string = f.read()\n lines = inspect.getsourcelines(func)\n\n tabs_diff = lines[0][0].count(' ') - 1\n for line_index in range(len(lines[0])):\n line_tabs = lines[0][line_index].count(' ') - tabs_diff\n lines[0][line_index] = (' ' * line_tabs) + lines[0][line_index].strip() + '\\n'\n\n method_func_string = \"\".join(lines[0])\n\n code = '{}\\n{}\\n @staticmethod\\n{}'.format('', main_string,\n method_func_string)\n return code", "def compile(self, code, options=''):\n try:\n data = self.client.cli.compile_contract(body=dict(\n code=code,\n options=options\n ))\n return data.bytecode\n except OpenAPIClientException as e:\n raise ContractError(e)", "def compile() -> str:\n content = request.get_json()\n code = injection + \"\\n\" + content[\"input_code\"]\n stdin = content[\"input_stdin\"]\n args = [\n \"python\",\n \"-c\",\n code,\n config[\"blocked\"][\"imports\"],\n config[\"blocked\"][\"functions\"],\n ]\n\n process = Popen(\n args,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE,\n encoding=config[\"process\"][\"encoding\"],\n )\n\n try:\n stdout, stderr = process.communicate(\n stdin, timeout=float(config[\"process\"][\"timeout\"])\n )\n except TimeoutExpired:\n return json.dumps(\n {\n \"output\": \"The program takes too long to execute\",\n \"error\": str(config[\"process\"][\"timeout\"]),\n }\n )\n\n return json.dumps({\"output\": str(stdout), \"error\": str(stderr)})", "def cross_compiler(compile_func, base_options=None, output_format=\"so\"):\n if base_options is None:\n base_options = []\n def _fcompile(outputs, objects, options=None):\n all_options = base_options\n if options is not None:\n all_options += options\n compile_func(outputs, objects, options=all_options)\n _fcompile.output_format = output_format\n return _fcompile", "def call_gcc(fn):\n x = subprocess.check_output(['gcc', '-fdump-tree-original-raw=stdout', '-c',\n '-o/dev/null', fn])\n return x.decode(\"utf-8\")", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = 
\"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def compile_udf(udf, type_signature):\n import cudf.core.udf\n\n key = make_cache_key(udf, type_signature)\n res = _udf_code_cache.get(key)\n if res:\n return res\n\n # We haven't compiled a function like this before, so need to fall back to\n # compilation with Numba\n ptx_code, return_type = cuda.compile_ptx_for_current_device(\n udf, type_signature, device=True\n )\n if not isinstance(return_type, cudf.core.udf.masked_typing.MaskedType):\n output_type = numpy_support.as_dtype(return_type).type\n else:\n output_type = return_type\n\n # Populate the cache for this function\n res = (ptx_code, output_type)\n _udf_code_cache[key] = res\n\n return res", "def run_c(snippet: str):\n\n # if the snippet is a single token, assume we should print it\n if re.match(r\"\\w+$\", snippet):\n snippet = f'printf(fmt({snippet}), {snippet});printf(\"\\\\n\");'\n\n cc = os.environ.get(\"CC\", \"cc\")\n run(cc, \"c\", C_PRELUDE, C_CODA, snippet)", "def build_func_body(func_name, arg_dict, return_type):\n body = \"\"\n arg_list = \"\"\n\n # the following are pointers to scalar outputs\n # Note: pBufferSize was renamed pBufferSizeInBytes in v6.5\n scalar_ptr_outputs = ['nnzTotalDevHostPtr',\n 'pBufferSize',\n 'pBufferSizeInBytes',\n 'resultDevHostPtr']\n\n is_creator = 'cusparseCreate' in func_name\n is_getter = 'cusparseGet' in func_name\n\n if return_type == 'cusparseStatus_t' and not (is_creator or is_getter):\n is_return = False\n else:\n is_return = True\n\n # else:\n return_str = ''\n for k, v in arg_dict.items():\n\n \"\"\"\n set some flags based on the name/type of the argument\n will use these flags to determine whether and how to call ffi.new or\n ffi.cast on each variable\n \"\"\"\n is_ptr = '*' in v\n is_cusparse_type = '_t' in v\n is_cusparse_ptr = is_ptr and is_cusparse_type\n is_output_scalar = k in scalar_ptr_outputs\n if k in ['alpha', 'beta']:\n is_scalar = True\n else:\n is_scalar = False\n if is_getter:\n is_gpu_array = False\n else:\n is_gpu_array = is_ptr and (not is_cusparse_ptr) and (not is_scalar)\n if 'Complex' in v:\n is_complex = True\n else:\n is_complex = False\n\n # convert variable to appropriate type for the FFI\n if is_output_scalar:\n # for scalar outputs make a new pointer\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_getter and is_ptr and (return_type == 'cusparseStatus_t'):\n # any pointers in cusparseGet* are new outputs to be created\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n elif is_gpu_array:\n # pass pointer to GPU array data (use either .ptr or .gpudata)\n body += \"%s = ffi.cast('%s', %s.ptr)\\n\" % (k, v, k)\n elif is_cusparse_ptr:\n if is_creator:\n # generate custom cusparse type\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n else:\n # cast to the custom cusparse type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_ptr and is_scalar:\n # create new pointer, with value initialized to scalar\n if is_complex:\n # complex case is a bit tricky. 
requires ffi.buffer\n body += \"%sffi = ffi.new('%s')\\n\" % (k, v)\n if 'cusparseC' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex64(%s).tostring()\\n\" % (k, k)\n elif 'cusparseZ' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex128(%s).tostring()\\n\" % (k, k)\n else:\n body += \"%s = ffi.new('%s', %s)\\n\" % (k, v, k)\n elif is_ptr or v == 'cudaStream_t':\n # case non-scalar pointer to appropriate type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n else:\n # don't need explicit cast for plain int, float, etc\n pass\n\n # build the list of arguments to pass to the API\n if is_ptr and is_scalar and is_complex:\n # take into account modified argument name for complex scalars\n arg_list += \"%sffi, \" % k\n else:\n arg_list += \"%s, \" % k\n\n # add the function call and optionally return the result\n last_key = k\n arg_list = arg_list[:-2] # remove trailing \", \"\n if is_getter and return_type != 'cusparseStatus_t':\n body += \"return ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n else:\n # check cusparseStatus_t state before returning\n call_str = \"status = ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n body += split_line(call_str, break_pattern=', ', nmax=76)\n body += \"cusparseCheckStatus(status)\\n\"\n if is_return:\n # len(arg_dict) == 2) is to avoid return for cusparseGetLevelInfo\n if is_creator or (is_getter and (len(arg_dict) == 2)):\n body += \"return %s[0]\\n\" % last_key\n else:\n body += \"#TODO: return the appropriate result\"\n body += '\\n\\n'\n return reindent(body, numSpaces=4, lstrip=False)" ]
[ "0.7229262", "0.6958879", "0.68127483", "0.663641", "0.6623606", "0.6555563", "0.6529795", "0.63210225", "0.6301976", "0.6199189", "0.61737925", "0.61382604", "0.60743004", "0.60626096", "0.6054126", "0.60315853", "0.60043836", "0.59953725", "0.59876066", "0.59520745", "0.59267914", "0.5926494", "0.59220934", "0.58695275", "0.5851293", "0.58454186", "0.5841948", "0.58398736", "0.58333963", "0.5819487" ]
0.71571857
1
returns progress as a number between 0 and 100
def progress(self): return self.progressValue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def progress(self) -> int:\n return int(round(100 * self.somme() / self.finances))", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def progress(self) -> float:\n return self._progress", "def progress(self) -> float:\n return self._progress", "def progress(self) -> float:\n return self._progress", "def progress(self) -> float:\n return self._progress", "def progress(self):\n try:\n return 100.0 * (self.fields['sizeWhenDone'] - self.fields['leftUntilDone']) / float(self.fields['sizeWhenDone'])\n except ZeroDivisionError:\n return 0.0", "def getProgress(self):", "def get_progress(self, pr, id):\n\t\treturn round((self.handler.file_progress()[id] / pr.length) * 100, )", "def task_progress(project):\n complete = Task.objects.filter(project=project, status='C').count()\n total = Task.objects.filter(project=project).count()\n if total == 0:\n return 0\n\n return round(complete/total * 100, 2)", "def GetProgress(self):\n return self.new_progress", "def progress(self):\n percent = self._infos.get(BulkInsertState.IMPORT_PROGRESS, \"0\")\n return int(percent)", "def getProgress(self):\n return self._progress", "def progress(x):\n global progress_x\n x = int(x * 40 // 100)\n sys.stdout.write(\"#\" * (x - progress_x))\n sys.stdout.flush()\n progress_x = x\n return 0", "def build_progress(self) -> Union[int, float]:\n return self.proto.build_progress", "def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)", "def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value", "def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value", "def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value", "def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def set_progress(self, progress: float):", "def calculate_progress_percentage(d):\n successcounter = 0\n for test in d:\n if d[test][\"status\"] != \"not yet run\":\n successcounter += 1\n totalcounter = 0\n for test in d:\n totalcounter += 1\n return int(successcounter / totalcounter * 100)", "def progress(self):\n return self.runProgress", "def progress(self):\n return self.runProgress", "def _progress(self, num_completed_batches, data_loader):\n return '[{}/{} ({:.0f}%)]'.format(num_completed_batches, len(data_loader),\n 100.0 * num_completed_batches / len(data_loader))", "def get_progress(self):\n ret = self.state + \"\\n\"\n self.reset_progress()\n return ret", "def get_progress(self):\n return self.cloudserver.progress", "def get_progress(count, block_size, total_size) -> None:\r\n percent = int(count * block_size * 100 / total_size)\r\n print(f\"Downloading clip... {percent}%\", end=\"\\r\", flush=True)" ]
[ "0.79426646", "0.7889467", "0.7889467", "0.7813299", "0.7813299", "0.7813299", "0.7813299", "0.7616806", "0.755221", "0.74852914", "0.74636984", "0.73992074", "0.7360572", "0.73133385", "0.7295597", "0.726702", "0.7258826", "0.71958226", "0.71958226", "0.71958226", "0.71958226", "0.7131892", "0.7084339", "0.7074739", "0.70550025", "0.70550025", "0.6983376", "0.6930432", "0.6922421", "0.6916234" ]
0.79399526
1
Parse a file from Dunbrack's PICSE server. This will iterate all lines in the file and extract the list of proteins that match the resolution criteria.
def parse_file(self, path, max_resolution, threshold, proteins={}): """ create regex pattern here so it is not done repeatedly while parsing file groups: 0 - Protein ID 1 - Chain ID 2 - Length of protein chain 3 - Exptl. 4 - Resolution 5 - R-factor 6 - FreeRValue """ regex_str = '(\w{4})(\w)\s+(\d+)\s+(\w+)\s+([\d\.]+)\s+([\d\.]+)\s+([\d\.]+)' regex_pattern = re.compile(regex_str) printc('Processing: %s' % path) raw = None try: _file = gzip.open(path, 'r') #first line is labels, discard it _file.readline() for line in _file: match = regex_pattern.match(line) if match: groups = match.groups() if groups[0] in proteins: # if protein already exists just update the additional # chain information. The properties should not change # between records in the selection file. protein = proteins[groups[0]] if not groups[1] in protein['chains']: protein['chains'].append(groups[1]) #print 'Selecting Protein: %s Chain: %s Threshold: %s' % (groups[0],groups[1], threshold) else: # protein is not in proteins dict yet create initial # structure from parsed properties. resolution = float(groups[4]) if resolution > 0 and resolution <= max_resolution: proteins[groups[0]] = { 'code':groups[0], 'chains':[groups[1]], 'resolution':groups[4], 'rfactor':groups[5], 'rfree':groups[6], 'threshold':threshold } #print 'Selecting Protein: %s Chain: %s Threshold: %s' % (groups[0],groups[1], threshold) finally: if _file: _file.close() return proteins
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_protenn_matches(file: str):\n with open(file, \"rt\") as fh:\n for line in fh:\n sequence_id, pfam_acc, start, end = line.rstrip().split(\"\\t\")\n if re.fullmatch(r\"PF\\d+\", pfam_acc):\n yield sequence_id, pfam_acc, int(start), int(end)", "def read(read_file) -> list:\n result = []\n try:\n with open(read_file) as file:\n for lines in file:\n line = decode(lines.strip(\"\"))\n result.append(extract_information(line))\n global header\n header = result[0:2]\n result = result[3:]\n for word in result:\n if \"None\" in word[0:3]:\n raise InvalidPrincessException(\"Invalid princess!\")\n continue\n return result\n except FileNotFoundError:\n raise Exception(\"File not found!\")", "def mo_parse_p(self, filepath):\n\n # Now, can reprocess using tesseract-ocr rather than pdftotext\n ptext = textract.process(filepath, method='tesseract', encoding='utf-8')\n ptext = ptext.replace(b'\\xe2\\x80\\x94', b'-')\n ptext = ptext.decode('utf-8')\n keys = list(self.mo_coefficient_name_map.keys())\n\n # Get the calibration date:\n for line in ptext.splitlines():\n if 'CALIBRATION DATE' in line:\n items = line.split()\n ind = items.index('DATE:')\n cal_date = items[ind+1]\n cal_date = pd.to_datetime(cal_date).strftime('%Y%m%d')\n self.date.update({len(self.date): cal_date})\n\n if 'psia S/N' in line:\n items = line.split()\n ind = items.index('psia')\n prange = items[ind-1]\n name = self.mo_coefficient_name_map.get('prange')\n self.coefficients.update({name: prange})\n\n # Loop through each line looking for the lines which contain\n # calibration coefficients\n if '=' in line:\n # Tesseract-ocr misreads '0' as O, and 1 as IL\n line = line.replace('O', '0').replace('IL', '1').replace(\n '=', '').replace(',.', '.').replace(',', '.')\n line = line.replace('L', '1').replace('@', '0').replace('l', '1').replace('--', '-')\n if '11' in line and 'PA2' not in line:\n line = line.replace('11', '1')\n items = line.split()\n for n, k in enumerate(items):\n if k.lower() in keys:\n try:\n float(items[n+1])\n name = self.mo_coefficient_name_map.get(k.lower())\n self.coefficients.update({name: items[n+1]})\n except:\n pass\n if 'CC_ptcb2' not in list(self.mo_coefficient_name_map.keys()):\n self.coefficients.update({'CC_ptcb2': '0.000000e+000'})", "def _parse_file(cls, filepath):\n hdus = sunpy.io.read_file(filepath)\n return cls._parse_hdus(hdus)", "def read_pronunciation_file_as_list(self, filename):\n data = open(filename, mode=\"r\", encoding=\"utf8\").read()\n res = []\n for line in data.splitlines():\n if line.split()[1:]:\n for p in line.split()[1:]:\n res.append(p)\n return res", "def parser(filename):\n\n regex = re.compile(\n # prolog\n r\"run(?P<run>\\w+)\"\n ##r\"\\-(?P<code_name>((mfdn)|(obscalc-ob))[^\\-]*)\"\n r\"\\-(?P<descriptor>\"\n # descriptor contents\n r\"Z(?P<Z>\\d+)\\-N(?P<N>\\d+)\"\n r\"\\-(?P<interaction>.+)\\-(?P<coulomb>\\d)\"\n r\"\\-(?P<truncation_descriptor>.+)\"\n ## r\"\\-Nmax(?P<Nmax>\\d+)\"\n # epilog\n r\").res\"\n )\n\n conversions = {\n \"Z\" : int,\n \"N\" : int,\n \"interaction\" : str,\n \"coulomb\" : int,\n }\n\n match = regex.match(filename)\n if (match == None):\n raise ValueError(\"bad form for spncci results filename: \" + filename)\n info = match.groupdict()\n\n # convert fields\n for key in conversions:\n conversion = conversions[key]\n info[key] = conversion(info[key]) if (info[key] is not None) else None\n\n return info", "def mk_parses(listfile, corenlp_host):\n # if not listfile.endswith('.listfile'):\n # filetype = 'Co-Reference List file'\n # error = 'has 
incorrect file type'\n # raise FilenameException(\"Error: %s %s\" % (filetype, error))\n\n try:\n with open(listfile) as f:\n pserver = jsonrpc.ServerProxy(jsonrpc.JsonRpc20(),\n jsonrpc.TransportTcpIp(\n addr=(corenlp_host, 8080), limit=1000))\n parses = dict([(get_id(path), FileParse(path, pserver))\n for path in f.readlines()\n if path.lstrip()[0] != '#'])\n except IOError:\n stderr.write(strerror(EIO)) # stderr.write does not have newlines\n stderr.write(\"\\nERROR: Could not open list file\\n\")\n exit(EIO)\n else:\n return parses", "def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)", "def parse(self, fp):\n\n # create the plex scanner for fp\n self.create_scanner(fp)\n\n # call parsing logic\n self.stmt_list()\n print('Parsing successful!')", "def _process_file(self, file: str) -> List[Dict]:\n with open(self.base_path + file, encoding='latin-1') as hosts_file:\n records = []\n for line in hosts_file.readlines():\n line_contents = line.strip().split()\n if line_contents and line_contents[0][0] != \"#\":\n new_record = {\n RecordAttribute.CATEGORY: Category.GENERAL_MALWARE\n }\n if self.is_url_ip_address(line_contents[0]):\n new_record[RecordAttribute.IP] = line_contents[0]\n else:\n new_record[RecordAttribute.URL] = line_contents[0]\n records.append(new_record)\n return records", "def LoadNmapServiceFP(ServiceFileName):\n\n\t#File format details at http://nmap.org/vscan/vscan-fileformat.html\n\n\tglobal ServiceFPs\n\t\n\tLoadCount = 0\n\tCompileSuccess = 0\n\tCompileFail = 0\n\tPortList = \"\"\n\tPortArray = [ ]\n\n\tif os.path.isfile(ServiceFileName):\n\t\ttry:\n\t\t\tServiceHandle = open(ServiceFileName, \"r\")\n\t\t\tfor line in ServiceHandle:\n\t\t\t\tif (len(line) >= 5) and (line[0:6] == \"Probe \"):\n\t\t\t\t\t#print \"==== PROBE ====\"\n\t\t\t\t\tPortArray = [ ]\n\t\t\t\t\t#print len(PortArray), PortArray\t\t\t#len of empty array is 0\n\t\t\t\telif (len(line) >= 5) and (line[0:6] == \"match \"):\n\t\t\t\t\t#print \"match\"\n\t\t\t\t\t#print line\n\t\t\t\t\tInformationPresent = True\n\t\t\t\t\t\t\t\t\t\t\t\t\t#Sample line:\n\t\t\t\t\t\t\t\t\t\t\t\t\t# match srun m|^X\\0\\0\\0$| p/Caucho Resin JSP Engine srun/\n\t\t\t\t\tRemainder=line[6:].strip()\t\t\t\t\t# srun m|^X\\0\\0\\0$| p/Caucho Resin JSP Engine srun/\n\t\t\t\t\tMatchStart=Remainder.find(\" m\")\t\t\t\t\t# 4\n\t\t\t\t\tProtoString=Remainder[:MatchStart].replace(',', ';')\t\t# srun\n\t\t\t\t\t#At the moment, nmap-service-probes uses these separators:\n\t\t\t\t\t#3 m%, 2 m+, 126 m/, 29 m=, 2 m@, and 3509 m|\n\t\t\t\t\t#No flags on %, +, \n\t\t\t\t\t#Only flags should be \"i\" (case-insensitive) and \"s\" (\".\" can match newline)\n\t\t\t\t\tSeparator=Remainder[MatchStart+2:MatchStart+3]\t\t\t# |\n\t\t\t\t\tMatchEnd=Remainder.find(Separator,MatchStart+3)\t\t\t# 16\n\t\t\t\t\tMatchString=Remainder[MatchStart+3:MatchEnd]\t\t\t# ^X\\0\\0\\0$\n\n\t\t\t\t\t#Handle an \"i\" or \"s\" flag after separator\n\t\t\t\t\t#Debug(\"==== \" + Remainder[MatchEnd+1:MatchEnd+4])\n\t\t\t\t\tif MatchEnd + 1 == len(Remainder):\n\t\t\t\t\t\tInformationPresent = False\n\t\t\t\t\t\t#Debug(\"No information data for \" + MatchString)\n\t\t\t\t\telif (Remainder[MatchEnd+1:MatchEnd+2] == \" \"):\n\t\t\t\t\t\tPPointer=MatchEnd + 2\n\t\t\t\t\t\tMatchFlags = re.M\n\t\t\t\t\t\t#Debug(Remainder + \", no flags\")\n\t\t\t\t\telif (Remainder[MatchEnd+1:MatchEnd+3] == \"i \"):\n\t\t\t\t\t\tPPointer=MatchEnd + 3\n\t\t\t\t\t\tMatchFlags = re.M | re.I\n\t\t\t\t\t\t#Debug(Remainder + \", i flag\")\n\t\t\t\t\telif 
(Remainder[MatchEnd+1:MatchEnd+3] == \"s \"):\n\t\t\t\t\t\tPPointer=MatchEnd + 3\n\t\t\t\t\t\tMatchFlags = re.M | re.S\n\t\t\t\t\t\t#Debug(Remainder + \", s flag\")\n\t\t\t\t\telif (Remainder[MatchEnd+1:MatchEnd+4] == \"is \") or (Remainder[MatchEnd+1:MatchEnd+4] == \"si \"):\n\t\t\t\t\t\tPPointer=MatchEnd + 4\n\t\t\t\t\t\tMatchFlags = re.M | re.I | re.S\n\t\t\t\t\t\t#Debug(Remainder + \", i and s flag\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tDebug(\"Unrecognized nmap-service-probes flag combination\")\n\t\t\t\t\t\tprint MatchEnd + 1, len(Remainder)\n\t\t\t\t\t\tDebug(Remainder + \", unknown flags\")\n\t\t\t\t\t\t#quit()\n\n\t\t\t\t\t#Substitute ; for , in ProtoString and ServerDescription since we're using commas as field delimiters in output\n\t\t\t\t\tServerDescription=Remainder[PPointer:].replace(',', ';')\t# p/Caucho Resin JSP Engine srun/\n\t\t\t\t\t\n\t\t\t\t\t#The nmap-service-probes file uses a character set (\"[...]\") issue that python doesn't like.\n\t\t\t\t\t#If a \"-\" is used inside a character set, it should either be in the first or last position,\n\t\t\t\t\t#or used in a character range (\"[.....a-z.....]\"). The following move any dashes to first or \n\t\t\t\t\t#last position so re.compile is happy.\n\t\t\t\t\tMatchString=MatchString.replace(\"[\\w-\",\"[-\\w\")\t\t\t#The dash needs to be at the end or it's treated as a range specifier\n\t\t\t\t\tMatchString=MatchString.replace(\"[\\d-\",\"[-\\d\")\t\t\t#same\n\t\t\t\t\tMatchString=MatchString.replace(\"[\\w\\d-_.]\",\"[\\w\\d_.-]\")\t#and so on...\n\t\t\t\t\tMatchString=MatchString.replace(\"[\\w\\d-_]\",\"[\\w\\d_-]\")\n\t\t\t\t\tMatchString=MatchString.replace(\"[.-\\w]\",\"[.\\w-]\")\n\t\t\t\t\tMatchString=MatchString.replace(\"[\\s-\\w.,]\",\"[\\s\\w.,-]\")\n\t\t\t\t\tMatchString=MatchString.replace(\"[\\w\\d-.]\",\"[\\w\\d.-]\")\n\t\t\t\t\tMatchString=MatchString.replace(\"[\\d\\.-\\w]\",\"[\\d\\.\\w-]\")\n\t\t\t\t\tMatchString=MatchString.replace(\"[^-_A-Z0-9]\",\"[^_A-Z0-9-]\")\n\t\t\t\t\tMatchString=MatchString.replace(\"[^-A-Z0-9]\",\"[^A-Z0-9-]\")\n\n\t\t\t\t\tif (ServerDescription.find('Skype VoIP data channel') > -1):\n\t\t\t\t\t\t#This \"14 bytes of random stuff\" signature way misfires.\n\t\t\t\t\t\tpass\n\t\t\t\t\telif (ServerDescription.find('Microsoft Distributed Transaction Coordinator') > -1):\n\t\t\t\t\t\t#This \"ERROR\" signature matches other protocols.\n\t\t\t\t\t\tpass\n\t\t\t\t\telif (InformationPresent == False):\n\t\t\t\t\t\t#There's a regex match, but no information about, skip.\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t#We try to compile the MatchString now before inserting into ServiceFPs so the work only needs to be \n\t\t\t\t\t\t\t#done once. 
If this fails we fall down to the except and simply don't use the tuple.\n\t\t\t\t\t\t\t#Originally 413 out of 3671 match lines failed to compile because of \"-\" placement in character sets.\n\t\t\t\t\t\t\t#The problem, and a fixed version, have been reported to the nmap developers.\n\t\t\t\t\t\t\t#The use of \"str\" seems redundant, but we have occasionally gotten:\n\t\t\t\t\t\t\t#line 511: OutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t#TypeError: expected a character buffer object\n\t\t\t\t\t\t\tSearchTuple=(re.compile(MatchString, MatchFlags), str(ProtoString + \"://\" + ServerDescription))\n\t\t\t\t\t\t\tCompileSuccess += 1\n\t\t\t\t\t\t\tif (len(PortArray) == 0):\n\t\t\t\t\t\t\t\t#No ports declared yet; we'll place this search pair under the special port \"all\"\n\t\t\t\t\t\t\t\tif (not(ServiceFPs.has_key('all'))):\n\t\t\t\t\t\t\t\t\tServiceFPs['all'] = [ ]\n\t\t\t\t\t\t\t\tServiceFPs['all'].append(SearchTuple)\n\t\t\t\t\t\t\t\tLoadCount += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t#Register this search pair for every port requested\n\t\t\t\t\t\t\t\tfor OnePort in PortArray:\n\t\t\t\t\t\t\t\t\tif (not(ServiceFPs.has_key(int(OnePort)))):\n\t\t\t\t\t\t\t\t\t\tServiceFPs[int(OnePort)] = [ ]\n\t\t\t\t\t\t\t\t\tServiceFPs[int(OnePort)].append(SearchTuple)\n\t\t\t\t\t\t\t\t\tLoadCount += 1\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t#print \"Failed to compile \" + MatchString\n\t\t\t\t\t\t\tCompileFail += 1\n\t\t\t\t\t\n\t\t\t\telif (len(line) >= 5) and (line[0:6] == \"ports \"):\n\t\t\t\t\tPortArray = [ ]\n\t\t\t\t\tRawPortsString=line[6:].strip()\n\t\t\t\t\t#print \"ports are \", RawPortsString\n\t\t\t\t\tfor PortBlock in RawPortsString.split(\",\"):\t\t#Each PortBlock is either an individual port or port range\n\t\t\t\t\t\tif (PortBlock.find(\"-\") > -1):\n\t\t\t\t\t\t\t#We have a port range\n\t\t\t\t\t\t\tPortRange=PortBlock.split(\"-\")\n\t\t\t\t\t\t\tfor OnePort in range(int(PortRange[0]), int(PortRange[1]) + 1):\n\t\t\t\t\t\t\t\tPortArray.append(OnePort)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPortArray.append(PortBlock)\n\t\t\t\t\t#print len(PortArray), PortArray\n\t\t\t\telif (len(line) >= 9) and (line[0:10] == \"softmatch \"):\n\t\t\t\t\tpass\n\t\t\t\t\t#softmatches look very weak at the moment; none give a productname. Skip for the moment.\n\t\t\t\t\t#print \"softmatch\"\n\n\t\t\tServiceHandle.close()\n\n\t\t\tif (CompileFail == 0):\n\t\t\t\tDebug(str(CompileSuccess) + \" nmap service signatures successfully loaded.\")\n\t\t\telse:\n\t\t\t\tDebug(str(CompileSuccess) + \" nmap service signatures successfully loaded, unable to parse \" + str(CompileFail) + \" others.\")\n\t\t\treturn True\n\t\texcept:\n\t\t\tDebug(\"Failed to load \" + ServiceFileName)\n\t\t\treturn False\n\telse:\n\t\tDebug(\"Unable to find \" + ServiceFileName)\n\t\treturn False", "def parse(self, line):\n\t\n\t# remove trailing newline a-la Perl CHOMP\n\tline = line.rstrip(\"\\n\")\n\n\t\n # correctly formatted PDB files\n # TODO - assuming 80 chars means well formatted is\n # perhaps risky. 
Need a more robust way to asses\n # formatting validity\n\n if len(line) == 80:\n self.record_name = line[0:6].strip()\n self.atom_id = int(line[6:11].strip())\n self.atom_name = line[12:16].strip()\n self.alt_location = line[16]\n self.res_name = line[17:20].strip()\n self.chain = line[21]\n self.res_id = line[22:26].strip()\n self.res_ins_code = line[26]\n self.coord_X = float(line[30:38].strip())\n self.coord_Y = float(line[38:46].strip())\n self.coord_Z = float(line[46:54].strip())\n self.occupancy = float(line[54:60].strip())\n self.beta = float(line[60:66].strip())\n self.seg_ID = line[72:76].strip()\n self.element = line[76:78].strip()\n\t if line[78:80].strip() == \"\":\n\t\t self.charge=0.0\n\t else:\n\t\t self.charge = float(line[78:80].strip())\n self.chain_local_id = -1\n self.formatted_ok = True\n\n # Heuristic section - split by space and then use\n # errors in casting as flags for things being issues\n # Note this may need to be expanded as malformed edge-cases\n # are identified...\n else:\n rawsplitline = filter(None, line.split(\" \"))\n\t \n\t \n\n splitline = []\n for i in rawsplitline:\n if i == \"\\n\" or i == \"\\t\":\n pass\n else:\n splitline.append(i)\n \n num_cols = len(splitline)\n\n\t print num_cols\n \n try:\n if num_cols == 10:\n self.record_name = splitline[0] \n self.atom_id = int(splitline[1])\n self.atom_name = splitline[2] \n self.alt_location = \"\"\n self.res_name = splitline[3] \n self.chain = \"\"\n self.res_id = int(splitline[4])\n self.res_ins_code = \"\"\n self.coord_X = float(splitline[5]) \n self.coord_Y = float(splitline[6]) \n self.coord_Z = float(splitline[7])\n self.occupancy = float(splitline[8])\n self.beta = float(splitline[9])\n self.seg_ID = \" \"\n self.element = \" \" \n self.charge = \" \"\n self.chain_local_id = -1\n self.formatted_ok = False\n\n elif num_cols == 11:\n self.record_name = splitline[0] \n self.atom_id = int(splitline[1])\n self.atom_name = splitline[2] \n self.alt_location = \" \"\n self.res_name = splitline[3] \n self.chain = splitline[4]\n self.res_id = int(splitline[5])\n self.res_ins_code = \" \"\n self.coord_X = float(splitline[6]) \n self.coord_Y = float(splitline[7]) \n self.coord_Z = float(splitline[8]) \n self.occupancy = float(splitline[9]) \n self.beta = float(splitline[10])\n self.seg_ID = \" \"\n self.element = \" \" \n self.charge = \" \"\n self.chain_local_id = -1\n self.formatted_ok = False\n\n elif num_cols == 12:\n self.record_name = splitline[0] \n self.atom_id = int(splitline[1])\n self.atom_name = splitline[2] \n self.alt_location = \" \"\n self.res_name = splitline[3] \n self.chain = splitline[4]\n self.res_id = int(splitline[5])\n self.res_ins_code = \" \"\n self.coord_X = float(splitline[6]) \n self.coord_Y = float(splitline[7]) \n self.coord_Z = float(splitline[8]) \n self.occupancy = float(splitline[9]) \n self.beta = float(splitline[10])\n self.seg_ID = \" \"\n self.element = splitline[11] \n self.charge = \" \"\n self.chain_local_id = -1\n self.formatted_ok = False\n else:\n raise PDB_atomException(\"Did not match number of columns\")\n except ValueError,e:\n print \"Error with columns (using \" + str(num_cols) + \") columns\"\n print \"Tried to cast string to int/float\"\n raise e", "def simParser(filePath):\n\t\tresults = []\n\t\twith open(filePath + \".txt\", \"r\") as execFile:\n\t\t\tcontent = execFile.read()\n\n\t\t\tcycleStr = search(r'([cC]ycles.*?:\\s*)(\\d+)', content)\n\t\t\tassemblyInst = search(r'([iI]nstructions.*?:\\s*)(\\d+(.\\d+)?)', content)\n\n\t\t\tif cycleStr: 
results.append(cycleStr.group(2))\n\t\t\tif assemblyInst: results.append(assemblyInst.group(2))\n\n\t\treturn results", "def mk_fparse(filename, pserver):\n parses = []\n \n try:\n with open(filename) as f:\n vprint('OPEN: %s' % filename)\n xml = f.read()\n except IOError:\n print strerror(EIO)\n print(\"ERROR: Could not open %s\" % filename)\n return (parses, get_tagged_corefs(''), get_synsets({}))\n\n # remove unwanted characters from xml\n vprint('\\tPARSE: Parsing file: %s' % filename)\n # parse_tries = 0\n # while parse_tries < 5:\n # try:\n # t = loads(pserver.parse(_normalize_sentence(_remove_tags(xml))))\n # parse_tries = 0\n # break\n # except jsonrpc.RPCTimeoutError:\n # vprint('\\tERROR: RPCTimeoutError - retrying')\n # parse_tries += 3\n # except jsonrpc.RPCTransportError:\n # vprint('\\tERROR: RPCTransportError - retrying')\n # data = _normalize_sentence(_remove_tags(xml))\n # sentences = [sent for part in data.split('\\n\\n')\n # for sent in sent_tokenize(part)]\n # try:\n # xml1 = data[:data.find(sentences[len(sentences)/3])]\n # xml2 = data[data.find(sentences[len(sentences)/3+1]):data.find(sentences[2*len(sentences)/3])]\n # xml3 = data[data.find(sentences[2*len(sentences)/3+1]):]\n # t1 = loads(pserver.parse(xml1))\n # t2 = loads(pserver.parse(xml2))\n # t3 = loads(pserver.parse(xml3))\n # t = dict(t1.items() + t2.items() + t3.items())\n # parse_tries = 0\n # break\n # except Exception:\n # parse_tries = -1\n # break\n # parse_tries += 1\n # if parse_tries != 0:\n # vprint('\\tFATAL: RPCTransportError - skipping')\n \n sentences = [sent for part in xml.split('\\n\\n')\n for sent in sent_tokenize(part)]\n vprint('\\tPARSE: Parsing sentences: %s' % filename)\n for sent in sentences:\n sent_corefs = get_tagged_corefs(sent, ordered=True)\n # remove unwanted characters from xml\n sent = _normalize_sentence(_remove_tags(sent))\n parse_tries = 0\n while parse_tries < 5:\n try:\n sparse = loads(pserver.parse(sent))\n parse_tries = 0\n break\n except jsonrpc.RPCTransportError:\n vprint('\\tERROR: RPCTransportError - retrying')\n parse_tries += 1\n if parse_tries != 0:\n vprint('\\tFATAL: RPCTransportError - skipping')\n \n pparse = _process_parse(sparse, sent_corefs)\n if pparse:\n parses.append(pparse)\n\n pos_tags = {}\n for parse in parses:\n for word, attr in parse[1]:\n tags = pos_tags.get(word, set())\n tags.add(attr['PartOfSpeech'])\n pos_tags[word] = tags\n \n return parses, get_tagged_corefs(xml), get_synsets(pos_tags)", "def parse_prnu_file():\n hdf_name = r'C:\\Users\\nmishra\\Workspace\\TEMPO\\PRNU_map\\\n batch_2017Jun20_TEMPO_PRNU_-20Tccd__46Tfpe_3pixSpectral_3pixSpatial.h5'\n file = h5py.File(hdf_name, 'r')\n prnu = file.get('prnu')\n prnu = np.array(prnu).transpose()\n quad_d = prnu[2:1030, 10:1034]\n quad_c = prnu[2:1030, 1078:2102]\n quad_a = prnu[1062:2090, 10:1034]\n quad_b = prnu[1062:2090, 1078:2102]\n prnu_map_lower = np.concatenate((quad_d, quad_c), axis=1)\n prnu_map_upper = np.concatenate((quad_a, quad_b), axis=1)\n prnu_map = np.concatenate((prnu_map_lower, prnu_map_upper), axis=0)\n return prnu_map", "def parse_file():\n\tfile_lines = []\n\n\t## For each line in the file, if it's not empty, store it\n\tfor line in fileinput.input():\n\t\tif len(line) > 1:\n\t\t\tfile_lines.append(line.strip())\n\t\n\trun_algorithms(file_lines)", "def parse_records(self):\n for record in sp.parse(gzip.open(\n \"./human_uniprot_04_07_20.gz\", 'rt')):\n # print(record.taxonomy_id)\n # if record.organism != \"Homo sapiens\":\n # continue\n # print(record.features[0])\n # 
for comment in record.comments:\n # if comment.startswith(\"SUBCELLULAR LOCATION\"):\n # print(comment)\n self.extract_features_to_dict(record)\n self.extract_localization(record)", "def get_parsed_paragraphs_from_file(self, processed_path):\n with open(processed_path, \"r\") as f:\n sent_len = json.loads(f.readline())['sentence_lens']\n paragraphs = list()\n line_no = 1\n para_idx = 0\n while para_idx < len(sent_len):\n paragraph = list()\n end_no = sent_len[para_idx]\n while line_no < end_no:\n sent = json.loads(f.readline())\n sent[\"sid\"] = self.generate_sid(sent, processed_path, line_no)\n paragraph.append(sent)\n line_no += 1\n para_idx += 1\n paragraphs.append(paragraph)\n return paragraphs", "def parse_pion(m1, m2, config):\n f = open(in_name(m1, m2, config), 'r')\n \n # Skip to the mixed pion correlator section.\n # This assumes it is the first PION entry.\n x = f.readline()\n while x:\n if re.match('correlator:\\s+PION', x):\n break\n x = f.readline()\n\n # Throw away header.\n print x\n for i in range(5):\n print f.readline().strip()\n\n result = []\n for i in range(64):\n t, r, im = f.readline().strip().split('\\t')\n result.append(complex(float(r), float(im))) \n \n f.close()\n\n return np.array(result)", "def Parse(filename: str) -> List[op.Operation]:\n\n # Open File\n file = open(filename, \"r\")\n\n # Get real file name\n index = filename.rfind(\"/\")\n if index == -1:\n index = filename.rfind(\"\\\\\")\n if index == -1:\n activeFile = filename\n else:\n activeFile = filename[index + 1:len(filename)]\n activeFileName = activeFile.split(sep=\".\")[0]\n\n print(f\"Parsing {activeFile}\")\n\n # Multi threading\n pool = ProcessPoolExecutor()\n futures = []\n\n lines = file.readlines()\n\n # start Threads\n lineNumber = 0\n for line in lines:\n futures.append(pool.submit(_ParseLine, line,\n lineNumber, activeFileName))\n lineNumber += 1\n\n wait(futures)\n successfullyParsed = []\n invalidCounter = 0\n commentCounter = 0\n # Put results in list\n for future in futures:\n result = future.result()\n # Remove invalid lines\n if isinstance(result, op.Invalid):\n invalidCounter += 1\n continue\n # Remove comments\n if isinstance(result, op.Comment):\n commentCounter += 1\n continue\n successfullyParsed.append(result)\n\n # Print for Debug\n if commentCounter > 0:\n print(f\"Ignoring {commentCounter} comments\")\n if invalidCounter > 0:\n print(Fore.YELLOW + f\"WARNING: {invalidCounter} invalid lines\")\n\n # Close File\n file.close()\n\n return successfullyParsed", "def parseFile(filename):\n\n Parse.data = []\n with open(filename, \"r\") as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data", "def read_prn(file_name):\n\n with open(file_name) as f:\n f = MyIter(f)\n try:\n assert next(f).strip() == 'prn:13|'\n assert next(f).strip() == 'subtype = MT2500'\n assert next(f).strip() == 'Doc={MT2500:14|'\n\n assert next(f).strip() == 'Film={12.1|'\n film_data = {}\n for line in f:\n if '}' in line:\n break\n key, value = read_key_value(line)\n film_data[key] = value\n\n assert next(f).strip() == 'Test_Info={2|'\n test_info = {}\n for line in f:\n if '}' in line:\n break\n key, value = read_key_value(line)\n test_info[key] = value\n\n assert next(f).strip() == 'Test_Data=('\n test_data = []\n for i, line in enumerate(f):\n if line.strip() != '{6|':\n break\n\n test_data.append({})\n for line in f:\n if '[' in line:\n break\n key, value = read_key_value(line)\n test_data[i][key] = try_to_num(value)\n\n xs, ys = [], []\n for line in f:\n if ']' in line:\n 
break\n x, y = line.split(',')\n xs.append(x)\n ys.append(y)\n\n test_data[i]['xs'] = np.array(xs, dtype='float')\n test_data[i]['ys'] = np.array(ys, dtype='float')\n assert int(test_data[i]['Number_Of_Points']) == len(xs)\n assert next(f).strip()[0] == '}' # may have a comma\n\n assert 'Test_Results=(' == next(f).strip()\n test_results = []\n for i, line in enumerate(f):\n if line.strip() != '{6|':\n break\n test_results.append({})\n for line in f:\n if '}' in line:\n break\n key, value = read_key_value(line)\n test_results[i][key] = try_to_num(value)\n assert next(f).strip()[0] == '}' # may include comma\n\n except AssertionError as e:\n print(f._index, f._line)\n raise\n\n data_remove = ['Number_Of_Points']\n results_swaps = [\n ('TestDate', 'date'),\n ('Length_Cnvrsn', 'length_conversion'),\n ('Force_Cnvrsn', 'force_conversion'),\n ('LoadCell_Capacity', 'loadcell_capacity'),\n ('LoadCell_CpctyUnit', 'loadcell_capacity_unit'),\n ('LoadCell_BitsOfReso', 'loadcell_bits_of_resolution'),\n ('Slack_time', 'slack_time'),\n ('BreakStrength', 'break_strength'),\n ('BreakElongation', 'break_elongation'),\n ('BreakPctElongation', 'break_percent_elongation'),\n ('YieldStrength1', 'yield_strength'),\n ('YieldLoad1', 'yield_load'),\n ('SampleThickness', 'thickness'),\n ('BreakLoad', 'break_load'),\n ]\n results_remove = ['Analysis']\n data_swaps = [\n ('X_unit', 'x_units'),\n ('Y_unit', 'y_units'),\n ('Crosshead_speed', 'crosshead_speed'),\n ('Sample_Thkness', 'sample_thickness'),\n ('Sample_Width', 'sample_width'),\n ('Grip_Separation', 'gauge_length'),\n ('Start_Threshhold', 'start_threshhold'),\n ('Stop_Threshhold', 'stop_threshhold'),\n ]\n\n elongations = []\n assert len(test_data) == len(test_results)\n for data, results in zip(test_data, test_results):\n for original, to in data_swaps:\n data[to] = data.pop(original)\n for original, to in results_swaps:\n results[to] = results.pop(original)\n for key in data_remove:\n data.pop(key)\n for key in results_remove:\n results.pop(key)\n\n if data['x_units'] == 'Secs.':\n data['x_units'] = 's'\n if data['y_units'] == 'Newtons':\n data['y_units'] = 'N'\n if results['date']:\n results['date'] = datetime.strptime(results['date'], '%d %b, %Y')\n\n xs = data['xs']*float(data['crosshead_speed'])\n elongations.append(\n Elongation(\n xs, data['ys'],\n float(data['gauge_length']) / 1e3, # mm → m\n float(data['sample_width']) / 1e3, # mm → m\n float(data['sample_thickness']) / 1e3, # mm → m\n None\n )\n )\n\n return elongations", "def _parse_stations_file(filename):\n with open(filename, 'rt') as f:\n receivers = []\n\n for line in f:\n station, network, lat, lon, _, _ = line.split()\n lat = float(lat)\n lon = float(lon)\n receivers.append(Receiver(lat, lon, network, station))\n\n return receivers", "def parse_problem(path_to_file):\n with open(path_to_file, 'r') as f:\n lines = f.readlines()\n return parse_problem_lines(lines)", "def parse_file(self, file):\n return self.parse(file.read())", "def parse_one_file(self, filename):\n parsed_patent_list = []\n with open(filename, 'rb') as f:\n patent_list = self._load_patent_list(f)\n for patent in patent_list:\n parsed_patent_list.append(self._parse_patent(patent))\n return parsed_patent_list", "def parse_reports(self):\n txt = (\n self.unixtext\n if self.unixtext[:2] != \"\\001\\n\"\n else self.unixtext[2:]\n )\n\n lines = txt.split(\"\\n\")\n # There may be an AWIPSID in line 3 or silly aviation control char\n pos = 3 if len(lines[2]) < 10 or lines[2].startswith(\"\\x1e\") else 2\n meat = 
\"\".join(lines[pos:])\n for report in meat.split(\"=\"):\n if report.strip() == \"\":\n continue\n res = self.process_pirep(\" \".join(report.strip().split()))\n if res is not None:\n self.reports.append(res)", "def DocParser(accession_list):\n\thandle = open(\"prosite.txt\")\n\trecords = Prodoc.parse(handle)\n\trecord_text_list=[]\n\ttry:\n\t\t# Loop to go through prosite.doc entries. \n\t\tfor record in records:\n\t\t\tif record.accession in accession_list: # If an entry is in the list of the already found domains\n\t\t\t\trecord_text_list.append(record.text) # Save it\n\texcept:\n\t\tprint(False)\n\treturn record_text_list", "def parser(in_file,verbose):\n\n # perform high-level parsing into sections\n res_file_lines = [row for row in in_file]\n tokenized_lines = tools.split_and_prune_lines(res_file_lines)\n sections = tools.extracted_sections(tokenized_lines)\n\n # split out common sections and subsequent groups of results sections\n def is_results_sentinel_section(section):\n \"\"\" Identify mesh point separator \"pseudo-section\" header.\n\n (Helper function for res_parser_spncci.)\n \"\"\"\n (section_name,_) = section\n return (section_name == \"RESULTS\")\n\n grouped_sections = tools.split_when(is_results_sentinel_section,sections)\n common_sections = list(next(grouped_sections))\n grouped_results_sections = [list(section_group) for section_group in grouped_sections]\n\n if (verbose):\n print(\"Section counts\")\n print(\" Common sections:\",len(common_sections))\n for results_section_group in grouped_results_sections:\n print(\" Results sections (by group):\",len(results_section_group))\n\n # generate results objects by mesh point\n mesh_data = []\n if (grouped_results_sections):\n # there are results sections: actual mesh, not counting run\n for results_section_group in grouped_results_sections:\n full_section_group = common_sections + results_section_group\n results = spncci_results_data.SpNCCIResultsData()\n parse_mesh_point(results,full_section_group,section_handlers)\n mesh_data.append(results)\n else:\n # no results sections: counting run\n results = spncci_results_data.SpNCCIResultsData()\n parse_mesh_point(results,common_sections,section_handlers)\n mesh_data.append(results)\n\n return mesh_data", "def parse_CPS_file(lines):\r\n\r\n chimeras = []\r\n for line in lines:\r\n record = line.split()\r\n try:\r\n id = record[1]\r\n parent1 = record[2]\r\n parent2 = record[3]\r\n verdict = record[10]\r\n except IndexError:\r\n raise ValueError(\"Error parsing ChimeraSlayer CPS file.\")\r\n if verdict == \"YES\":\r\n chimeras.append((id, [parent1, parent2]))\r\n return chimeras" ]
[ "0.5723123", "0.5713342", "0.56971914", "0.5693488", "0.5667087", "0.56034213", "0.5589402", "0.5556643", "0.55423754", "0.55163777", "0.54881227", "0.5454614", "0.5442599", "0.5424246", "0.542282", "0.5417614", "0.5411043", "0.5402362", "0.539387", "0.53841794", "0.5367232", "0.53455377", "0.5341027", "0.52884483", "0.52871186", "0.5276258", "0.52729666", "0.5271905", "0.5271537", "0.52621686" ]
0.5888745
0
Create an instance by evaluating all constraints in the problem. The ``problem`` is a DnaChisel DnaOptimizationProblem.
def from_problem(problem, autopass_constraints=True): def evaluate(constraint): if ( autopass_constraints and constraint.enforced_by_nucleotide_restrictions ): return SpecEvaluation( constraint, problem, score=1, locations=[], message="Enforced by nucleotides restrictions", ) else: return constraint.evaluate(problem) return ProblemConstraintsEvaluations( [evaluate(constraint) for constraint in problem.constraints], problem=problem, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def phase_I_problem_from(cls, problem: LinearConstraintsProblem) -> LinearProblem:\n n = problem.n\n m = len(problem.constraints)\n\n e_x = np.zeros(shape=n)\n e_z = np.ones(shape=m)\n e = np.concatenate((e_x, e_z))\n\n x0 = np.zeros(n)\n # we dont need to look at standardized b as abs(b) will be same even after standardizing\n z0 = np.abs(problem.b)\n xz0 = np.concatenate((x0, z0))\n\n constraints = []\n for i, constraint in enumerate(problem.constraints):\n E_i = np.eye(m)[i]\n if constraint.c.b < 0:\n E_i = -E_i\n\n A_i = constraint.c.a\n\n a = np.concatenate((A_i, E_i))\n\n constraints.append(LinearConstraint(\n c=LinearCallable(a=a, b=constraint.c.b),\n equation_type=constraint.equation_type))\n\n return LinearProblem(c=e, constraints=constraints, x0=xz0, n=n+m, solution=None)", "def apply(self, problem):\n data = dict()\n var = problem.x\n inv_data = {self.VAR_ID: var.id,\n 'suc_slacks': [], 'y_slacks': [], 'snx_slacks': [], 'psd_dims': []}\n\n # Get integrality constraint information\n data[s.BOOL_IDX] = [int(t[0]) for t in var.boolean_idx]\n data[s.INT_IDX] = [int(t[0]) for t in var.integer_idx]\n inv_data['integer_variables'] = len(data[s.BOOL_IDX]) + len(data[s.INT_IDX]) > 0\n\n if not problem.formatted:\n problem = self.format_constraints(problem,\n MOSEK.EXP_CONE_ORDER)\n data[s.PARAM_PROB] = problem\n constr_map = problem.constr_map\n data[s.DIMS] = problem.cone_dims\n\n inv_data['constraints'] = problem.constraints\n\n # A is ordered as [Zero, NonNeg, SOC, PSD, EXP]\n c, d, A, b = problem.apply_parameters()\n A = -A\n data[s.C] = c.ravel()\n inv_data['n0'] = len(data[s.C])\n data[s.OBJ_OFFSET] = float(d)\n inv_data[s.OBJ_OFFSET] = float(d)\n\n Gs = []\n hs = []\n # Linear inequalities\n num_linear_equalities = len(constr_map[Zero])\n num_linear_inequalities = len(constr_map[NonNeg])\n leq_dim = data[s.DIMS][s.LEQ_DIM]\n eq_dim = data[s.DIMS][s.EQ_DIM]\n if num_linear_inequalities > 0:\n # G, h : G * z <= h\n offset = num_linear_equalities\n for c in problem.constraints[offset:offset + num_linear_inequalities]:\n assert(isinstance(c, NonNeg))\n inv_data['suc_slacks'].append((c.id, c.size))\n row_offset = eq_dim\n Gs.append(A[row_offset:row_offset + leq_dim])\n hs.append(b[row_offset:row_offset + leq_dim])\n\n # Linear equations\n if num_linear_equalities > 0:\n for c in problem.constraints[:num_linear_equalities]:\n assert(isinstance(c, Zero))\n inv_data['y_slacks'].append((c.id, c.size))\n Gs.append(A[:eq_dim])\n hs.append(b[:eq_dim])\n\n # Second order cone\n num_soc = len(constr_map[SOC])\n soc_dim = sum(data[s.DIMS][s.SOC_DIM])\n if num_soc > 0:\n offset = num_linear_inequalities + num_linear_equalities\n for c in problem.constraints[offset:offset + num_soc]:\n assert(isinstance(c, SOC))\n inv_data['snx_slacks'].append((c.id, c.size))\n row_offset = leq_dim + eq_dim\n Gs.append(A[row_offset:row_offset + soc_dim])\n hs.append(b[row_offset:row_offset + soc_dim])\n\n # Exponential cone\n num_exp = len(constr_map[ExpCone])\n if num_exp > 0:\n # G * z <=_{EXP} h.\n len_exp = 0\n for c in problem.constraints[-num_exp:]:\n assert(isinstance(c, ExpCone))\n inv_data['snx_slacks'].append((c.id, 3 * c.num_cones()))\n len_exp += 3 * c.num_cones()\n Gs.append(A[-len_exp:])\n hs.append(b[-len_exp:])\n\n # PSD constraints\n num_psd = len(constr_map[PSD])\n psd_dim = sum([dim ** 2 for dim in data[s.DIMS][s.PSD_DIM]])\n if num_psd > 0:\n offset = num_linear_inequalities + num_linear_equalities + num_soc\n for c in problem.constraints[offset:offset + num_psd]:\n assert(isinstance(c, 
PSD))\n inv_data['psd_dims'].append((c.id, c.expr.shape[0]))\n row_offset = leq_dim + eq_dim + soc_dim\n Gs.append(A[row_offset:row_offset + psd_dim])\n hs.append(b[row_offset:row_offset + psd_dim])\n\n if Gs:\n data[s.G] = sp.sparse.vstack(tuple(Gs))\n else:\n data[s.G] = sp.sparse.csc_matrix((0, 0))\n if hs:\n data[s.H] = np.hstack(tuple(hs))\n else:\n data[s.H] = np.array([])\n inv_data['is_LP'] = (len(constr_map[PSD]) +\n len(constr_map[ExpCone]) +\n len(constr_map[SOC])) == 0\n\n return data, inv_data", "def __init__(self, problem):\n # Problem: The problem object from parsing\n self.problem = problem\n\n #: *dict*\n #: Container cached residual squared evaluation (cost function)\n self.cache_cost_x = {'params': None, 'value': None}", "def run(self, problem):\n\n self.pyopt_solution = None\n rel = problem.root._probdata.relevance\n\n # Metadata Setup\n self.metadata = create_local_meta(None, self.options['optimizer'])\n self.iter_count = 0\n update_local_meta(self.metadata, (self.iter_count,))\n\n # Initial Run\n with problem.root._dircontext:\n problem.root.solve_nonlinear(metadata=self.metadata)\n\n opt_prob = Optimization(self.options['title'], self._objfunc)\n\n # Add all parameters\n param_meta = self.get_desvar_metadata()\n self.indep_list = indep_list = list(param_meta)\n param_vals = self.get_desvars()\n\n for name, meta in iteritems(param_meta):\n opt_prob.addVarGroup(name, meta['size'], type='c',\n value=param_vals[name],\n lower=meta['lower'], upper=meta['upper'])\n\n opt_prob.finalizeDesignVariables()\n\n # Figure out parameter subsparsity for paramcomp index connections.\n # sub_param_conns is empty unless there are some index conns.\n # full_param_conns gets filled with the connections to the entire\n # parameter so that those params can be filtered out of the sparse\n # set if the full path is also relevant\n sub_param_conns = {}\n full_param_conns = {}\n for name in indep_list:\n pathname = problem.root.unknowns.metadata(name)['pathname']\n sub_param_conns[name] = {}\n full_param_conns[name] = set()\n for target, info in iteritems(problem.root.connections):\n src, indices = info\n if src == pathname:\n if indices is not None:\n # Need to map the connection indices onto the desvar\n # indices if both are declared.\n dv_idx = param_meta[name].get('indices')\n indices = set(indices)\n if dv_idx is not None:\n indices.intersection_update(dv_idx)\n ldv_idx = list(dv_idx)\n mapped_idx = [ldv_idx.index(item) for item in indices]\n sub_param_conns[name][target] = mapped_idx\n else:\n sub_param_conns[name][target] = indices\n else:\n full_param_conns[name].add(target)\n\n # Add all objectives\n objs = self.get_objectives()\n self.quantities = list(objs)\n self.sparsity = OrderedDict()\n self.sub_sparsity = OrderedDict()\n for name in objs:\n opt_prob.addObj(name)\n self.sparsity[name] = self.indep_list\n\n # Calculate and save gradient for any linear constraints.\n lcons = self.get_constraints(lintype='linear').keys()\n if len(lcons) > 0:\n self.lin_jacs = problem.calc_gradient(indep_list, lcons,\n return_format='dict')\n #print(\"Linear Gradient\")\n #print(self.lin_jacs)\n\n # Add all equality constraints\n econs = self.get_constraints(ctype='eq', lintype='nonlinear')\n con_meta = self.get_constraint_metadata()\n self.quantities += list(econs)\n\n self.active_tols = {}\n for name in self.get_constraints(ctype='eq'):\n meta = con_meta[name]\n size = meta['size']\n lower = upper = meta['equals']\n\n # Sparsify Jacobian via relevance\n rels = rel.relevant[name]\n wrt = 
rels.intersection(indep_list)\n self.sparsity[name] = wrt\n\n if meta['linear']:\n opt_prob.addConGroup(name, size, lower=lower, upper=upper,\n linear=True, wrt=wrt,\n jac=self.lin_jacs[name])\n else:\n\n jac = self._build_sparse(name, wrt, size, param_vals,\n sub_param_conns, full_param_conns, rels)\n opt_prob.addConGroup(name, size, lower=lower, upper=upper,\n wrt=wrt, jac=jac)\n\n active_tol = meta.get('active_tol')\n if active_tol:\n self.active_tols[name] = active_tol\n\n # Add all inequality constraints\n incons = self.get_constraints(ctype='ineq', lintype='nonlinear')\n self.quantities += list(incons)\n\n for name in self.get_constraints(ctype='ineq'):\n meta = con_meta[name]\n size = meta['size']\n\n # Bounds - double sided is supported\n lower = meta['lower']\n upper = meta['upper']\n\n # Sparsify Jacobian via relevance\n rels = rel.relevant[name]\n wrt = rels.intersection(indep_list)\n self.sparsity[name] = wrt\n\n if meta['linear']:\n opt_prob.addConGroup(name, size, upper=upper, lower=lower,\n linear=True, wrt=wrt,\n jac=self.lin_jacs[name])\n else:\n\n jac = self._build_sparse(name, wrt, size, param_vals,\n sub_param_conns, full_param_conns, rels)\n opt_prob.addConGroup(name, size, upper=upper, lower=lower,\n wrt=wrt, jac=jac)\n\n active_tol = meta.get('active_tol')\n if active_tol is not None:\n self.active_tols[name] = active_tol\n\n # Instantiate the requested optimizer\n optimizer = self.options['optimizer']\n try:\n _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0)\n opt = getattr(_tmp, optimizer)()\n except ImportError:\n msg = \"Optimizer %s is not available in this installation.\" % \\\n optimizer\n raise ImportError(msg)\n\n #Set optimization options\n for option, value in self.opt_settings.items():\n opt.setOption(option, value)\n\n self._problem = problem\n self.opt_prob = opt_prob\n\n # Execute the optimization problem\n if self.options['gradient method'] == 'pyopt_fd':\n\n # Use pyOpt's internal finite difference\n fd_step = problem.root.deriv_options['step_size']\n sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file)\n\n elif self.options['gradient method'] == 'snopt_fd':\n if self.options['optimizer']=='SNOPT':\n\n # Use SNOPT's internal finite difference\n fd_step = problem.root.deriv_options['step_size']\n sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file)\n\n else:\n msg = \"SNOPT's internal finite difference can only be used with SNOPT\"\n raise Exception(msg)\n else:\n\n # Use OpenMDAO's differentiator for the gradient\n sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file)\n\n self._problem = None\n\n # Print results\n if self.options['print_results']:\n print(sol)\n\n # Pull optimal parameters back into framework and re-run, so that\n # framework is left in the right final state\n dv_dict = sol.getDVs()\n for name in indep_list:\n val = dv_dict[name]\n self.set_desvar(name, val)\n\n with self.root._dircontext:\n self.root.solve_nonlinear(metadata=self.metadata)\n\n # Save the most recent solution.\n self.pyopt_solution = sol\n try:\n exit_status = sol.optInform['value']\n self.exit_flag = 1\n if exit_status > 2: # bad\n self.exit_flag = 0\n except KeyError: #nothing is here, so something bad happened!\n self.exit_flag = 0", "def __init__(self, problem, options, tr_method):\n\n # User-defined optimal-control problem\n self.problem = problem\n\n # General options\n self.options = options\n\n # Transcription method\n self.tr_method = tr_method\n\n # Construction of the 
lower and upper constraints boundaries\n self.low, self.upp = self.build_constraints_boundaries()", "def run_problem(problem):\n\n problem = process_problem(problem) # preserves the original problem dict\n\n xin = problem.get('init_guess', lin_init_guess)(problem)\n\n algorithm = {\n 'method': 'SLSQP',\n 'options': {'disp': True, 'ftol': 1e-02, 'maxiter': 1000}\n }\n\n constr = []\n constr += [{'type': 'eq', 'fun': lambda x: eqconstr(x, problem)}]\n if not problem['use_log_bar']:\n constr += [{'type': 'ineq', 'fun': lambda x: ineqconstr(x, problem)}]\n\n bnds = variable_bounds(problem) if not problem['use_log_bar'] else None\n\n t = time()\n # noinspection PyTypeChecker\n res = minimize(cost_fun, xin, args=problem, method=algorithm['method'], bounds=bnds, constraints=constr, options=algorithm['options'])\n elapsed_time = time() - t\n x_out, t_final = matrify(res.x, problem)\n return x_out, t_final, res.fun, elapsed_time", "def __init__(self, initial, goal=(3, 3, 0, 0, 0)):\n\n self.goal = goal\n Problem.__init__(self, initial, goal)", "def process_problem(problem_orig):\n problem = problem_orig.copy()\n problem.setdefault('state_bounds', None)\n problem.setdefault('input_bounds', None)\n problem.setdefault('num_inputs', 0)\n problem.setdefault('use_log_bar', False)\n problem.setdefault('use_sigma', True)\n problem.setdefault('T', 0)\n problem.setdefault('N', 20)\n problem = {**problem, **{\n # common parameters\n 'DiffMat': bern.derivelevmat(problem['N'], 1),\n 'elev_mat': bern.degrelevmat(problem['N'], problem['N'] * 10),\n 'EvalMat': bern.evalspacemat(problem['N'], problem['T'] if problem['T'] != 0 else 1,\n (0, problem['T'] if problem['T'] != 0 else 1, 1000)),\n 'num_states': problem['xi'].shape[1],\n 'Nv': problem['xi'].shape[0],\n }}\n problem.setdefault('obstacles_circles', [])\n problem.setdefault('obstacles_polygons', [])\n problem.setdefault('min_dist_obs', 0)\n problem.setdefault('min_dist_int_veh', .95)\n # noinspection PyTypeChecker\n problem = {**problem, **{\n 'obstacles':\n [TOLCircle(c[:-1], c[-1], problem['elev_mat'], problem['min_dist_obs'])\n for c in problem['obstacles_circles']] +\n [TOLPolygon(m) for m in problem['obstacles_polygons']]\n }}\n problem.setdefault('plot_control_points', False)\n problem.setdefault('recover_xy', None)\n problem.setdefault('boat_size', np.linalg.norm(problem['xi'][:2, 0]-problem['xf'][:2, 0])/13)\n\n return problem", "def get_problem():\n\n # Rename this and/or move to optim package?\n problem = beluga.optim.Problem('Hannibal_HPAdemo')\n problem.mode='analytical' #Other options: 'numerical', 'dae'\n\n #Define independent variables\n problem.independent('t', 's')\n\n #~~~~~!!!\n #problem.quantity('terrain3','(-0.3*exp(-0.5*((x-2.7)^2+1.5*(y-2.1)^2))+2.6*exp(-0.55*(0.87*(x-6.7)^2+(y-2.2)^2))+2.1*exp(-0.27*(0.2*(x-5.5)^2+(y-7.2)^2))+1.6*(cos(0.8*y))^2*(sin(0.796*x))^2)')\n\n # Define equations of motion\n problem.state('x','V*cos(hdg)','m') \\\n .state('y','V*sin(hdg)','m') \\\n\n # Define controls\n problem.control('hdg','rad')\n\n # Define Cost Functional\n problem.cost['path'] = Expression('(1-w)+w*V*conv*elev*terrain(x,y)', 's')\n\n #Define constraints\n problem.constraints().initial('x-x_0','m') \\\n .initial('y-y_0','m') \\\n .terminal('x-x_f','m') \\\n .terminal('y-y_f','m')\n\n #Define constants\n problem.constant('w',0.9,'1') #Initial Terrain weighting factor\n problem.constant('conv',1,'s/m^2') #Integral conversion factor\n problem.constant('V',1,'m/s') #Vehicle speed\n problem.constant('elev',1,'m') #Initial Elevation\n\n #Unit 
scaling\n problem.scale.unit('m',1) \\\n .unit('s',1) \\\n .unit('rad',1)\n\n #Configure solver\n #problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=8)\n problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=15, verbose = True, cached = False)\n\n #Initial Guess (Classic test example [4.9,0.4])\n problem.guess.setup('auto',start=[9.0,0.5], costate_guess=[0.0,-0.1]) #City A\n #problem.guess.setup('auto',start=[4.9,0.4], costate_guess=[0.1,-0.1]) #City A\n\n #Add Continuation Steps (Classic test example [7.2,8.5]) [8, 4.5]\n problem.steps.add_step(strategy='HPA',hweight=0.9) \\\n .terminal('x', 3.0, 10) \\\n .terminal('y', 9.5, 10) \\\n # .const('w', 0.9, 2, confined=True)\n\n #problem.steps.add_step(strategy='manual').num_cases(10) \\\n # .terminal('x', 3.0) \\\n # .terminal('y', 9.5) \\\n\n #problem.steps.add_step().num_cases(30) \\\n # .const('w',0.99) #Final Terrain weighting factor\n\n\n return problem", "def __init__(self, variables, domains, neighbors, constraints, C):\r\n super().__init__(())\r\n variables = variables or list(domains.keys())\r\n self.variables = variables\r\n self.domains = domains\r\n self.neighbors = neighbors\r\n self.constraints = constraints\r\n self.curr_domains = None\r\n # visited nodes\r\n self.nassigns = 0\r\n self.conflict_set = {} #dictionary which stores the conflict set of each variable for fc - cbj\r\n self.prev_conflict_set = [] # we store the conflict set from the variable that causes dead-end\r\n self.deadend = None # we save the dead end variable in fc - cbj\r\n # initializating the conflict set array\r\n for x in self.variables:\r\n self.conflict_set[x]=[]\r\n # --------------------------\r\n # keep track of total checks for each algo\r\n self.totchecks=0\r\n # dict for later use in dom / wdeg heuristic\r\n # we initializating weights from constraints to 1\r\n self.weight = {}\r\n for each in C.keys():\r\n self.weight[(each[0],each[1])] = 1", "def __init__(self, initial, goal=(1, 2, 3, 4, 5, 6, 7, 8, 0)):\n\n self.goal = goal\n Problem.__init__(self, initial, goal)", "def apply(self, problem):\n data, inv_data = super(CVXOPT, self).apply(problem)\n # Convert A, b, G, h, c to CVXOPT matrices.\n if data[s.A] is not None:\n data[s.A] = intf.sparse2cvxopt(data[s.A])\n if data[s.G] is not None:\n data[s.G] = intf.sparse2cvxopt(data[s.G])\n if data[s.B] is not None:\n data[s.B] = intf.dense2cvxopt(data[s.B])\n if data[s.H] is not None:\n data[s.H] = intf.dense2cvxopt(data[s.H])\n if data[s.C] is not None:\n data[s.C] = intf.dense2cvxopt(data[s.C])\n return data, inv_data", "def __init__(self, optimization: Optimization, error_on_fail: bool = False):\n\n ## Instance of the optimization problem.\n self.opt = optimization\n\n ## Initial guess for the optimization problem (set using reset_initial_seed).\n self.x0 = cs.DM.zeros(optimization.nx)\n\n ## Parameter vector.\n self.p = cs.DM.zeros(optimization.np)\n\n ## Parameter dictionary.\n self._p_dict = {}\n\n ## When True, after solve() is called, if the solver did not converge then a RuntimeError is thrown.\n self._error_on_fail = error_on_fail\n\n ## Solution container\n self._solution = None", "def getProblem(self):\n return ProblemInstance(nDays=self.nDays,\n nSC=self.nSC,\n nGS=self.nGS,\n timewindows=self.timewindows,\n requirements=self.requirements)", "def minimize(problem,\n algorithm,\n termination=None,\n **kwargs):\n\n # create a copy of the 
algorithm object to ensure no side-effects\n algorithm = copy.deepcopy(algorithm)\n\n # set the termination criterion and store it in the algorithm object\n if termination is None:\n termination = None\n elif not isinstance(termination, Termination):\n if isinstance(termination, str):\n termination = get_termination(termination)\n else:\n termination = get_termination(*termination)\n\n # initialize the method given a problem\n algorithm.initialize(problem,\n termination=termination,\n **kwargs)\n\n if algorithm.termination is None:\n if problem.n_obj > 1:\n algorithm.termination = MultiObjectiveDefaultTermination()\n else:\n algorithm.termination = SingleObjectiveDefaultTermination()\n\n # actually execute the algorithm\n res = algorithm.solve()\n\n # store the copied algorithm in the result object\n res.algorithm = algorithm\n\n return res", "def initialize(self, problem, problem_data):\n self.problem_data = problem_data\n\n # Generate scaling functions for states, costates\n # constants, constraints, lagrange multipliers\n\n # Convert units to symbols\n self.units_sym = symbols(list(self.units))\n\n # Growing list TODO: Put inside utils\n # TODO: Automate the following sections\n\n # Scaling functions for constants\n # self.scale_func['const'] = {str(const): self.create_scale_fn(const.unit)\n # for const in problem.constants()}\n self.scale_func['const'] = {str(const): lambdify(self.units_sym,sympify2(const.unit))\n for const in problem.constants()}\n\n # Cost function used for scaling costates\n cost_used = [key for (key,val) in problem.cost.items() if val.expr is not '0']\n if len(cost_used) < 1:\n raise ValueError('At least one cost function must be specified as nonzero!')\n cost_unit = problem.cost[cost_used[0]].unit\n\n # Scaling functions for states & costates\n self.scale_func['states'] = {}\n self.scale_func['states'] = {str(state): self.create_scale_fn(state.unit)\n for state in problem.states()}\n self.scale_func['states'].update({ state.make_costate():\n self.create_scale_fn('('+cost_unit+')/('+state.unit+')')\n for state in problem.states()})\n\n # Scaling function for the independent variable\n # TODO: Fix hardcoding\n # self.scale_func['independent_var'] = lambdify(units_sym,sympify2(problem.indep_var().unit))\n self.scale_func['states']['tf'] = self.create_scale_fn(problem.indep_var().unit)\n\n self.scale_func['initial'] = self.scale_func['states']\n self.scale_func['terminal'] = self.scale_func['states']\n\n # Scaling functions for constraint multipliers and other parameters\n self.scale_func['parameters'] = {}\n indices = {}\n for c in problem.constraints():\n if c.type not in indices:\n indices[c.type] = 1 # initialize multiplier index\n\n mul_var = c.make_multiplier(indices[c.type])\n mul_unit = '('+cost_unit+')/('+c.unit+')'\n self.scale_func['parameters'][mul_var] = self.create_scale_fn(mul_unit)\n indices[c.type] += 1 # increment multiplier index", "def __init__(self):\n self.points = []\n names = ['x1', 'x2', 'x3']\n lows = [-5, -5, -5]\n highs = [5, 5, 5]\n # TODO 2: Use names, lows and highs defined above to code up decision\n # and objective metadata for POM3.\n decisions = [Decision(n, l, h) for n, l, h in zip(names, lows, highs)]\n objectives = [Objective(\"f1\", True), Objective(\"f2\", True)]\n Problem.__init__(self, decisions, objectives)", "def __init__(self):\n self.points = []\n names = ['x1']\n lows = [10**-5]\n highs = [10**5]\n # TODO 2: Use names, lows and highs defined above to code up decision\n # and objective metadata for POM3.\n decisions = 
[Decision(n, l, h) for n, l, h in zip(names, lows, highs)]\n objectives = [Objective(\"f1\", True), Objective(\"f2\", True)]\n Problem.__init__(self, decisions, objectives)", "def evaluate(self, problem):\n score, nonoptimal_indices = self.codon_usage_matching_stats(problem)\n locations = self.codons_indices_to_locations(nonoptimal_indices)\n np.random.shuffle(locations)\n return SpecEvaluation(\n self,\n problem,\n score=score,\n locations=locations,\n message=\"Codon opt. on window %s scored %.02E\"\n % (self.location, score),\n )", "def __init__(self):\n self.points = []\n names = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6']\n lows = [0, 0, 1, 0, 1, 0]\n highs = [10, 10, 5, 6, 5, 10]\n # TODO 2: Use names, lows and highs defined above to code up decision\n # and objective metadata for POM3.\n decisions = [Decision(n, l, h) for n, l, h in zip(names, lows, highs)]\n objectives = [Objective(\"f1\", True), Objective(\"f2\", True)]\n Problem.__init__(self, decisions, objectives)", "def __init__(self, objective_function, constraint, dumper=None):\n self.total_iterations = 0\n self.maximum_iterations = 3000\n self.precision = np.sqrt(np.finfo(float).eps)\n self.constraint = constraint\n self.objective_fun = objective_function\n if dumper is None:\n self.use_dumper = False\n else:\n self.use_dumper = True\n self.dumper = dumper\n\n # Used to let the all parts of the solver be aware of the active constraints\n self.active_constraints_index = 0\n self.active_constraints_set = False\n\n # Used for exit information\n self.convergence_reached_tag = 1\n self.maximum_iterations_reached_tag = 2\n self.unknown_exit = 99\n return", "def create_another_example():\n G = np.array([[2, 0], [0, 2]])\n c = np.array([-4, -4])\n\n a1 = np.array([1, 1], dtype=np.float64)\n a2 = np.array([1, -2], dtype=np.float64)\n a3 = np.array([-1, -1], dtype=np.float64)\n a4 = np.array([-2, 1], dtype=np.float64)\n constraints = np.array([\n LinearConstraint(LinearCallable(a=a1, b=2), equation_type=EquationType.LE),\n LinearConstraint(LinearCallable(a=a2, b=2), equation_type=EquationType.LE),\n LinearConstraint(LinearCallable(a=a3, b=1), equation_type=EquationType.LE),\n LinearConstraint(LinearCallable(a=a4, b=2), equation_type=EquationType.LE),\n ])\n\n solution = np.array([1, 1], dtype=np.float64)\n\n return QuadraticProblem(G=G, c=c, n=2, constraints=constraints, x0=None, solution=solution)", "def __init__(self, problem):\n self._problem = problem\n self._sorted_savings = None\n self._arg_sorted_savings = None", "def problem_fn(self, params: MathyEnvProblemArgs) -> MathyEnvProblem:\n if params.difficulty == MathyEnvDifficulty.easy:\n if rand_bool(50):\n text, complexity = gen_binomial_times_monomial(min_vars=2, max_vars=3)\n else:\n text, complexity = gen_binomial_times_binomial(\n min_vars=2,\n max_vars=3,\n powers_probability=uniform(0.1, 0.4),\n like_variables_probability=uniform(0.3, 0.7),\n )\n elif params.difficulty == MathyEnvDifficulty.normal:\n text, complexity = gen_binomial_times_binomial(\n min_vars=2,\n max_vars=2,\n powers_probability=uniform(0.2, 0.6),\n like_variables_probability=uniform(0.2, 0.5),\n )\n elif params.difficulty == MathyEnvDifficulty.hard:\n text, complexity = gen_binomial_times_binomial(\n min_vars=2,\n max_vars=3,\n simple_variables=False,\n powers_probability=uniform(0.4, 0.8),\n like_variables_probability=uniform(0.1, 0.3),\n )\n complexity += 2\n else:\n raise ValueError(f\"Unknown difficulty: {params.difficulty}\")\n return MathyEnvProblem(text, complexity, self.get_env_namespace())", "def 
__init__(self, population=25, initSampling='lhc', fracMutation=0.2, fracElite=0.2, fracLevy=1.0, alpha=0.5, gamma=1, n=1, scalingFactor=10.0, penalty=0.0, maxGens=20000, maxFevals=200000, convTol=1e-06, stallLimit=10000, optConvTol=0.01, **kwargs):\n ProblemParameters_multi.__init__(self, **kwargs)\n self.population = population\n self.initSampling = initSampling\n self.fracMutation = fracMutation\n assert self.fracMutation >= 0 and self.fracMutation <= 1, 'The probability of discovery must exist on (0,1]'\n self.fracElite = fracElite\n assert self.fracElite >= 0 and self.fracElite <= 1, 'The elitism fraction must exist on (0,1]'\n self.fracLevy = fracLevy\n assert self.fracLevy >= 0 and self.fracLevy <= 1, 'The probability that a Levy flight is performed must exist on (0,1]'\n self.alpha = alpha\n self.gamma = gamma\n self.n = n\n self.scalingFactor = scalingFactor\n self.penalty = penalty\n self.maxGens = maxGens\n self.maxFevals = maxFevals\n self.convTol = convTol\n self.stallLimit = stallLimit\n self.optConvTol = optConvTol", "def eqconstr(x, problem):\n x, t_final = matrify(x, problem)\n return np.concatenate([problem['dynamics'](x[:, :, i], t_final, problem) for i in range(problem['Nv'])])", "def __init__(self, opts: dict, solver_opts: dict):\n self.name = opts.get(\"name\", \"Undefined\") # Name of the problem\n self.gp = opts.get(\"grid_points\") # Number of grid points\n self.nadir_p = opts.get(\"nadir_points\") # Nadir points\n self.eps = opts.get(\"penalty_weight\", 1e-3) # Penalty weight\n self.round = opts.get(\"round_decimals\", 9) # Decimal places to round to\n self.nadir_r = opts.get(\"nadir_ratio\", 1) # Nadir ratio\n self.logdir = opts.get(\"logging_folder\", \"logs\") # Folder to save logs\n self.early_exit = opts.get(\"early_exit\", True) # Whether to enable early exit\n self.bypass = opts.get(\"bypass_coefficient\", True) # Whether to enable bypass coefficient\n self.flag = opts.get(\"flag_array\", True) # Whether to use flag array\n self.cpu_count = opts.get(\"cpu_count\", cpu_count()) # Number of CPUs to use\n self.redivide_work = opts.get(\"redivide_work\", True) # Whether to redivide work\n self.model_fn = opts.get(\"pickle_file\", \"model.p\") # Pickle file name\n self.shared_flag = opts.get(\"shared_flag\", True) # Whether to use shared flag array\n self.output_excel = opts.get(\"output_excel\", True) # Whether to output to Excel\n self.process_logging = opts.get(\"process_logging\", False) # Whether to enable process logging\n self.process_timeout = opts.get(\"process_timeout\", None) # Timeout for processes\n self.solver_name = opts.get(\"solver_name\", \"gurobi\") # Name of solver\n self.solver_io = opts.get(\"solver_io\", \"python\") # IO mode of solver\n\n self.solver_opts = solver_opts # Solver options\n self.solver_opts[\"MIPGap\"] = solver_opts.get(\"MIPGap\", 0.0) # MIP gap\n self.solver_opts[\"NonConvex\"] = solver_opts.get(\"NonConvex\", 2) # Nonconvex setting\n\n # Remove None values from dict when user has overriden them\n for key, value in dict(self.solver_opts).items():\n if value is None or value:\n del self.solver_opts[key]\n\n self.time_created = time.strftime(\"%Y%m%d-%H%M%S\") # Time the options object was created\n self.log_name = self.name + \"_\" + str(self.time_created) # Name of log file", "def get_problem():\n\n problem = beluga.optim.Problem('Track_demo')\n problem.mode='analytical' #Other options: 'numerical', 'dae'\n\n #Define independent variables\n problem.independent('t', 's')\n\n # Define equations of motion\n 
problem.state('x','V*cos(hdg)','k') \\\n .state('y','V*sin(hdg)','k') \\\n\n # Define controls\n problem.control('hdg','rad')\n\n # Define cost functional\n problem.cost['path'] = Expression('(1-w)+w*V*conv*elev*terrain(x,y)', 's')\n\n #Define constraints\n problem.constraints().initial('x-x_0','k') \\\n .initial('y-y_0','k') \\\n .terminal('x-x_f','k') \\\n .terminal('y-y_f','k')\n\n #Define constants\n problem.constant('w',0.9,'1') #Initial Terrain weighting factor\n problem.constant('conv',1,'s/k^2') #Integral conversion factor\n problem.constant('V',1,'k/s') #Vehicle speed\n problem.constant('elev',0.001,'k') #Units for the elevation\n\n #Unit scaling\n problem.scale.unit('k',1) \\\n .unit('s',1) \\\n .unit('rad',1)\n\n #Configure solver\n problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=50, verbose = True, cached = False, number_arcs=8)\n #problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=50, verbose = True, cached = False)\n\n #Initial Guess\n problem.guess.setup('auto',start=[16,10], costate_guess=[0.0,-0.1])\n\n #Add continuation steps\n problem.steps.add_step(strategy='HPA') \\\n .terminal('x', 180, 50) \\\n .terminal('y', 98, 50)\n\n return problem", "def generate_connectivity_constraint_all(problem):\n\n if problem.num_vars == None:\n problem.compute_num_var()\n\n ret = Constraint()\n\n # Iterator over all (v, t) subsets in the graph\n for b, b_r in enumerate(problem.src):\n # Convert each set in the iterator to (v,t) format\n add_S = map(\n lambda S: list(map(problem.get_time_augmented_n_t, S)),\n problem.powerset_exclude_agent(b_r),\n )\n ret &= generate_connectivity_constraint(problem, [b], add_S)\n\n return ret", "def __init__(self, initial,permutation=None):\n self.medics = initial[\"medics\"]\n self.police = initial[\"police\"]\n self.initial = initial['observations']\n self.obser_num = len(initial['observations'])\n self.row = len(initial['observations'][0])\n self.col = len(initial['observations'][0][0])\n self.init_matrix(self.initial,permutation)\n Problem.__init__(self, self.initial,permutation)" ]
[ "0.6295126", "0.60471386", "0.5886922", "0.5765422", "0.5730006", "0.5707503", "0.565367", "0.5576092", "0.5528963", "0.54968774", "0.5491965", "0.54807454", "0.5393297", "0.53774476", "0.53702635", "0.5276206", "0.52658254", "0.5262188", "0.5253071", "0.5241584", "0.52301854", "0.52025175", "0.51965857", "0.51920533", "0.5181311", "0.5174667", "0.51733434", "0.5167724", "0.5150302", "0.514791" ]
0.6974452
0
Return color 60f979 if evaluation.passes else f96c60.
def success_failure_color(self, evaluation):
    return "#60f979" if evaluation.passes else "#f96c60"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"", "def color(self):\n return 0x2f3136", "def color_negative_red_positive_green(val):\n if val < 0:\n color = 'red'\n elif val > 0:\n color = 'green'\n else:\n color = 'black'\n\n return 'color: %s' % color", "def _get_color(self, r, g, b):\n clr = (r, g, b)\n return clr", "def get_color(self, value):\n value = min(max(0,value), 1) * 510\n\n if value < 255:\n redValue = 255\n greenValue = math.sqrt(value) * 16\n greenValue = int(greenValue)\n else:\n greenValue = 255\n value = value - 255\n redValue = 255 - (value * value / 255)\n redValue = int(redValue)\n return '#' + f\"{redValue:0{2}x}\" + f\"{greenValue:0{2}x}\" + '00'", "def color_negative_red(value):\n\n if value == 1:\n color = 'red'\n else:\n color = 'black'\n\n return 'color: %s' % color", "def get_color(activePerMillion):\n activePer100k = activePerMillion / 10.0\n if activePer100k < 100:\n return \"#aaf0d1\"\n elif activePer100k < 500:\n return \"#a3f7bf\"\n elif activePer100k < 1000:\n return \"#90EE90\"\n elif activePer100k < 1500:\n return \"#00ff7f\"\n elif activePer100k < 2000:\n return \"#77dd77\"\n elif activePer100k < 2500:\n return \"#32cd32\"\n elif activePer100k < 3000:\n return \"#4cbb17\"\n elif activePer100k < 3500:\n return \"#228b22\"\n elif activePer100k < 4000:\n return \"#355e3b \"\n else:\n return \"#006400\"", "def get_color(self):\n return \"yellow\"", "def test_color(self):\n self._calibration_test(\"color_full\")", "def getColor(self):\n return self._l[2]", "def color(piece):\n return Color.BLACK if piece in {Piece.BP, Piece.BN, Piece.BB, Piece.BR, Piece.BQ, Piece.BK} else Color.WHITE", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def color_negative_red(val):\n if val == 'k':\n color = 'red' \n else:\n color = 'yellow'\n return ['color: %s' % color]*3", "def _proc_color(self, tokens):\n\n keys = tokens.keys()\n if \"red\" in keys: # RGB(A)\n rr, gg, bb = tokens[\"red\"], tokens[\"green\"], tokens[\"blue\"]\n hex2int = lambda h: int(h, 16)\n if \"alpha\" in keys:\n a = tokens[\"alpha\"]\n c = str((hex2int(rr), hex2int(gg), hex2int(bb), hex2int(a)))\n else:\n c = str((hex2int(rr), hex2int(gg), hex2int(bb)))\n elif \"hue\" in keys: # HSV\n r, g, b = hsv_to_rgb(tokens[\"hue\"],\n tokens[\"saturation\"],\n tokens[\"value\"])\n c = str((int(r*255), int(g*255), int(b*255)))\n else:\n c = tokens[\"color\"]\n\n return c", "def my_color_function(field):\n if field > 100000000:\n return \"#ff0000\"\n else:\n return \"#008000\"", "def get_color(self) -> str:\r\n return self.color", "def get_color(self):\r\n return self.__color", "def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")", "def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")", "def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")", "def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")", "def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")", "def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")", "def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")", "def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")", "def color(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"color\")", "def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")" ]
[ "0.7141954", "0.6812762", "0.6681384", "0.66602385", "0.66393065", "0.65938675", "0.65505564", "0.64515567", "0.64472324", "0.6438781", "0.6421074", "0.6412371", "0.6412371", "0.6412371", "0.6412371", "0.64005363", "0.6387426", "0.63748986", "0.6365186", "0.6347126", "0.63348055", "0.63348055", "0.63348055", "0.63348055", "0.63348055", "0.63348055", "0.63348055", "0.63348055", "0.63348055", "0.63348055" ]
0.722408
0
Return a global SUCCESS or FAILURE message for all evaluations.
def text_summary_message(self):
    failed = [e for e in self.evaluations if not e.passes]
    if failed == []:
        return "SUCCESS - all constraints evaluations pass"
    else:
        return "FAILURE: %d constraints evaluations failed" % len(failed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def success_failure_color(self, evaluation):\n return \"#60f979\" if evaluation.passes else \"#f96c60\"", "def get_error_message(self):\n msg = 'Test case: ' + self.benchmark + '.yaml + ' + self.producer + '.yaml failed. '\n info = ''\n if not self.directory:\n info = 'No results directory found. The benchmark probably failed'\n elif not self.reports:\n info = 'No results report generated. The results output format is probably wrong'\n elif not self.test_passed:\n info = 'Recorded messages percentage is lower than expected '\n return msg + info", "def generateFinalResult(self):\n if self.__testResult == 'FAIL':\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'PASS':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'NONE':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY) \n self.__testResult = 'PASS'\n #else:\n total_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"total_count\", TestScriptSymbolTable.test_result_tab))\n pass_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"pass_count\", TestScriptSymbolTable.test_result_tab))\n fail_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"fail_count\", TestScriptSymbolTable.test_result_tab))\n conditional_chk_flag = int(TestScriptSymbolTable.get_value_from_sym_tab(\"conditional_chk_flag\", TestScriptSymbolTable.test_result_tab))\n num_of_pass_required = int(TestScriptSymbolTable.get_value_from_sym_tab(\"num_of_pass_required\", TestScriptSymbolTable.test_result_tab))\n \n if total_count >= 1:\n if conditional_chk_flag == 1:\n if num_of_pass_required <= pass_count:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n if fail_count > 0:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n if GlobalConfigFiles.curr_tc_name != \"\":\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n logging.debug(\"\\n TEST COMPLETED without FINAL RESULT...\")\n\n self.__testResult = 'FAIL'\n\n self.tmsPacket.TestResult = self.__testResult\n if GlobalConfigFiles.curr_tc_name != \"\":\n logging.info(\"\\n FINAL TEST RESULT ---> %15s\", self.__testResult)\n logging.info(' END: TEST CASE [%s]', GlobalConfigFiles.curr_tc_name)\n\n Util.set_color(Util.FOREGROUND_WHITE)\n GlobalConfigFiles.test_result = self.__testResult\n\n self.tmsPacket.TimeStamp = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime())\n if GlobalConfigFiles.curr_tc_name != \"\":\n self.tmsPacket.writeTMSJson()\n\n return", "def success_message(cls):\n return f'Successfully performed \"{cls.display_name.lower()}\"'", "def result_summary(self):\r\n summary = ['Ran %d commands to test %d scripts. %d of these commands '\r\n 'failed and %d scripts could not be tested due to errors.' 
%\r\n (self.total_commands, self.total_scripts,\r\n self._num_failures(), self._num_script_errors())]\r\n\r\n if self._num_failures() > 0:\r\n summary.append('Failed scripts were: %s' %\r\n ' '.join(self._failed_scripts()))\r\n\r\n for error_info in self.script_errors.values():\r\n if len(error_info[0]) > 0:\r\n summary.append(self._format_script_error_summary(\r\n error_info[0], error_info[1]))\r\n\r\n if self.warnings:\r\n summary.append('Warnings:')\r\n for warning in self.warnings:\r\n summary.append(' ' + warning)\r\n\r\n return '\\n'.join(summary)", "def all_results(self):\n res = [(True, result) for result in self.successes]\n res.extend([(False, result) for result in self.failures])\n return res", "def obj_successfail(succeeded):\n if succeeded:\n return \"<span class='objective success'>Success</span>\"\n else:\n return \"<span class='objective failure'>Failure</span>\"", "def is_successful(self):\n for item in self.summary:\n if item['task_status'] is False:\n return testcase.TestCase.EX_TESTCASE_FAILED\n\n return super().is_successful()", "def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0", "def get_failure_message(self, value: T) -> str:\n try:\n return f\"'{value}' does not satisfy '{self.test.__name__}'\"\n except AttributeError:\n return str(value)", "def compute(self, failures):\n pass", "def evaluateCommand(self, cmd):\n\n if cmd.rc == 127:\n return WARNINGS\n elif cmd.rc != 0:\n if halt_on_unittest_error:\n return FAILURE\n else:\n return WARNINGS\n\n if self.warnings or self.errors:\n return WARNINGS\n return SUCCESS", "def show_msg(self):\n if self.result and self.success_msg:\n print color_str('g', '\\n'.join(self.success_msg))\n elif self.result == False and self.fail_msg:\n print color_str('r', '\\n'.join(self.fail_msg))\n if self.stat_msg:\n print color_str('b', '\\n'.join(self.stat_msg))", "def failure_cmd(self) -> str:\n return \"{} --enable=all -f -q {}\".format(\n self.conf.get_executable(), constants.ROOT_PATH + \"/data/cppcheck-152/trial-fail.cpp\"\n )", "def notify_result(self, test_case, success, message):\n self.num_successes += 1 if success else 0\n self.num_failures += 0 if success else 1\n counter_string = str(self.num_successes + self.num_failures) + '/' + str(\n self.num_tests)\n print('%-10s %-40s ' % (counter_string, test_case.test.name()) +\n ('Passed' if success else '-Failed-'))\n if not success:\n print(' '.join(test_case.command))\n print(message)", "def _sc_print_ ( sc ) :\n from Bender.Logger import colored_string \n if sc.isSuccess () : return colored_string( 'SUCCESS' , WHITE , GREEN , True ) \n elif sc.isRecoverable () : return colored_string( 'RECOVERABLE' , RED , YELLOW , True ) \n elif 0 != sc.getCode () :\n return colored_string('FAILURE[%d]' % sc.getCode() , YELLOW , RED , True ) \n return colored_string('FAILURE' , YELLOW , RED , True )", "def print_failure(msg):\n\n tf.print(BColors.FAIL + msg + BColors.ENDC, output_stream=sys.stderr)\n sys.exit(1)", "def testFailed(self):\r\n failedExprKeys = list(self.__testFailedExpressions.keys())\r\n for i in range(len(failedExprKeys)):\r\n for expr in self.__testFailedExpressions[failedExprKeys[i]]:\r\n self.__Calculator.setExpression(expr)\r\n self.__Calculator.calculateResult()\r\n self.assertEqual(self.__testErrors[failedExprKeys[i]], self.__Calculator.getError())", "def print_overview(errors, failures):\n if len(errors) + len(failures) == 0:\n print(\"All tests passed!\")\n else:\n 
print(\"Some tests failed ({} errors, {} failures)\"\n \"\".format(len(errors), len(failures)))", "def __str__(self):\n return \"\\n\\n\".join(self.failures)", "def get_success_message(cls, args, results):\n return cls.success_message", "def message_user_results(self, request, successes, failures, action):\n\n self.message_user_success(request, successes, action)\n self.message_user_failure(request, failures, action)", "def generate_output(config, dataset):\n state = \"OK\"\n\n if dataset[\"cpu_usage\"] >= config[\"critical\"]:\n state = \"CRITICAL\"\n elif dataset[\"cpu_usage\"] >= config[\"warning\"]:\n state = \"WARNING\"\n\n message = f\"{SERVICE} {state} - {dataset['cpu_usage']} % \"\n message = message + add_performace_data(config, dataset)\n return state, message", "def get_printed_eval_results(self, general_metrics, report):\n printed_eval_results = \"----- Evaluation results -----\"\n for key in sorted(general_metrics.keys()):\n printed_eval_results += \"\\n %s = %s\" % (key, str(general_metrics[key]))\n printed_eval_results += f\"\\n{report}\"\n return printed_eval_results", "def _evaluation_summary(self, metric: str) -> str:\n fold_score_lists = self.fold_evaluations[metric].values()\n\n # Get the mean and std over all repetitions\n rep_means = [np.mean(list(x.values())) for x in fold_score_lists]\n rep_stds = [np.std(list(x.values())) for x in fold_score_lists]\n\n return \"{:.4f} +- {:.4f}\".format(np.mean(rep_means), np.mean(rep_stds))", "def errors(self) -> str:\n return self.job_errors() + self.analysis_errors()", "def standard_status():\n errors, warnings, infos = THE_LOGGER.status()\n info(errors, \"errors\")\n info(warnings, \"warnings\")\n info(infos, \"infos\")", "def analyze_scenario(self):\n global_result = '\\033[92m'+'SUCCESS'+'\\033[0m'\n\n for criterion in self.scenario.get_criteria():\n if criterion.test_status != \"SUCCESS\":\n global_result = '\\033[91m'+'FAILURE'+'\\033[0m'\n\n if self.scenario.timeout_node.timeout:\n global_result = '\\033[91m'+'FAILURE'+'\\033[0m'\n\n ResultOutputProvider(self, global_result)", "def get_warning_text(self):\n \n to_print = []\n if self['skipped_subchannel'] > 0:\n to_print.append(\"Some event with large weight have been discarded.\"+\\\n \" This happens %s times.\" % self['skipped_subchannel'])\n if self['n_madloop_calls'] > 0:\n fraction = self['exceptional_points']/float(self['n_madloop_calls'])\n if fraction > 1.0e-4:\n to_print.append(\"Some PS with numerical instability have been set \"+\\\n \"to a zero matrix-element (%.3g%%)\" % (100.0*fraction))\n \n return ('\\n'.join(to_print)).replace(\"'\",\" \")", "def status(self, exe_paths=False):\n if self.status_msg is None:\n self.check_all(exe_paths=exe_paths)\n return self.status_msg" ]
[ "0.602707", "0.6026309", "0.575857", "0.57070476", "0.56101066", "0.55959904", "0.5554432", "0.5525771", "0.55251604", "0.54986006", "0.54985416", "0.5497898", "0.54954153", "0.5488641", "0.54584396", "0.5451354", "0.5448297", "0.54367286", "0.54192775", "0.5412327", "0.5397026", "0.53800017", "0.5374268", "0.53584766", "0.5351492", "0.52976125", "0.5291636", "0.52794844", "0.5264451", "0.52618" ]
0.73922765
0
Synchronously sends a button one time.
def send_one(self, button):
    self.client.send_one(self.name, button)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def background_thread():\n state = True\n while True:\n newstate = False if GPIO.input(18) else True\n if state != newstate:\n state = newstate \n print('Button', state)\n socketio.emit('button', {'state': state}, namespace='/test')\n time.sleep(.1)", "def send_one(self, remote, button):\n if lirc_client.lirc_send_one(self.fd, remote, button) == -1:\n raise RuntimeError(\"Error while communicating with LIRC daemon!\")", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def simulate_button_clicked(self):\n self.simulate_bool = True\n self.update_change()", "async def send(self):", "def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def handle_button(self, button):\n last_run = self.last_seen[button] if button in self.last_seen else 0\n diff = time.time() - last_run\n\n if diff <= 1:\n logging.warning(\"duplicate: %s, %d, %d\", button, last_run, diff)\n return\n\n try:\n cmd = buttons.COMMANDS[button]\n except KeyError:\n logging.warning(\"No instructions found for button %s.\", button)\n return\n\n self.last_seen[button] = time.time()\n\n try:\n function, music, zone = cmd\n except ValueError, ex:\n logging.warning(\"Couldn't parse instructions from %s: %s\", cmd, ex)\n return\n\n device = self.player.zone(zone)\n if not device:\n logging.warning(\"Can't find a device called %s\", zone)\n return\n\n # If this is the same button we saw last, pause or unpause it.\n if button == self.last_button:\n device.toggle()\n return\n\n if function == \"play_local\":\n self.play_local(music, device)\n self.last_button = button\n else:\n logging.warning(\"Don't know how to %s.\", cmd)", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1", "def send_btn_clicked(self):\n command = self.SendLine.text()\n self.Serial.send(command)", "def run_user_code(self, button):\n button.setEnabled(False)\n self.user_thread.start()", "def send_command(self):\n button = self.sender()\n answer: str = self.UsbHost.send_command(self.state.ser, self.command_dict[button], str(self.state.device_id))\n if answer == 'Ok':\n self.statusbar.showMessage(self.result_dict[button])\n else:\n error_message(self.error_dict[button])\n self.statusbar.showMessage(answer_translate[answer])\n self.create_log_message(self.command_dict[button], answer, \"\")", "def buttonEventCallback(argument):\n global buttonCanBePressed\n if buttonCanBePressed is True:\n ledpedbutton.value(1)\n buttonCanBePressed = False\n start_new_thread(is_timer_4, tuple('0'))", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "async def async_preset_button(self, preset):\n if self._preset_key != None and preset != None:\n if not self._slave_mode:\n if int(preset) > 0 and int(preset) <= self._preset_key:\n value = await self.async_call_linkplay_httpapi(\"MCUKeyShortClick:{0}\".format(str(preset)), None)\n if value != \"OK\":\n _LOGGER.warning(\"Failed to recall 
preset %s. \" \"Device: %s, Got response: %s\", self.entity_id, preset, value)\n else:\n _LOGGER.warning(\"Wrong preset number %s. Device: %s, has to be integer between 1 and %s\", self.entity_id, preset, self._preset_key)\n else:\n await self._master.async_preset_button(preset)", "def handle_button_press(button_state, mqtt_client, message):\n if button_state:\n ev3.Sound.speak(message).wait()\n mqtt_client.send_message(\"button_pressed\", [message])", "def click_process(self):\n # TODO implement print function for verbosity\n\n # Create Worker Thread\n self.worker = Worker(self)\n\n self.worker.start()\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.log.connect(self.update_log)\n\n # Safety Lock\n self.Process_Button.setEnabled(False)\n self.worker.finished.connect(lambda: self.Process_Button.setEnabled(True))", "async def perform_action(self) -> None:", "def action(self):\n self.action_thread = Thread(target=self._action_then_signal, daemon=True)\n self.action_thread.start()", "def button_pressed(self, button: domain.ButtonPin, arduino: domain.Arduino):\n logger.debug(f\"Button {arduino.name}/button{button.number} pressed\")\n\n if button.multi_click_timer is not None:\n button.stop_multi_click_timer()\n if button.has_multi_click_actions(0):\n button.clicks += 1\n else:\n button.clicks = 1\n\n logger.debug(f\"Get immediate actions\")\n self.__action_executor.execute_actions(button.get_button_immediate_actions(), button, arduino.name)\n\n seconds_down = button.get_smallest_longdown_time(0)\n if seconds_down is not None:\n button.start_long_down_timer(seconds_down, self.__sender.publish_message_to_button_processor,\n [arduino.name, button.number, seconds_down, button.clicks])", "async def async_send(self):\n return await super().async_send(data1=self._data1)", "def button_click(self, btn, mbtn):\n self.last_action_ts = pygame.time.get_ticks() # update last action timestamp (idle shutdown countdown restarts)\n self.show_time = pygame.time.get_ticks() # refresh show time timestamp, so countdown restarts\n\n status = self.player.get_status()\n\n # which button was pressed?\n if btn is self.btn_play:\n logger.debug(\"button_click: btn_play \")\n player.pause() # toggle play/pause\n elif btn is self.btn_prev:\n logger.debug(\"button_click: btn_prev \")\n try:\n if int(status['song']) > 0: # only accept 'prev' button push if this is not the first song\n player.prev()\n except Exception as e:\n logger.error(e, exc_info=True) # log any exceptions\n elif btn is self.btn_next:\n logger.debug(\"button_click: btn_next \")\n try:\n if int(status['song']) < (int(status['playlistlength']) - 1):\n player.next()\n except Exception as e:\n logger.error(e, exc_info=True) # log any exceptions\n elif btn is self.background:\n logger.debug(\"button_click: background \")\n if status['state'] == 'play' or status['state']== 'pause':\n self.show_buttons()\n else:\n logger.debug(\"button_click: <unknown>\")", "def poll(self):\n\tself.met = self.button.poll()", "def _sendingCommand(self): \n\n while True:\n self.tello.send_command('command') \n time.sleep(5)", "def _PressLeftButton(self):\n self._kit.MousePressButtons({PeripheralKit.MOUSE_BUTTON_LEFT})\n time.sleep(self.send_delay)", "def bt_button_click(self, bool_val):\n\n # If it is the first ball and the BT button press emits True\n if bool_val and self.first_ball:\n self.start_drill()\n self.first_ball = False\n # Otherwise, if only the BT button is pressed but it is not the first ball, stop the drill and disconnect from the Bluetooth 
button\n else:\n self.stop_drill()\n self.threaded_bt_helper.disconnect_from_button()", "async def async_turn_on(self):\n await self.local_meural.send_key_resume()", "def executeButton(self, nodeId):\n identifierPropertie = self.getNodeProperties(\n nodeId, [{\"name\": \"identifier\", \"value\": \"\"}])\n identifier = identifierPropertie['properties'][0]['value']\n if self.existSession(self.client_session.session_key):\n if not self.isInBackground(identifier):\n # start new executeButtonThread\n thread = threading.Thread(\n target=self._executeButtonThread, args=[nodeId])\n thread.daemon = True\n thread.start()\n # Node is already running in background\n return True\n raise exceptions.NotAcceptable(\n \"executeButton - There's no session\")", "def hit_send_payment_button(self):\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.SEND_PAYMENT_BUTTON)\n\t\telement.click_on()\n\t\treturn None", "def arduPusherClick(self, dummy = 0):\r\n self.ardu.write(chr(self.CLICK))" ]
[ "0.62327343", "0.6195085", "0.6072948", "0.60537666", "0.59465384", "0.59400034", "0.5935611", "0.5875449", "0.578244", "0.57768023", "0.57764006", "0.5765144", "0.5585602", "0.55646986", "0.55332255", "0.5523995", "0.5501173", "0.5440503", "0.5439348", "0.5438772", "0.5416853", "0.5395597", "0.53944564", "0.5391387", "0.53692263", "0.53460276", "0.534446", "0.53440976", "0.53315586", "0.53130686" ]
0.72782546
0
initializes id and title
def __init__(self, id = 'abbrev', title = "Abbreviation Bibliography ID Cooker"):
    self.id = id
    self.title = title
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, id=\"\", title=\"\", arguments=\"\"):\n\t\tself.id= id\n\t\tself.title= title\n\t\tself.arguments= arguments", "def __init__(self, id_, name):\n self._id = id_\n\n self._name = name\n self._name = self._name.title()", "def __init__(self, title):\n self._title = title", "def initialize(self):\n self.ID = uuid.uuid4()\n self.TMDB_ID = 0\n self.title = \"\"\n self.release_date = \"\"\n self.popularity = \"\"\n self.overview = \"\"", "def __init__(self, id, show_name, author, description=None,episodes=None):\n self.id = id\n self.show_name = show_name\n self.author = author\n self.description = description\n self.episodes = episodes", "def __init__(self, id='Dummy Content', title='Dummy Content', actions=()):\n\n self.id = self._id = id\n self.title = title\n self._actions = {}\n\n self._setActions(actions)", "def __init__(self, id: str):\n self.id = id", "def __init__(self, label=None):\n super().__init__(\"id\", 1, [], label=label)", "def __init__(self, id):\n \n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self, id=None, title=None, description=None, price=None, image_url=None):\n self.id = id\n self.title = title\n self.description = description\n self.price = price\n self.image_url = image_url", "def __init__(self, title='', link='', desc=''):\n self.__title = title\n self.__link = link\n self.__desc = desc", "def __init__(__self__, *,\n id: str):\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: str):\n pulumi.set(__self__, \"id\", id)", "def __init__(name, title=\"\", description=\"\"):", "def __init__(__self__, *,\n id: Optional[str] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[str] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(self, title=\"\"):\n self.__title = title\n self.__data = []\n if self.__title:\n self.__nonzero = True\n else:\n self.__nonzero = False", "def __init__(self, id=None, name=None, nutriscore=None, url=None, stores=None, categories=None):\n self.id = id,\n self.name = name\n self.nutriscore = nutriscore\n self.url = url\n self.stores = stores\n self.categories = categories", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: Optional[pulumi.Input[str]] = None):\n if id is not None:\n pulumi.set(__self__, \"id\", id)", "def set_title(self, title):\n\n self._title = title\n # reset lazy loading\n self._title_words = []\n self._full_text = []\n self._full_text_words = []" ]
[ "0.78083724", "0.73984087", "0.7277978", "0.7061586", "0.686914", "0.6863247", "0.6813077", "0.6807725", "0.67897606", "0.666484", "0.666484", "0.666484", "0.666484", "0.66423106", "0.6562245", "0.6524675", "0.6524675", "0.6489632", "0.64539367", "0.64539367", "0.642176", "0.6404097", "0.63748056", "0.63748056", "0.63748056", "0.63748056", "0.63748056", "0.63748056", "0.63748056", "0.6339488" ]
0.7662275
1
cooks a bibref id for one reference entry dict
def _cookIdCore(self, ref, **kwargs):
    # AUTHORS
    namepart = 'nobody'
    if self._refHasAuthorNames(ref):
        lastnames = []
        for each in ref['authors']:
            if each.get('lastname', None):
                lastnames.append(each['lastname'])
        if len(lastnames) > 1:
            namepart = '%s' % ''.join([ lastname[0] for lastname in lastnames ])
        elif len(lastnames) == 1:
            namepart = lastnames[0][:3]
        else:
            pass
    # PUBLICATION YEAR
    if ref.get('publication_year', None):
        yearpart = str(ref['publication_year'])
    else:
        yearpart = "1000"
    return namepart + yearpart
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_cff_reference(bib_entry: pybtex.database.Entry) -> dict:\n\n def _cff_transform(cff_field, bib_value):\n if cff_field == \"type\":\n if bib_value == \"inproceedings\":\n return \"article\"\n elif bib_value == \"incollection\":\n return \"article\"\n elif cff_field == \"publisher\":\n return {\"name\": bib_value}\n elif cff_field == \"month\":\n try:\n return int(bib_value)\n except ValueError:\n return {\n \"jan\": 1,\n \"feb\": 2,\n \"mar\": 3,\n \"apr\": 4,\n \"may\": 5,\n \"jun\": 6,\n \"jul\": 7,\n \"aug\": 8,\n \"sep\": 9,\n \"oct\": 10,\n \"nov\": 11,\n \"dec\": 12,\n }[bib_value[:3].lower()]\n return bib_value\n\n cff_reference = {\n \"type\": _cff_transform(cff_field=\"type\", bib_value=bib_entry.type),\n \"authors\": [\n to_cff_person(person) for person in bib_entry.persons[\"author\"]\n ],\n }\n # Map BibTeX to CFF fields. This is just a subset of the most relevant\n # fields.\n fields = {\n \"doi\": \"doi\",\n \"edition\": \"edition\",\n \"isbn\": \"isbn\",\n \"license\": \"license\",\n \"month\": \"month\",\n \"number\": \"number\",\n \"pages\": \"pages\",\n \"publisher\": \"publisher\",\n \"title\": \"title\",\n \"url\": \"url\",\n \"version\": \"version\",\n \"volume\": \"volume\",\n \"year\": \"year\",\n \"booktitle\": \"collection-title\",\n }\n for bibtex_field, value in bib_entry.fields.items():\n bibtex_field = bibtex_field.lower()\n if bibtex_field in fields:\n cff_field = fields[bibtex_field]\n cff_reference[cff_field] = _cff_transform(\n cff_field=cff_field, bib_value=value\n )\n return cff_reference", "def insert_cit_ref_list_intodb(citation_dic, reference_dic, selfcbdic,\n selfdic, authorcitdic):\n insert_into_cit_db(reference_dic,\"reversedict\")\n insert_into_cit_db(citation_dic,\"citationdict\")\n insert_into_cit_db(selfcbdic,\"selfcitedbydict\")\n insert_into_cit_db(selfdic,\"selfcitdict\")\n\n for a in authorcitdic.keys():\n lserarr = (serialize_via_marshal(authorcitdic[a]))\n #author name: replace \" with something else\n a.replace('\"', '\\'')\n a = unicode(a, 'utf-8')\n try:\n ablob = run_sql(\"select hitlist from rnkAUTHORDATA where aterm = %s\", (a,))\n if not (ablob):\n #print \"insert into rnkAUTHORDATA(aterm,hitlist) values (%s,%s)\" , (a,lserarr)\n run_sql(\"insert into rnkAUTHORDATA(aterm,hitlist) values (%s,%s)\",\n (a,lserarr))\n else:\n #print \"UPDATE rnkAUTHORDATA SET hitlist = %s where aterm=%s\"\"\" , (lserarr,a)\n run_sql(\"UPDATE rnkAUTHORDATA SET hitlist = %s where aterm=%s\",\n (lserarr,a))\n except:\n register_exception(prefix=\"could not read/write rnkAUTHORDATA aterm=\"+a+\" hitlist=\"+str(lserarr), alert_admin=True)", "def get_id_from_ref(ref):\n ref_id = None\n if ref is not None and len(ref) > 0:\n ref_id = path.split(ref)[1]\n return ref_id", "def ref(self, refid: Optional[str] = None) -> str:\n if refid:\n self._ref = refid\n return self._ref", "def makeFootnoteRefId(self, id):\n if self.getConfig(\"UNIQUE_IDS\"):\n return 'fnref%s%d-%s' % (self.sep, self.unique_prefix, id)\n else:\n return 'fnref%s%s' % (self.sep, id)", "def circulation_build_item_ref(loan_pid, loan):\n return {\n \"$ref\": \"{scheme}://{host}/api/resolver/circulation/loans/{loan_pid}/\"\n \"item\".format(\n scheme=current_app.config[\"JSONSCHEMAS_URL_SCHEME\"],\n host=current_app.config[\"JSONSCHEMAS_HOST\"],\n loan_pid=loan_pid,\n )\n }", "def get_reference_for(document):\n k = Factory.build('shared.doc_reference')\n for key in ('name', 'canonical_name'):\n conditional_copy(document, k, key)\n if not getattr(k, 'canonical_name'):\n if 
getattr(k,'name'):\n setattr(k,'canonical_name',getattr(k,'name'))\n for key in ('version',):\n conditional_copy(document._meta, k, key)\n for inkey, outkey in [('uid','id'),]:\n conditional_copy(document._meta, k, inkey, outkey)\n for inkey, outkey in [('type_key','type'),]:\n setattr(k, outkey, getattr(document._osl,inkey))\n return k", "def _mk_coref_id():\n num, alpha = int(_mk_coref_id.id[:-1]), _mk_coref_id.id[-1]\n if alpha == 'Z':\n alpha = 'A'\n num += 1\n else:\n alpha = chr(ord(alpha) + 1)\n\n _mk_coref_id.id = '%s%s' % (num, alpha)\n return _mk_coref_id.id", "def reference_id(self) -> str:\n return pulumi.get(self, \"reference_id\")", "def get_reference_id(reference):\n ref_id = -1\n match = re.search('\\[[0-9]+\\]', reference)\n if match:\n ref_id = int(match.group(0).strip('[]'))\n return ref_id", "def reference_id(self) -> Optional[str]:\n return pulumi.get(self, \"reference_id\")", "def _paper_id(hit: DD) -> str:\n return hit[\"_source\"][\"paper_id\"]", "def reference_id(self, reference_id):\n\n self._reference_id = reference_id", "def create_reference_dict(target, sclass):\n # Retrieve reference & store in FileStoreID\n ref_path = sclass.unavoidable_download_method(target, 'ref.fasta')\n\n # Tool call\n output = os.path.splitext(sclass.docker_path(ref_path))[0]\n command = 'picard-tools CreateSequenceDictionary R={} O={}.dict'.format(sclass.docker_path(ref_path), output)\n sclass.docker_call(command, tool_name='picard')\n\n # Update FileStoreID\n target.updateGlobalFile(sclass.ids['ref.dict'], os.path.splitext(ref_path)[0] + '.dict')", "def _id(self, document):\n pass", "def process_bib_entry(\n cid, bibdatabase, bibnums, fallback_fmt=\"[{author_abbrev}, {year}]\"\n):\n entry = bibdatabase[cid]\n if cid not in bibnums:\n bibnums[cid] = len(bibnums) + 1\n\n if \"doi\" in entry:\n return r'<a href=\"https://doi.org/{doi}\">{text}</a>'.format(\n doi=entry[\"doi\"], text=bibnums[cid]\n )\n elif \"url\" in entry:\n return r'<a href=\"{url}\">{text}</a>'.format(url=entry[\"url\"], text=bibnums[cid])\n elif \"link\" in entry:\n return r'<a href=\"{url}\">{text}</a>'.format(\n url=entry[\"link\"], text=bibnums[cid]\n )\n else:\n return bibnums[cid]\n # add_abbreviated_author(entry)\n # split_date(entry)\n # return DefaultFormatter().format(fallback_fmt, **entry)", "def get_bibtex_entry(doi, bibtext_cache={}, shortdoi_cache={}):\r\n bibtext = get_bibtext(doi, cache = bibtext_cache)\r\n if not bibtext:\r\n return None\r\n\r\n short_doi = shorten(doi, cache = shortdoi_cache)\r\n parser = BibTexParser()\r\n parser.ignore_nonstandard_types = False\r\n bibdb = bibtexparser.loads(bibtext, parser)\r\n entry, = bibdb.entries\r\n quoted_doi = urllib.request.quote(doi)\r\n entry['link'] = 'https://doi.org/{}'.format(quoted_doi)\r\n if 'author' in entry:\r\n entry['author'] = ' and '.join(entry['author'].rstrip(';').split('; '))\r\n entry['ID'] = short_doi[3:]\r\n return entry", "def ref(element):\n return int(element['attrs']['ref'])", "def set_bibs(self, number):", "def setIdRef(self, *args):\n return _libsbml.SBaseRef_setIdRef(self, *args)", "def load_references(self, collections, item):", "def make_bibtex(self):\n\n\t\t# bib = requests.request('GET', 'http://dx.doi.org/' + self.doi, ", "def mr2bib(id_list):\n d = mr2bib_dict(id_list)\n l = []\n for id in id_list:\n try:\n l.append(d[id])\n except:\n l.append(ReferenceErrorInfo(\"Not found\", id))\n\n return l", "def _resolve_dict_entry(self, doc_uri, main_doc, obj):\n # Interpret '$ref' key if present in obj\n if '$ref' in obj:\n result 
= self._load_ref(doc_uri, main_doc, obj['$ref'])\n else:\n result = self.dict_class()\n # Merge values from obj with result\n for k, v in obj.items():\n if k != '$ref':\n result[k] = self._resolve(doc_uri, main_doc, v)\n return result", "def ref(name):\n return { 'name': name } if name else None", "def _process_dbxref(self):\n\n raw = '/'.join((self.rawdir, 'dbxref'))\n logger.info(\"processing dbxrefs\")\n line_counter = 0\n\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (dbxref_id, db_id, accession, version, description, url) = line\n # dbxref_id\tdb_id\taccession\tversion\tdescription\turl\n # 1\t2\tSO:0000000\t\"\"\n\n db_ids = { # the databases to fetch\n 50: 'PMID', # pubmed\n 68: 'RO', # obo-rel\n 71: 'FBdv', # FBdv\n 74: 'FBbt', # FBbt\n # 28:, # genbank\n 30: 'OMIM', # MIM\n # 38, # ncbi\n 75: 'ISBN', # ISBN\n 46: 'PMID', # PUBMED\n 51: 'ISBN', # isbn\n 52: 'SO', # so\n # 76, # http\n 77: 'PMID', # PMID\n 80: 'FBcv', # FBcv\n # 95, # MEDLINE\n 98: 'REACT', # Reactome\n 103: 'CHEBI', # Chebi\n 102: 'MESH', # MeSH\n 106: 'OMIM', # OMIM\n 105: 'KEGG-path', # KEGG pathway\n 107: 'DOI', # doi\n 108: 'CL', # CL\n 114: 'CHEBI', # CHEBI\n 115: 'KEGG', # KEGG\n 116: 'PubChem', # PubChem\n # 120, # MA???\n 3: 'GO', # GO\n 4: 'FlyBase', # FlyBase\n # 126, # URL\n 128: 'PATO', # PATO\n # 131, # IMG\n 2: 'SO', # SO\n 136: 'MESH', # MESH\n 139: 'CARO', # CARO\n 140: 'NCBITaxon', # NCBITaxon\n # 151, # MP ???\n 161: 'DOI', # doi\n 36: 'BDGP', # BDGP\n # 55, # DGRC\n # 54, # DRSC\n # 169, # Transgenic RNAi project???\n 231: 'RO', # RO ???\n 180: 'NCBIGene', # entrezgene\n # 192, # Bloomington stock center\n 197: 'UBERON', # Uberon\n 212: 'ENSEMBL', # Ensembl\n # 129, # GenomeRNAi\n 275: 'PMID', # PubMed\n 286: 'PMID', # pmid\n 264: 'HGNC',\n # 265: 'OMIM', # OMIM_Gene\n 266: 'OMIM', # OMIM_Phenotype\n 300: 'DOID', # DOID\n 302: 'MESH', # MSH\n 347: 'PMID', # Pubmed\n }\n\n if accession.strip() != '' and int(db_id) in db_ids:\n # scrub some identifiers here\n m = re.match(\n r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):',\n accession)\n if m:\n accession = re.sub(m.group(1)+r'\\:', '', accession)\n elif re.match(\n r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)',\n accession):\n continue\n elif re.match(r'\\:', accession): # starts with a colon\n accession = re.sub(r'\\:', '', accession)\n elif re.search(r'\\s', accession):\n # skip anything with a space\n # logger.debug(\n # 'dbxref %s accession has a space: %s',\n # dbxref_id, accession)\n continue\n\n if re.match(r'http', accession):\n did = accession.strip()\n else:\n prefix = db_ids.get(int(db_id))\n did = ':'.join((prefix, accession.strip()))\n if re.search(r'\\:', accession) and prefix != 'DOI':\n logger.warning(\n 'id %s may be malformed; skipping', did)\n\n self.dbxrefs[dbxref_id] = {db_id: did}\n\n elif url != '':\n self.dbxrefs[dbxref_id] = {db_id: url.strip()}\n else:\n continue\n\n # the following are some special cases that we scrub\n if int(db_id) == 2 \\\n and accession.strip() == 'transgenic_transposon':\n # transgenic_transposable_element\n self.dbxrefs[dbxref_id] = {db_id: 'SO:0000796'}\n\n line_counter += 1\n\n return", "def set_id(self, refobj, identifier):\n cmds.setAttr(\"%s.identifier\" %refobj, identifier)", "def set_reference_id(self, reference_id):\n self.reference_id = reference_id", "def __init__(self,\n id = 'abbrev',\n title = \"Abbreviation Bibliography ID 
Cooker\"):\n self.id = id\n self.title = title", "def get_identifier(self, object):\n try:\n identifier = object[\"uri\"]\n except KeyError:\n identifier = object[\"ref\"]\n return identifier" ]
[ "0.6515439", "0.59412706", "0.5690313", "0.5684403", "0.56685394", "0.56310415", "0.5598006", "0.5590219", "0.55857784", "0.5583656", "0.55488837", "0.55436593", "0.54998034", "0.54959476", "0.54939634", "0.5449776", "0.5437612", "0.5418777", "0.5413845", "0.5396753", "0.53895867", "0.53893536", "0.53644747", "0.5351243", "0.53375936", "0.53137416", "0.5301197", "0.5299701", "0.52974594", "0.5279274" ]
0.61811495
1
Test for when clone lun is not passed a block count.
def test_clone_lun_zero_block_count(self):
    lun = netapp_api.NaElement.create_node_with_children(
        'lun-info',
        **{'alignment': 'indeterminate', 'block-size': '512',
           'comment': '', 'creation-timestamp': '1354536362',
           'is-space-alloc-enabled': 'false',
           'is-space-reservation-enabled': 'true',
           'mapped': 'false', 'multiprotocol-type': 'linux',
           'online': 'true', 'path': '/vol/fakeLUN/fakeLUN',
           'prefix-size': '0', 'qtree': '', 'read-only': 'false',
           'serial-number': '2FfGI$APyN68', 'share-state': 'none',
           'size': '20971520', 'size-used': '0', 'staging': 'false',
           'suffix-size': '0',
           'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
           'volume': 'fakeLUN', 'vserver': 'fake_vserver'})
    self.library._get_lun_attr = mock.Mock(return_value={
        'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
    self.library.zapi_client = mock.Mock()
    self.library.zapi_client.get_lun_by_args.return_value = [lun]
    self.library._add_lun_to_table = mock.Mock()
    self.library._clone_lun('fakeLUN', 'newFakeLUN')
    self.library.zapi_client.clone_lun.assert_called_once_with(
        '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
        'newFakeLUN', 'true', block_count=0, dest_block=0, src_block=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def corrected_clump_tester(clump):\n tester = True\n for block in clump:\n if len(block) >= 3: # Fixed block!\n tester = False\n break\n return tester", "def org_clump_tester(clump):\n tester = True\n for block in clump:\n if len(clump) >= 3: # clump should be block!\n tester = False\n break\n return tester", "def test_block_missing_batch(self):\n pass", "def test_clone_scenario(self):\n pass", "def illegal(self):\r\n ## First checks for integer value\r\n if type(self.blockList) is int: return 1\r\n ## Then checks for 6 rows\r\n if len(self.blockList) > 6: return 1\r\n for row in self.blockList:\r\n ## Then checks that each row has 6 columns\r\n if len(self.blockList[row]) > 6: return 1\r\n for column in self.blockList[row]:\r\n ## 18 blocks is the maximum number of blocks that can be on the board\r\n if block < 0 or block > 18: return 1\r\n return 0", "def __ne__(self, *args):\n return _ida_hexrays.cblock_t___ne__(self, *args)", "def is_clone(self):\n return not self.is_original()", "def test_create_cloned_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n orig = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n clone = {'id': '2', 'name': 'clone1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_cloned_volume(clone, orig)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'clone1', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'clone1'}\n self.assertDictMatch(expected_pid, pid)", "def has_clones(self, pool, project, share, snapshot):\n svc = self.snapshot_path % (pool, project, share, snapshot)\n ret = self.rest_get(svc, restclient.Status.OK)\n val = jsonutils.loads(ret.data)\n return val['snapshot']['numclones'] != 0", "def test_clone_system(self):\n pass", "def no_bricks(self):\n if self.brick_count == self.total_bricks:\n return True\n else:\n return False", "def test_block_bad_batch(self):\n pass", "def num_blocks(self): # -> int:\n ...", "def additional_cloning_checks(self):\n pass", "def test_clone_fail_unexpected_arg(self, cosmo):\n with pytest.raises(TypeError, match=\"unexpected keyword argument\"):\n newclone = cosmo.clone(not_an_arg=4)", "def check_no_empty_cell_inside(line, hints, block_origins, block_index):\n sth_changed = False\n # for compactness\n i = block_index\n\n required_cells = line[block_origins[i]:block_origins[i] + hints[i]]\n if -1 in required_cells:\n block_origins[i] += hints[i] - required_cells[::-1].index(-1)\n # pushing following blocks origins further away\n dummy, block_origins = push_block_origins(hints, block_origins, index=i)\n sth_changed = True\n\n return sth_changed, block_origins", "def test_clone_repository(m_check):\n m_check.return_value = 0\n assert clone_repository(\"test\", \"test\", \"test\") == 0", "def is_block(self):\n return self.v & 1 == 0", "def isSplittable(self,cutLoci):\n mirror = self.copy()\n # Make new random cuts\n mirror.cutNow(cutLoci)\n # Remove the CL concerning the cleavages\n if self.keepCL == False:\n mirror.removeCL() \n return 
mirror.isConnected()", "def check_block(self, block):\n pass", "def miner_lock_blocks(self) -> int:", "def test_good_fork_lower(self):\n bvh = self.BlockValidationHandler()\n\n root = self.btm.chain_head\n\n # create a new valid chain 5 long from the current root\n new_head = self.btm.generate_chain(root, 5,\n {'add_to_store': True})\n self.btm.set_chain_head(new_head[-1])\n # generate candidate chain 3 long from the same root\n new_block = self.btm.generate_chain(root, 3,\n {'add_to_cache': True})\n\n bv = self.create_block_validator(new_block[-1], bvh.on_block_validated)\n bv.run()\n\n self.assertTrue(bvh.has_result())\n self.assertTrue(new_block[-1].status == BlockStatus.Valid)\n self.assertFalse(bvh.result[\"commit_new_block\"])", "def component_clone ( same ) : \n if isinstance ( same , str ) \\\n and same.strip().lower() in ( 'clone' , 'cloned' , 'same' ) : return True \n return False", "def testNandsAreDifferent(self):\n\n nand_original = self.board.get((0, 1))\n nand_copied = self.board.get((3, 1))\n\n self.assertIsNot(nand_original, nand_copied)", "def check_size(prev, current, delta):\n before = prev.pools[0].used\n after = current.pools[0].used\n assert delta == (before - after) >> 20", "def test_block_structure_matrices_on_voronoi_validity(n_rods):\n world_rods = [MockRod(np.random.randint(10, 30 + 1)) for _ in range(n_rods)]\n block_structure = MemoryBlockCosseratRod(\n world_rods, [i for i in range(len(world_rods))]\n )\n\n for i in range(n_rods):\n start_idx = block_structure.start_idx_in_rod_voronoi[i]\n end_idx = block_structure.end_idx_in_rod_voronoi[i]\n\n # bend matrix\n assert np.shares_memory(block_structure.bend_matrix, world_rods[i].bend_matrix)\n assert np.shares_memory(\n block_structure.matrix_dofs_in_rod_voronois, world_rods[i].bend_matrix\n )\n assert_allclose(\n block_structure.bend_matrix[..., start_idx:end_idx],\n world_rods[i].bend_matrix,\n )", "def test_fork_missing_predecessor(self):\n bvh = self.BlockValidationHandler()\n\n root = self.btm.chain_head\n\n # generate candidate chain 3 long off the current head.\n new_block = self.btm.generate_chain(root, 3,\n {'add_to_cache': True})\n # remove one of the new blocks\n del self.btm.block_cache[new_block[1].identifier]\n\n bv = self.create_block_validator(new_block[-1], bvh.on_block_validated)\n bv.run()\n\n self.assertTrue(bvh.has_result())\n self.assertTrue(new_block[-1].status == BlockStatus.Invalid)\n self.assertFalse(bvh.result[\"commit_new_block\"])", "def bd_conditional_N_nll(params, N_w, trajectory, return_params=False,\n init_clone_size_resolution=10):\n s = params[0]\n \n # Compute time_steps\n trajectory['delta_t'] = np.insert(np.diff(trajectory.age),\n 0, 0)\n\n init_clone_size_prior = clone_size_prior(trajectory, N_w=N_w,\n prior_resolution=init_clone_size_resolution)\n\n # Initialise nll\n nll = 0\n #select time_point\n for i, row in trajectory[1:].iterrows():\n # Create list of AO probabilities conditional on initial clone size\n cond_init_prob = []\n # fix initial_clone_size\n for j in range(init_clone_size_resolution):\n cond_init_prob.append(\n binomial_sequencing_probability(AO=row.AO,\n DP=row.DP,\n init_clone_size=(\n init_clone_size_prior[0][j, i-1]),\n s=s,\n delta_t=row.delta_t,\n N_w=N_w))\n\n # marginalise AO_prob_cond_init with respect to initial clone_size\n AO_prob = np.trapz(y=cond_init_prob*init_clone_size_prior[1][:, i-1],\n x=init_clone_size_prior[0][:, i-1])\n\n # Compute negative log likelihood\n # Avoiding divide by zero\n if AO_prob != 0:\n nll -= np.log(AO_prob)\n\n 
else:\n # use min_nll = np.log(nextafter(0,1))\n nll -= min_nll\n\n if return_params is False:\n return nll\n else:\n return nll, params", "def testCheckSourceCopyOperation_FailBlockCountsMismatch(self):\n payload_checker = checker.PayloadChecker(self.MockPayload())\n self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,\n None, 0, 1, 'foo')", "def verify_lun_not_in_use(lun_id):\n luns = []\n for sd in ll_sd.get_storagedomain_objects():\n if sd.get_storage().get_type() in config.BLOCK_TYPES:\n luns += (\n sd.get_storage().get_volume_group()\n .get_logical_units().get_logical_unit()\n )\n lun_ids = [lun.get_id() for lun in luns]\n return lun_id not in lun_ids" ]
[ "0.5885451", "0.5682751", "0.56746435", "0.5612983", "0.5546021", "0.5530416", "0.55098057", "0.5433039", "0.54293126", "0.5397168", "0.536836", "0.5333882", "0.53137195", "0.530228", "0.52823144", "0.5272673", "0.52483624", "0.5217936", "0.5197577", "0.51798713", "0.5166777", "0.51497424", "0.51455504", "0.5144805", "0.5128626", "0.5121902", "0.5112802", "0.51115096", "0.51062566", "0.50871754" ]
0.7056724
0
Checks if symbol is a valid device
def _check_validdevice(self, symbol): if symbol.type == self.scanner.KEYWORD and \ symbol.id in self.validdeviceids: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_deviceline(self):\n # Check if device name is valid\n if self._check_name(self.symbol):\n self.device_name = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '=' is used\n if self._is_equal(self.symbol):\n # Get next symbol\n self.symbol = self.scanner.get_symbol()\n # Check if name has been assigned to a valid device type\n if self._check_validdevice(self.symbol):\n self.device_kind = self.symbol\n self.symbol = self.scanner.get_symbol()\n if self._is_semicolon(self.symbol):\n # No device property\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create device if no previous errors\n device_error = self.devices.make_device(\n self.device_name.id,\n self._device_type_returner(\n self.device_kind))\n # Send the returned error ID for error reporting\n self._display_semantic_error(device_error)\n self.symbol = self.scanner.get_symbol()\n elif self._is_comma(self.symbol):\n # Device property set\n self.symbol = self.scanner.get_symbol()\n self.device_param, \\\n self.device_paramvalue \\\n = self._check_paramindevice()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create device if no previous errors\n if self._device_type_returner(\n self.device_kind) == \\\n self.devices.SIGGEN:\n # Use symbol attribute 'value' to get parameter\n # value, since the symbol's 'id' attribute\n # would not capture a leading '0' in the signal\n # generator's signal string\n device_error = self.devices.make_device(\n self.device_name.id,\n self._device_type_returner(\n self.device_kind),\n self.device_paramvalue.value)\n else:\n # For other device types\n device_error = self.devices.make_device(\n self.device_name.id,\n self._device_type_returner(\n self.device_kind),\n self.device_paramvalue.id)\n # Send the returned error ID for error reporting\n self._display_semantic_error(device_error)\n self._check_semicolon_else_skip(self.symbol)\n self.symbol = self.scanner.get_symbol()\n else:\n # Neither semicolon nor comma\n self._display_syntax_error(\"semicoloncomma\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # The device type is not valid\n self._display_syntax_error(\"devicetype\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # No '='\n self._display_syntax_error(\"equal\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # The device name is not valid\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n\n return None", "def _valid_device(device):\n required_fields = ('name', 'type', 'group', 'canonical_name')\n if all(field in device for field in required_fields):\n return True\n return False", "def _check_devicelist(self):\n self.symbol = self.scanner.get_symbol()\n # Repeatedly call _check_deviceline() until END DEVICE\n while (\n not self._is_end(\n self.symbol)) and (\n not self._is_eof(\n self.symbol)):\n self._check_deviceline()\n if self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n return None", "def check_device_state(self):", "def devicelist(self):\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.DEVICES_ID):\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.LEFT_CURLY):\n self.symbol = self.scanner.get_symbol()\n self.device()\n while (self.symbol.type == self.scanner.NAME):\n self.device()\n # Check right curly bracket ends device 
block\n if (self.symbol.type == self.scanner.RIGHT_CURLY):\n self.symbol = self.scanner.get_symbol()\n else:\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.CONNECT_ID):\n # Error Type: missing '}'\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END'\n self.error(self.MISSING_RIGHT_CURLY,\n [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Bad name terminated devices incorrectly\n # Error type: Invalid name\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END'\n self.error(self.DEVICE_NAME, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Left curly needed after 'DEVICE'\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END' KEYWORD\n self.error(self.NO_CURLY_DEVICE, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: 'DEVICE' keyword required\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END' KEYWORD\n self.error(self.NEED_DEVICE_KEYWORD, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])", "def dev_is_ssd(dev):\n\n dev = proc_dev_to_sysfs_dev(dev)\n try:\n with open('/sys/block/{}/queue/rotational'.format(dev)) as typefd:\n return int(typefd.read()) == 0\n except IOError:\n print 'UNKNOWN: unable to read device type'\n sys.exit(NAGIOS_UNKNOWN)", "def __if_smart_err(disk_oj):\n\t\tif \"SAS\" in disk_oj.smart:\n\t\t\tif int(disk_oj.smart_attr[\"channel0Error\"][\"Invalid DWORD count\"]) > 0 or \\\n\t\t\t\t\t\t\tint(disk_oj.smart_attr[\"channel0Error\"][\"Running disparity error count\"]) > 0 or \\\n\t\t\t\t\t\t\tint(disk_oj.smart_attr[\"channel0Error\"][\"Loss of DWORD synchronization\"]) > 0 or \\\n\t\t\t\t\t\t\tint(disk_oj.smart_attr[\"channel0Error\"][\"Phy reset problem\"]) > 0 or \\\n\t\t\t\t\t\t\tint(disk_oj.smart_attr[\"channel1Error\"][\"Invalid DWORD count\"]) > 0 or \\\n\t\t\t\t\t\t\tint(disk_oj.smart_attr[\"channel1Error\"][\"Running disparity error count\"]) > 0 or \\\n\t\t\t\t\t\t\tint(disk_oj.smart_attr[\"channel1Error\"][\"Loss of DWORD synchronization\"]) > 0 or \\\n\t\t\t\t\t\t\tint(disk_oj.smart_attr[\"channel1Error\"][\"Phy reset problem\"]) > 0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\tif \"SATA\" in disk_oj.smart:\n\t\t\tif \"No Errors Logged\" not in disk_oj.smart:\n\t\t\t\treturn False\n\t\t\tfor attr_ in SATA_SMART_ERROR_LIST:\n\t\t\t\tif disk_oj.smart_attr[attr_][\"RAW_VALUE\"] > 0:\n\t\t\t\t\treturn False\n\t\t\treturn True", "def is_system_usable_block_device(pydev_device):\n if pydev_device.get(\"ID_BUS\") == \"usb\":\n # Skip USB devices\n return False\n if pydev_device.get(\"DM_VG_NAME\") or pydev_device.get(\"DM_LV_NAME\"):\n # Skip LVM devices\n return False\n if constants.DEVICE_NAME_MPATH in pydev_device.get(\"DM_NAME\", \"\") and pydev_device.get(\"DM_PART\", \"\"):\n # Skip mpath partition devices\n return False\n if pydev_device.get(\"ID_FS_TYPE\") == constants.DEVICE_FS_TYPE_MPATH:\n # Skip mpath member devices\n return False\n id_path = pydev_device.get(\"ID_PATH\", \"\")\n if \"iqn.\" in id_path or \"eui.\" in id_path:\n # Skip all iSCSI devices, they are links for volume storage.\n # As per https://www.ietf.org/rfc/rfc3721.txt, \"iqn.\" or \"edu.\"\n # have to be present when constructing iSCSI names.\n return False\n if ((\"-fc-\" in id_path or \"-lun-\" in id_path) and\n is_valid_multipath(pydev_device.get('DEVNAME'))):\n return False\n if 
pydev_device.get(\"ID_VENDOR\") == constants.VENDOR_ID_LIO:\n # LIO devices are iSCSI, should be skipped above!\n LOG.error(\"Invalid id_path. Device %s (%s) is iSCSI!\" %\n (id_path, pydev_device.get('DEVNAME')))\n return False\n return True", "def is_valid(self):\n if not self.__usb_if:\n return False\n return self.__usb_if.is_valid()\n #end is_valid()", "def _check_validconnectioninput(self):\n # Check if name is valid\n if self._check_name(self.symbol):\n second_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '.' is used:\n if self._is_period(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if device input begins with 'I'\n if self.names.get_name_string(self.symbol.id)[0] == \"I\":\n # Check if input number is a positive number\n try:\n inputno = int(\n self.names.get_name_string(\n self.symbol.id)[\n 1:])\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n except BaseException:\n # Input number is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n # OR if DType input\n elif self._check_validdtypeinput(self.symbol):\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n else:\n # Input is not valid\n self._display_syntax_error(\"input\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '.'\n self._display_syntax_error(\"period\")\n self._semicolon_skipper()\n return None, None\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n return None, None", "def test_invalid_device_type():\n _aws_device(wires=2, device_type=\"foo\", shots=None)", "def is_valid_pci_device_vendor_id(id):\n val = id.replace('0x', '').strip()\n if not is_valid_hex(id):\n return False\n if (len(val) > 4):\n return False\n return True", "def _check_validdtypeinput(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validdtypeinputs:\n return True\n else:\n return False", "def is_dev_name_valid(self):\n return self._name_re.match(self.dev_name) is not None", "def system_valid(self):\n return self.udev.devices_exist", "def AssertDevice(self, device):\n cmd = ('getprop(\"ro.product.device\") == \"%s\" || '\n 'abort(\"E%d: This package is for \\\\\"%s\\\\\" devices; '\n 'this is a \\\\\"\" + getprop(\"ro.product.device\") + \"\\\\\".\");') % (\n device, common.ErrorCode.DEVICE_MISMATCH, device)\n self.script.append(cmd)", "def device(self):\n if (self.symbol.type == self.scanner.NAME):\n device_name = self.names.get_name_string(self.symbol.id)\n device_id = self.names.query(device_name)\n self.old_symbol = self.symbol # for reporting duplicate devices\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.COLON):\n self.symbol = self.scanner.get_symbol()\n device_kind = self.logictype()\n\n if(self.symbol.type == self.scanner.COMMA):\n self.symbol = self.scanner.get_symbol()\n if(self.symbol.type == self.scanner.KEYWORD):\n if(self.symbol.id in [self.scanner.initial_ID,\n self.scanner.inputs_ID,\n self.scanner.period_ID, self.scanner.sequence_ID]):\n\n self.symbol = self.scanner.get_symbol()\n\n # initialise list to hold device property numbers\n device_property_list = []\n\n if(self.symbol.type == self.scanner.NUMBER):\n number_val = int(\n self.names.get_name_string(self.symbol.id))\n if device_kind == self.names.query(\"SIGGEN\"):\n if (number_val == 0 or number_val == 1):\n 
device_property_list.append(number_val)\n self.symbol = self.scanner.get_symbol()\n else:\n # Error: Siggen signal value has\n # to be '0' or '1'.\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END.\n self.error(\n self.SIGGEN_QUALIFIER, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Not a SIGGEN\n device_property_list.append(number_val)\n self.symbol = self.scanner.get_symbol()\n\n # Extract sequence of numbers for SIGGEN\n while (self.symbol.type == self.scanner.COMMA):\n if device_kind == self.names.query(\n \"SIGGEN\"):\n self.symbol = self.scanner.get_symbol()\n if(self.symbol.type == (\n self.scanner.NUMBER)):\n number_val = int(\n self.names.get_name_string(\n self.symbol.id))\n if (number_val == 0 or (\n number_val == 1)):\n device_property_list.append(\n number_val)\n self.symbol = (\n self.scanner.get_symbol())\n else:\n # Error: Signal value has\n # to be '0' or '1'.\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END.\n list1 = [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY\n ]\n list2 = [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID\n ]\n self.error(\n self.SIGGEN_QUALIFIER,\n list1,\n list2)\n else:\n # Error: Needs to be an integer\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END\n list1 = [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY\n ]\n list2 = [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID\n ]\n self.error(\n self.INTEGER, list1, list2)\n else:\n # Error: Excess qualifiers\n # for non-SIGGEN\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END\n self.error(\n self.devices.EXCESS_QUALIFIER, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Needs to be an integer\n # Stop symbs:';','}','CONNECT','MONITOR', END\n self.error(\n self.INTEGER, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Parameter: 'initial',\n # inputs, period, sequence.\n # Stopping symbols: ';' , '}','CONNECT', 'MONITOR'\n # or 'END' KEYWORD '\n self.error(self.NEED_QUALIFIER,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Comma has to be followed by parameter\n # speficification\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR'\n # or 'END' KEYWORD\n self.error(self.NEED_QUALIFIER,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # There is no device property\n device_property_list = None\n\n if (self.symbol.type == self.scanner.SEMICOLON):\n self.symbol = self.scanner.get_symbol()\n else:\n # Error: Device definition needs to end in ';'\n # Stopping symbols: NAME, ';' , '}', 'CONNECT', 'MONITOR'\n # or 'END' KEYWORD\n self.error(self.NO_DEVICE_SEMICOLON,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.NAME,\n self.scanner.RIGHT_CURLY],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Device name has to be followed by ':'\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' 
or 'END'\n # KEYWORD\n self.error(\n self.NO_DEVICE_COLON, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Valid Device name required\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n self.DEVICE_NAME, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n\n # Check for device semantic errors\n if self.error_count == 0:\n # Only check for semantic errors if no errors so far\n err = self.devices.make_device(\n device_id, device_kind, device_property_list)\n if err != self.devices.NO_ERROR:\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n err, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n\n # Increment input pin counter by number of pins on new device\n if self.error_count == 0:\n device_name_string = self.names.get_name_string(device_kind)\n if device_name_string == \"DTYPE\":\n self.num_input_pin += 4\n elif device_name_string in [\"AND\", \"OR\", \"NAND\", \"NOR\"]:\n self.num_input_pin += device_property_list[0]\n elif device_name_string == \"XOR\":\n self.num_input_pin += 2", "def _check_validdtypeoutput(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validdtypeoutputs:\n return True\n else:\n return False", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def test_get_device_unknown():\n device = get_device(SERIAL, CREDENTIAL, \"unknown\")\n assert device is None", "def is_valid(self):\n if not self.__usb_dev:\n return False\n if isinstance(self.__usb_dev, MpUsbApi) and (self.__handle_write != -1) and (self.__handle_read != -1):\n return True\n if isinstance(self.__usb_dev, usb.core.Device):\n return True\n return False\n #end valid()", "def _check_monitorline(self):\n # Check if device name is valid\n if self._check_name(self.symbol):\n self.monitor_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if ';' is used\n if self._is_semicolon(self.symbol):\n # End of line reached, exit function\n self.symbol = self.scanner.get_symbol()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n monitor_error = self.monitors.make_monitor(\n self.monitor_device.id, None)\n self._display_semantic_error(monitor_error)\n elif self._is_period(self.symbol):\n # DType output\n self.symbol = self.scanner.get_symbol()\n if self._check_validdtypeoutput(self.symbol):\n self.monitor_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n if self._is_semicolon(self.symbol):\n # End of line reached, exit function\n self.symbol = self.scanner.get_symbol()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n monitor_error = self.monitors.make_monitor(\n self.monitor_device.id,\n self.monitor_port.id)\n self._display_semantic_error(monitor_error)\n else:\n # Semicolon error\n self._display_syntax_error(\"semicolon\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n self._display_syntax_error(\"doutput\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # Semicolon error\n self._display_syntax_error(\"semicolon\")\n 
self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n\n return None", "def test_verify_state_of_a_device():", "def _is_valid_interface(device, switch, nos_driver):\n for key in device.keys():\n for (speed, interface) in device[key]:\n if not _is_valid_three_tupple(interface):\n return False\n if not _is_valid_interface_speed(speed):\n return False\n return True", "def is_valid(cls, addr):\n try:\n PCI(addr)\n except Exception:\n return False\n return True", "def device_exists(device):\n return os.path.exists('/sys/class/net/%s' % device)", "def _IsDevice(self, file_attribute_flags):\n if file_attribute_flags is None:\n return False\n return bool(file_attribute_flags & pyfsntfs.file_attribute_flags.DEVICE)", "def _check_validconnectionoutput(self):\n # Check if name is valid and has been initialised\n if self._check_name(self.symbol):\n first_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '->' is used\n if self._is_arrow(self.symbol):\n return first_device, None\n elif self._is_period(self.symbol):\n self.symbol = self.scanner.get_symbol()\n if self._check_validdtypeoutput(self.symbol):\n first_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return first_device, first_port\n else:\n # Invalid DType output\n self._display_syntax_error(\"doutput\")\n self._semicolon_skipper()\n return None, None\n else:\n # Neither an arrow nor a DType output\n self._display_syntax_error(\"arrowperiod\")\n self._semicolon_skipper()\n return None, None\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n return None, None", "def chkDevForErrors(self,devPart):\n\t self.host.chk_if_dpp_errors(devPart)\n\t self.host.vgcproc.chk_if_ue_errors(devPart)\n \n\t return 1", "def fs_ok(fs_info):\n if fs_info.mountpoint == '/':\n return True\n\n if (fs_info.device == fs_info.fstype or fs_info.fstype == 'nullfs' or\n '/docker' in fs_info.mountpoint or\n fs_info.mountpoint.startswith('/etc') or\n fs_info.mountpoint.startswith('/lib/modules')):\n return False\n\n if fs_info.device.startswith('/dev/'):\n return True\n\n return False" ]
[ "0.75342447", "0.6746497", "0.65169084", "0.64804757", "0.63896406", "0.63296634", "0.6282044", "0.6268811", "0.62086785", "0.6193023", "0.61705214", "0.6164332", "0.615052", "0.6144333", "0.61336094", "0.6121036", "0.6086974", "0.6019076", "0.6016493", "0.59963256", "0.59561366", "0.5946646", "0.59276235", "0.59162086", "0.59123045", "0.5855976", "0.5854189", "0.5835043", "0.5818454", "0.5808024" ]
0.8153207
0
Checks if symbol is a valid parameter
def _check_validparam(self, symbol): if symbol.type == self.scanner.KEYWORD and \ symbol.id in self.validparamids: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def _is_valid_input(self, parameter_name):\n raise NotImplementedError()", "def _check_paramindevice(self):\n if self._check_validparam(self.symbol):\n param = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '=' is used\n if self._is_equal(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if value is valid\n if self._is_number(self.symbol):\n value = self.symbol\n self.symbol = self.scanner.get_symbol()\n return param, value\n else:\n # The parameter value is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '='\n self._display_syntax_error(\"equal\")\n self._semicolon_skipper()\n return None, None\n else:\n # The parameter type is not valid\n self._display_syntax_error(\"parameter\")\n self._semicolon_skipper()\n return None, None", "def test_name_validation(self, attr):\n kwargs = {'kind': POSITIONAL_ONLY, attr: 3}\n with pytest.raises(TypeError) as excinfo:\n FParameter(**kwargs)\n assert excinfo.value.args[0] == \\\n '{} must be a str, not a {}'.format(attr, 3)", "def is_symbol(p):\n return len(p) == 1 and p.isalpha()", "def _check_validdtypeinput(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validdtypeinputs:\n return True\n else:\n return False", "def check_character(char, name, parameters):\r\n if char in name:\r\n raise NameError('Invalid character in the variable name: ' + name)\r\n\r\n # Make sure people don't include # within the name of parameters\r\n for item in parameters.keys():\r\n if char in item:\r\n raise NameError('Invalid character in the variable parameters: ' + item)", "def _check_for_parameter_syntax(self,parameter):\n err_msg = \"Illegal parameter name {}.\".format(parameter)\n if len(parameter) == 0:\n raise ValueError(err_msg + \" Empty parameter name\")\n if parameter[0] != '$':\n raise ValueError(err_msg + \" Parameter must start with '$'\")\n if parameter != string.join(parameter.split()).translate(None,' '):\n raise ValueError(err_msg + \" Parameter can't contain whitepaces\")\n if ('$' in parameter and parameter[0] != '$') or (parameter.count('$') > 1):\n raise ValueError(\n err_msg + \" Wrong parameter specification in {}\".format(parameter)\n )", "def is_accepted_symbol(self, symbol: str) -> bool:\n return symbol in self.accepted_symbols", "def _check_params(self):\n\t\tstrange_param_helper = False\n\t\tfor param in self.params:\n\t\t\n\t\t\t# It could be that the param encapsulates several values (e.g., \"FLUX_RADIUS(10)\")\n\t\t\t# So we have to dissect this\n\t\t\tmatch = re.compile(\"(\\w*)\\(\\d*\\)\").match(param)\n\t\t\tif match:\n\t\t\t\tcleanparam = match.group(1)\n\t\t\telse:\n\t\t\t\tcleanparam = param\n\t\t\t\t\n\t\t\tif cleanparam not in self.fullparamlist:\n\t\t\t\tlogger.warning(\"Parameter '%s' seems strange and might be unknown to SExtractor\" \\\n % (param))\n\t\t\t\tstrange_param_helper = True\n\t\t\t\t\n\t\tif strange_param_helper:\n\t\t\tlogger.warning(\"Known parameters are: %s\" % (self.fullparamtxt))", "def _security_check_parameters(param_dict):\n for key, value in param_dict.iteritems():\n str_value = str(value) # Could easily be an int or a float\n for bad_str in [\";\", \"&&\", \">\", \"<\", \"|\"]:\n if bad_str in str_value:\n raise ValueError(\"Rejecting suspicious argument for %s\" % key)", "def _is_symbol(s):\n\tif (type(s) == types.StringType and s >= 'A' and s[0] <= 
'Z'\n\t\t\tand (len(s) < 2 or s[1] < '0' or s[1] > '9')):\n\t\treturn 1\n\treturn 0", "def valid_param(name, param, min_length, max_length, regex):\n\n if not StringValidator.is_valid_type(param):\n logging.error(f\"{name} is an invalid type - expecting string\")\n return False\n\n string_validator = StringValidator(\n param, min_length=min_length, max_length=max_length, regex=regex\n )\n\n if not string_validator.is_valid_length():\n logging.error(f\"{name} is invalid length {param}\")\n return False\n\n if not string_validator.valid_chars_only():\n logging.error(f\"{param} the param for {name} contains invalid characters.\")\n return False\n\n return True", "def _check_parameter(self, h, i, j, v, integral=False,\n name=None, sym=None):\n if name is None:\n name = self.PARAMETER\n if sym is None:\n sym = self.SYMBOL\n return ASParameters._check_parameter(h, i, j, v, integral=integral,\n name=name, sym=sym)", "def _check_params(self):\n pass", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def check_symbols(self):\n # this method has a bug in that it never raises KeyError, it raises \n # ValueError instead.\n \n def is_valid(sym):\n # what symbols are valid? (, ), digits, atoms\n if sym in \"()\": return True\n #if sym.isdigit(): return True\n #if sym in _atomic_mass: return True\n if sym.isalnum(): return True\n return False\n\n for t in self._gettokens():\n if not is_valid(t): raise ValueError(\"bad symbol \" + t)\n if t.isalpha() and t not in _atomic_mass: raise KeyError(\"key error \" + t)\n return True", "def _validate_param(name, value):\n\n # First things first -- check that we have a legal parameter name.\n try:\n validator = _legal_params[name]\n except KeyError:\n raise ViewVCException(\"An illegal parameter name was provided.\", \"400 Bad Request\")\n\n # Is there a validator? Is it a regex or a function? Validate if\n # we can, returning without incident on valid input.\n if validator is None:\n return\n elif hasattr(validator, \"match\"):\n if validator.match(value):\n return\n else:\n if validator(value):\n return\n\n # If we get here, the input value isn't valid.\n raise ViewVCException(\n 'An illegal value was provided for the \"%s\" parameter.' 
% (name), \"400 Bad Request\"\n )", "def check_params(self):\n raise NotImplementedError", "def check_params(self, name, fs_in, fs_out, window):\n if not isinstance(name, str):\n raise TypeError('name must be a string, not %s' % name)\n if fs_in <= 0:\n raise ValueError('fs_in should not be less than 0.')\n if fs_out <= 0:\n raise ValueError('fs_out should not be less than 0.')\n if window <= 0:\n raise ValueError('window must be greater than than 0.')", "def is_valid_function(self, paras):\n if len(paras) != 0:\n return True\n return True", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')", "def is_symbol(obj):\n return isinstance(obj, Symbol)", "def validate_entered_symbol(self, existing_symbols):\n print(\"Consultando símbolo\")\n if self.symbol in existing_symbols:\n print(\"Símbolo validado\")\n validated_symbol = self.symbol\n return validated_symbol\n else:\n print(\"Símbolo no validado\")\n return None", "def _check_param(in_params, req_param, opt_param=list()):\n for param in req_param:\n if param not in in_params:\n raise ValueError('{} parameter is required'.format(param))\n defined_param = set(req_param + opt_param)\n for param in in_params:\n if param not in defined_param:\n print(\n \"WARNING: received unexpected parameter {}\".format(param))", "def legal_name(name, is_param_name=False):\n if name.startswith('_'):\n return False\n\n if name in ('self',):\n return False\n\n if keyword.iskeyword(name):\n return False\n\n regex = r'^[a-zA-Z][a-zA-Z0-9_]*$' if is_param_name else (\n r'^[a-zA-Z][.\\w-]*$')\n return bool(re.match(regex, name))", "def test_validate_non_fparameter_raises(self):\n param = inspect.Parameter('x', POSITIONAL_ONLY)\n fsig = FSignature([param], __validate_parameters__=False)\n with pytest.raises(TypeError) as excinfo:\n fsig.validate()\n assert excinfo.value.args[0] == \\\n \"Received non-FParameter '{}'\".format(param)", "def _check_parameter(self, data):\n return self._pre_process_record(data) is not None", "def _break_symbol_from_argument(self, part):\n symbol = part[0]\n argument = part[1:]\n if not symbol in self._symbols:\n raise ValueError(\n 'Command ({}) contains identifier ({}) with no usable symbol.'.format(\n self._string, part)\n )\n return symbol, argument", "def _is_valid_passed_arg(self, console: io.IO, step: str,\n value: Optional[str],\n validate: Callable[[str], None]) -> bool:\n pass" ]
[ "0.6862855", "0.6799891", "0.6653889", "0.65039194", "0.64729655", "0.64628637", "0.6461699", "0.64211375", "0.6384333", "0.6335811", "0.63166904", "0.6274853", "0.62189996", "0.62188965", "0.61966276", "0.6187289", "0.6160707", "0.6061646", "0.6057763", "0.60409635", "0.6040337", "0.60395294", "0.60317945", "0.6003388", "0.59722894", "0.5954577", "0.59460807", "0.5936169", "0.59354925", "0.59294695" ]
0.82193774
0
Check correctness of the symbol START
def _check_fixed_start(self): self.symbol = self.scanner.get_symbol() if self.symbol.type == self.scanner.KEYWORD and \ self.symbol.id == self.scanner.START_ID: pass elif self._is_eof(self.symbol): # In case file ends prematurely pass else: self._display_syntax_error("start")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_start(start, lines):\r\n if start.isalpha(): # start word must be alphabetic\r\n if len(start) > 1: # start word must be larger than 1 character\r\n if start in lines: # start word must be in the list of words\r\n return \"0\"\r\n else:\r\n return \"Start word not in list of words....please reenter\"\r\n else:\r\n return \"Start word must contain more than one letter....please reenter\"\r\n else:\r\n return \"Start word must contain only letters....please reenter\"", "def test_match_start_check_at_beginning_of_string(self):\n first_letter = \"a\"\n s = \"abcdef\"\n self.assertEqual(__, re.search(first_letter, s).group())", "def _is_start(self, line):\n if re.match(\".*\\:\\s*\\(groupid\", line):\n return True", "def isStart(self):\n return _libsbml.XMLToken_isStart(self)", "def test_match_must_start_at_the_beginning(self):\n third_letter = \"c\"\n s = \"abcdef\"\n self.assertEqual(__, re.match(third_letter, s))", "def isValid(self) :\n try :\n pos = 0\n while self.firstblock[pos] == chr(0) :\n pos += 1\n except IndexError : \n return False\n else : \n firstblock = self.firstblock[pos:]\n if firstblock.startswith(\"\\033E\\033\") or \\\n firstblock.startswith(\"\\033%1BBPIN;\") or \\\n ((pos == 11000) and firstblock.startswith(\"\\033\")) or \\\n (firstblock.startswith(\"\\033*rbC\") and (not self.lastblock[-3:] == \"\\f\\033@\")) or \\\n firstblock.startswith(\"\\033*rB\\033\") or \\\n firstblock.startswith(\"\\033%8\\033\") or \\\n (firstblock.find(\"\\033%-12345X\") != -1) or \\\n (firstblock.find(\"@PJL ENTER LANGUAGE=PCL\\012\\015\\033\") != -1) or \\\n (firstblock.startswith(chr(0xcd)+chr(0xca)) and (firstblock.find(\"\\033E\\033\") != -1)) :\n return True\n else : \n return False", "def test_evaluate_starts_with_expression(self):\n value = self.evaluate_common(\"startswith('startswith','start')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True)\n value = self.evaluate_common(\"startswith('startswith','end')\")\n self.assertTrue(value.value is False)\n value = self.evaluate_common(\"startswith('startswith','Start')\")\n # not case insensitive\n self.assertTrue(value.value is False)\n try:\n value = self.evaluate_common(\"startswith('3.14',3)\")\n self.fail(\"integer as prefix\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"startswith('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass", "def LOWER_START():\n return 7", "def need_context_computation(self):\n\n # We check that symbols preceding the completion is smthing like foo.bar.\n if re.match('.*\\..*', self.get_word_before()):\n return True\n\n\n return False", "def _start_magic(line):\n return start(line)", "def find_start(self): # -> str | None:\n ...", "def test_should_accept_alphabetic_opening_parenthesis(self):\n validator = StartsWithValidator()\n\n for formula in self.correct_formulas:\n self.assertIsNone(validator(formula))", "def start_symbol(self) -> Variable:\n return self._start_symbol", "def _check_for_start(self):\n while self._serial_bytes_available():\n data = self._serial_read(1)\n if len(data) == 1:\n self._check_for_start_bytes.append(data[0])\n try:\n if self._check_for_start_bytes[-1] == self.MAGIC_NUM_3 and \\\n self._check_for_start_bytes[-2] == self.MAGIC_NUM_2 and \\\n self._check_for_start_bytes[-3] == self.MAGIC_NUM_1 and \\\n self._check_for_start_bytes[-4] == self.MAGIC_NUM_0:\n\n if self.verbose:\n self.log(\"Start Detected!\")\n return True\n except 
IndexError:\n pass\n else:\n break\n\n # default, no start :(\n if self.verbose:\n self.log(\"Failed to detect start...\")\n return False", "def test_sequence_start_stop(self):\n self.t(\"1,2 start\")\n code, out, err = self.t(\"_get 1.start 2.start\")\n self.assertRegex(out, \"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2} \\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\n\")\n\n self.t(\"1,2 stop\")\n code, out, err = self.t(\"_get 1.start 2.start\")\n self.assertEqual(\" \\n\", out) # Space separating the two blank values.", "def start_with_the_beggining(rna: str):\n return 0", "def _check_opt_starts_with(self, docstring: PetscDocStringImpl, item: tuple[SourceRange, DescribableItem, int], entity_name: str, char: str) -> None:\n loc, descr_item, _ = item\n pre = descr_item.prefix\n if pre != char:\n eloc = docstring.make_source_range(pre, descr_item.text, loc.start.line)\n mess = f'{entity_name} parameter list entry must start with \\'{char}\\''\n docstring.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, self.diags.prefix, mess, eloc, patch=Patch(eloc, char)\n )\n return", "def startsWith(self, prefix: str) -> bool:\n curr_chars = self.chars\n for c in list(prefix):\n if c not in curr_chars:\n return False\n curr_chars = curr_chars[c]\n return True", "def test_starts_letter(x):\n return x[0].isalpha()", "def startsWith(self, prefix):\n pointer = self.tries\n for i in range(len(prefix)):\n ascii = ord(prefix[i]) - ord('a')\n if pointer[ascii] == None:\n return False\n pointer = pointer[ascii]\n return True", "def test_snippet_beginning_nonletter(self):\n message = Message(clean_text=u\"!I already know what this will be!!!!!\")\n self.assertEqual(\n message.snippet,\n 'I already know what...'\n )", "def has_preamble(command): \n if command.endswith('ascii'): # or command.endswith('length') \n # or command.endswith('name'):\n return False\n else:\n return True\n # end if", "def is_chunk_start(prev_tag, tag):\n prefix1, chunk_type1 = split_tag(prev_tag)\n prefix2, chunk_type2 = split_tag(tag)\n\n if prefix2 == 'O':\n return False\n if prefix1 == 'O':\n return prefix2 != 'O'\n\n if chunk_type1 != chunk_type2:\n return True\n\n return prefix2 in ['B', 'S'] or prefix1 in ['E', 'S']", "def _check_start_end_acceptable(start: str, end: str) -> None:\n\n char_regex = regex.compile(\"[A-Z]+\")\n\n if not char_regex.fullmatch(start) or not char_regex.fullmatch(end):\n raise ValueError(\"start and end must be characters\")\n\n _check_end_after_start(start, end)", "def test_match_regexp_including_start():\r\n runmatch(lcode)", "def start_at(self) -> global___Statement.Declaration:", "def starts_with(s, prefix):\n if prefix == '':\n return True\n elif s[0] != prefix[0]:\n return False\n else: # s[0] == prefix[0]\n return starts_with(s[1:], prefix[1:])", "def startswith( self, prefix ):\n return len(self.commands) >= 1 and self.commands[0].startswith( prefix )", "def hasIdentifierBeginningWith(self, *args):\n return _libsbml.SBasePlugin_hasIdentifierBeginningWith(self, *args)", "def __init__(self):\n super(StringStart, self).__init__(r\"^\")" ]
[ "0.71219003", "0.6362785", "0.63464314", "0.63366246", "0.6255174", "0.6188901", "0.6157201", "0.6143468", "0.60610247", "0.6050532", "0.5992676", "0.59828264", "0.59446603", "0.59429526", "0.5934591", "0.59285474", "0.5921997", "0.5911821", "0.5889964", "0.5861589", "0.5842854", "0.5826502", "0.58203965", "0.5784945", "0.57779616", "0.57670283", "0.5731615", "0.57218623", "0.5719956", "0.5702979" ]
0.77844435
0
Check if symbol is a valid DType input
def _check_validdtypeinput(self, symbol): if symbol.type == self.scanner.KEYWORD and \ symbol.id in self.validdtypeinputs: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_validdtypeoutput(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validdtypeoutputs:\n return True\n else:\n return False", "def is_symbol(obj):\n return isinstance(obj, Symbol)", "def isValidDataTypeName(name: unicode) -> bool:\n ...", "def _is_symbol(s):\n\tif (type(s) == types.StringType and s >= 'A' and s[0] <= 'Z'\n\t\t\tand (len(s) < 2 or s[1] < '0' or s[1] > '9')):\n\t\treturn 1\n\treturn 0", "def valid_dtype(expected, found):\n if expected not in ('bool', 'byte', 'text', 'number', 'int', 'float', 'any'):\n raise SystemError((\"** Error: invalid value (%s) in definition file \"\n \"for expected data type\") % expected)\n if expected == 'any':\n return True\n if found in ('str', 'unicode') or re.match( r'^\\|S\\d+$', found) or 'byte' in found:\n # print \"found dtype '%s', interpreting as string\" % dtype\n dtype = 'text'\n elif 'bool' in found:\n dtype = 'bool'\n elif 'int' in found or 'long' in found:\n dtype = 'int'\n elif 'float' in found or 'double' in found:\n dtype = 'float'\n else:\n raise ValueError((\"** Error: unable to recognize data type (%s) for validation.\"\n \"expecting compatable with '%s'\") % (found, expected))\n valid = (dtype == expected or (dtype in ('int', 'float', 'bool', ) and expected == 'number'))\n return valid", "def is_int(symbol):\n return isa(symbol, int)", "def _is_real(symbol):\n return isa(symbol, float) or is_int(symbol)", "def checkDataType(self,str):\n accepted_vals = [\"HEXA\",\"NEHU\",\"NEDS\",\"NEDU\",\"NDHU\",\"NDDU\"]\n assert str in accepted_vals, \"Error: Data Type not accepted: \" + str\n if (str == 'HEXA') | (str[2] == 'H'):\n self.base = 16\n if str[3] == 'S':\n self.signed = True", "def is_pyxb_d1_type_name(pyxb_obj, expected_pyxb_type_name):\n try:\n return pyxb_get_type_name(pyxb_obj) == expected_pyxb_type_name\n except AttributeError:\n return False", "def _want_sym(sym):\n if sym is None or len(sym) < 2:\n return False\n if sym['name'] in extract_ignore_names:\n return False\n bad_types = ['t', 'b', 'r', 'd', 'w']\n return (sym['type'] not in bad_types\n and sym['name'] not in ['__bss_start', '_end', '_edata'])", "def _check_dtype(input_dtype):\n\n product_version = tbe_platform.cce_conf.get_soc_spec(\"SOC_VERSION\")\n if product_version in (\"Hi3796CV300ES\"):\n if input_dtype == \"float32\":\n raise RuntimeError(\"float32 is not support in ES\")\n util.check_dtype_rule(input_dtype, (\"float16\",))\n else:\n util.check_dtype_rule(input_dtype, (\"float16\", \"float32\",))", "def _type_check_double(self, data):\n if type(data) not in self._VALID_TYPES:\n return False\n return True", "def is_symbol(p):\n return len(p) == 1 and p.isalpha()", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def _is_number(self, symbol):\n if symbol.type == self.scanner.NUMBER:\n return True\n else:\n return False", "def test_datatype_detection():\n\n grammar = \"\"\"\n IsObjectDatatype: INT | STRING | ID;\n IsIntDatatype: INT;\n IsIdDatatype: ID;\n IsAlsoDatatype: SubDT1 | SubDT2;\n SubDT1: INT;\n SubDT2: STRING;\n \"\"\"\n\n mm = metamodel_from_str(grammar)\n\n IsObjectDatatype = mm['IsObjectDatatype']\n assert isinstance(IsObjectDatatype, ecore.EDataType)\n assert IsObjectDatatype.name == 'IsObjectDatatype'\n assert IsObjectDatatype.eType == object\n\n IsIntDatatype = mm['IsIntDatatype']\n assert isinstance(IsIntDatatype, ecore.EDataType)\n assert 
IsIntDatatype.name == 'IsIntDatatype'\n assert IsIntDatatype.eType == int\n\n IsIdDatatype = mm['IsIdDatatype']\n assert isinstance(IsIdDatatype, ecore.EDataType)\n assert IsIdDatatype.name == 'IsIdDatatype'\n assert IsIdDatatype.eType == str\n\n IsAlsoDatatype = mm['IsAlsoDatatype']\n assert isinstance(IsAlsoDatatype, ecore.EDataType)\n IsAlsoDatatype = mm['IsAlsoDatatype']\n assert IsAlsoDatatype.eType == object", "def is_number(symbol):\n return isa(symbol, complex) or is_rational(symbol)", "def is_simple_symbol(obj):\n return is_symbol(obj) and not is_wildcard(obj)", "def _check_validdevice(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validdeviceids:\n\n return True\n else:\n return False", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')", "def check_type(self, data_input, debug_flag):\n _type = []\n \n if self.zigzag(data_input, debug_flag): _type.append(str(\"ZIGZAG\"))\n if self.flat(data_input, debug_flag): _type.append(str(\"FLAT\")) \n if self.r_flat(data_input, debug_flag): _type.append(str(\"R_FLAT\")) \n if self.e_flat(data_input, debug_flag): _type.append(str(\"E_FLAT\")) \n #print('_type:',_type)\n\n if _type:\n return True, _type\n else:\n return False, _type", "def check_typedef_style(ctx, stmt):\n\n elemtype = stmt.search_one(\"type\")\n if elemtype is None:\n return\n\n # errors are appended to the context, such that we can just call the\n # base checks here.\n OCLintFunctions.check_enumeration_style(ctx, stmt)\n OCLintFunctions.check_bad_types(ctx, stmt)\n OCLintFunctions.check_posix_pattern_equal(ctx, stmt)", "def _is_period(self, symbol):\n if symbol.type == self.scanner.PERIOD:\n return True\n else:\n return False", "def __datatype_check(self, record_attribute, attribute_schema):\n if 'INT' in attribute_schema[TYPE_KEY].upper():\n if record_attribute.isdigit():\n return True\n elif attribute_schema[TYPE_KEY].upper() in DECIMAL_TYPES:\n if record_attribute.isdecimal():\n return True\n elif 'CHAR' in attribute_schema[TYPE_KEY].upper() \\\n or 'TEXT' in attribute_schema[TYPE_KEY].upper():\n if type(record_attribute) is str:\n return True\n else:\n IS_VALID_FILE = False\n return False", "def _check_validparam(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validparamids:\n return True\n else:\n return False", "def _has_numeric_or_bool(self) -> bool:\n dtypes: Set[str] = set(self._data.keys())\n return 'i' in dtypes or 'f' in dtypes or 'b' in dtypes", "def _kind(d):\n return _SYMBOL_KIND_MAP.get(d.type)", "def is_und_symbol(self):\n return self.__und", "def valid_dtype_assertion(expected_dtypes, actual_dtype, name):\n\tassert (actual_dtype in expected_dtypes), \"Invalid dtype of {} should be {}\".format(name, str(expected_dtypes))" ]
[ "0.7286696", "0.66887015", "0.6577127", "0.6570027", "0.64245325", "0.6364817", "0.6271129", "0.6229207", "0.6214596", "0.61741644", "0.6168954", "0.6150034", "0.6127641", "0.607109", "0.60133564", "0.59820586", "0.5958963", "0.5928896", "0.59214616", "0.5874561", "0.5868875", "0.58006155", "0.5744916", "0.56916875", "0.5675044", "0.5669672", "0.56609094", "0.56421214", "0.56330085", "0.5610119" ]
0.7812703
0
Check if symbol is a valid DType output
def _check_validdtypeoutput(self, symbol): if symbol.type == self.scanner.KEYWORD and \ symbol.id in self.validdtypeoutputs: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_validdtypeinput(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validdtypeinputs:\n return True\n else:\n return False", "def is_symbol(obj):\n return isinstance(obj, Symbol)", "def _want_sym(sym):\n if sym is None or len(sym) < 2:\n return False\n if sym['name'] in extract_ignore_names:\n return False\n bad_types = ['t', 'b', 'r', 'd', 'w']\n return (sym['type'] not in bad_types\n and sym['name'] not in ['__bss_start', '_end', '_edata'])", "def is_pyxb_d1_type_name(pyxb_obj, expected_pyxb_type_name):\n try:\n return pyxb_get_type_name(pyxb_obj) == expected_pyxb_type_name\n except AttributeError:\n return False", "def check_type(self, data_input, debug_flag):\n _type = []\n \n if self.zigzag(data_input, debug_flag): _type.append(str(\"ZIGZAG\"))\n if self.flat(data_input, debug_flag): _type.append(str(\"FLAT\")) \n if self.r_flat(data_input, debug_flag): _type.append(str(\"R_FLAT\")) \n if self.e_flat(data_input, debug_flag): _type.append(str(\"E_FLAT\")) \n #print('_type:',_type)\n\n if _type:\n return True, _type\n else:\n return False, _type", "def valid_dtype(expected, found):\n if expected not in ('bool', 'byte', 'text', 'number', 'int', 'float', 'any'):\n raise SystemError((\"** Error: invalid value (%s) in definition file \"\n \"for expected data type\") % expected)\n if expected == 'any':\n return True\n if found in ('str', 'unicode') or re.match( r'^\\|S\\d+$', found) or 'byte' in found:\n # print \"found dtype '%s', interpreting as string\" % dtype\n dtype = 'text'\n elif 'bool' in found:\n dtype = 'bool'\n elif 'int' in found or 'long' in found:\n dtype = 'int'\n elif 'float' in found or 'double' in found:\n dtype = 'float'\n else:\n raise ValueError((\"** Error: unable to recognize data type (%s) for validation.\"\n \"expecting compatable with '%s'\") % (found, expected))\n valid = (dtype == expected or (dtype in ('int', 'float', 'bool', ) and expected == 'number'))\n return valid", "def _valid_output_type(self, output_type):\n # pylint: disable=W0613, R0201\n return True", "def _is_real(symbol):\n return isa(symbol, float) or is_int(symbol)", "def _type_check_double(self, data):\n if type(data) not in self._VALID_TYPES:\n return False\n return True", "def _is_symbol(s):\n\tif (type(s) == types.StringType and s >= 'A' and s[0] <= 'Z'\n\t\t\tand (len(s) < 2 or s[1] < '0' or s[1] > '9')):\n\t\treturn 1\n\treturn 0", "def isValidDataTypeName(name: unicode) -> bool:\n ...", "def _kind(d):\n return _SYMBOL_KIND_MAP.get(d.type)", "def is_und_symbol(self):\n return self.__und", "def _is_input_or_output_type(io: type, type_str: Literal[\"Input\", \"Output\", \"Meta\"]):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def check_type(s: pd.Series, input_output=\"\") -> Tuple[bool, str]:\n\n error_string = (\n \"should be VectorSeries: there are non-representation cells (every cell should be a list of floats) in the given Series.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n def is_numeric(x):\n try:\n float(x)\n except ValueError:\n return False\n else:\n return True\n\n def is_list_of_numbers(cell):\n return isinstance(cell, (list, tuple)) and all(is_numeric(x) for x in cell)\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not is_list_of_numbers(first_non_nan_value):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def 
is_int(symbol):\n return isa(symbol, int)", "def checkDataType(self,str):\n accepted_vals = [\"HEXA\",\"NEHU\",\"NEDS\",\"NEDU\",\"NDHU\",\"NDDU\"]\n assert str in accepted_vals, \"Error: Data Type not accepted: \" + str\n if (str == 'HEXA') | (str[2] == 'H'):\n self.base = 16\n if str[3] == 'S':\n self.signed = True", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def _check_dtype(input_dtype):\n\n product_version = tbe_platform.cce_conf.get_soc_spec(\"SOC_VERSION\")\n if product_version in (\"Hi3796CV300ES\"):\n if input_dtype == \"float32\":\n raise RuntimeError(\"float32 is not support in ES\")\n util.check_dtype_rule(input_dtype, (\"float16\",))\n else:\n util.check_dtype_rule(input_dtype, (\"float16\", \"float32\",))", "def is_simple_symbol(obj):\n return is_symbol(obj) and not is_wildcard(obj)", "def test_datatype_detection():\n\n grammar = \"\"\"\n IsObjectDatatype: INT | STRING | ID;\n IsIntDatatype: INT;\n IsIdDatatype: ID;\n IsAlsoDatatype: SubDT1 | SubDT2;\n SubDT1: INT;\n SubDT2: STRING;\n \"\"\"\n\n mm = metamodel_from_str(grammar)\n\n IsObjectDatatype = mm['IsObjectDatatype']\n assert isinstance(IsObjectDatatype, ecore.EDataType)\n assert IsObjectDatatype.name == 'IsObjectDatatype'\n assert IsObjectDatatype.eType == object\n\n IsIntDatatype = mm['IsIntDatatype']\n assert isinstance(IsIntDatatype, ecore.EDataType)\n assert IsIntDatatype.name == 'IsIntDatatype'\n assert IsIntDatatype.eType == int\n\n IsIdDatatype = mm['IsIdDatatype']\n assert isinstance(IsIdDatatype, ecore.EDataType)\n assert IsIdDatatype.name == 'IsIdDatatype'\n assert IsIdDatatype.eType == str\n\n IsAlsoDatatype = mm['IsAlsoDatatype']\n assert isinstance(IsAlsoDatatype, ecore.EDataType)\n IsAlsoDatatype = mm['IsAlsoDatatype']\n assert IsAlsoDatatype.eType == object", "def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def _type_check_output(\n output_def: \"OutputDefinition\", output: Any, context: \"BoundOpExecutionContext\"\n) -> Any:\n from ..execution.plan.execute_step import do_type_check\n\n op_label = context.describe_op()\n\n if isinstance(output, (Output, DynamicOutput)):\n dagster_type = output_def.dagster_type\n type_check = do_type_check(context.for_type(dagster_type), dagster_type, output.value)\n if not type_check.success:\n raise DagsterTypeCheckDidNotPass(\n description=(\n f'Type check failed for {op_label} output \"{output.output_name}\" - '\n f'expected type \"{dagster_type.display_name}\". '\n f\"Description: {type_check.description}\"\n ),\n metadata_entries=type_check.metadata_entries,\n dagster_type=dagster_type,\n )\n\n context.observe_output(\n output_def.name, output.mapping_key if isinstance(output, DynamicOutput) else None\n )\n return output\n else:\n dagster_type = output_def.dagster_type\n type_check = do_type_check(context.for_type(dagster_type), dagster_type, output)\n if not type_check.success:\n raise DagsterTypeCheckDidNotPass(\n description=(\n f'Type check failed for {op_label} output \"{output_def.name}\" - '\n f'expected type \"{dagster_type.display_name}\". 
'\n f\"Description: {type_check.description}\"\n ),\n metadata_entries=type_check.metadata_entries,\n dagster_type=dagster_type,\n )\n return output", "def _type_check_output(\n output_def: \"OutputDefinition\", output: T, context: \"BoundOpExecutionContext\"\n) -> T:\n from ..execution.plan.execute_step import do_type_check\n\n op_label = context.describe_op()\n\n if isinstance(output, (Output, DynamicOutput)):\n dagster_type = output_def.dagster_type\n type_check = do_type_check(context.for_type(dagster_type), dagster_type, output.value)\n if not type_check.success:\n raise DagsterTypeCheckDidNotPass(\n description=(\n f'Type check failed for {op_label} output \"{output.output_name}\" - '\n f'expected type \"{dagster_type.display_name}\". '\n f\"Description: {type_check.description}\"\n ),\n metadata=type_check.metadata,\n dagster_type=dagster_type,\n )\n\n context.observe_output(\n output_def.name, output.mapping_key if isinstance(output, DynamicOutput) else None\n )\n return output\n else:\n dagster_type = output_def.dagster_type\n type_check = do_type_check(context.for_type(dagster_type), dagster_type, output)\n if not type_check.success:\n raise DagsterTypeCheckDidNotPass(\n description=(\n f'Type check failed for {op_label} output \"{output_def.name}\" - '\n f'expected type \"{dagster_type.display_name}\". '\n f\"Description: {type_check.description}\"\n ),\n metadata=type_check.metadata,\n dagster_type=dagster_type,\n )\n return output", "def _is_number(self, symbol):\n if symbol.type == self.scanner.NUMBER:\n return True\n else:\n return False", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def is_symbol(p):\n return len(p) == 1 and p.isalpha()", "def _has_numeric_or_bool(self) -> bool:\n dtypes: Set[str] = set(self._data.keys())\n return 'i' in dtypes or 'f' in dtypes or 'b' in dtypes", "def inspect_dtype(series: pd.Series) -> str:\n\n mapping = {pd_types.is_bool_dtype: \"bool\",\n pd_types.is_integer_dtype: \"int\",\n pd_types.is_float_dtype: \"float\",\n pd_types.is_datetime64_any_dtype: \"datetime\"}\n\n for check, result in mapping.items():\n if check(series):\n return result\n\n raise TypeError(\"Type is not understand for column '{}'. Allowed \"\n \"types are bool, int, float, str and datetime.\"\n .format(series.name))", "def explore_type(name, datatype, is_child):\n if datatype.code == gdb.TYPE_CODE_ENUM:\n if is_child:\n print (\"%s is of an enumerated type '%s'.\" %\n (name, str(datatype)))\n else:\n print (\"'%s' is an enumerated type.\" % name)\n else:\n if is_child:\n print (\"%s is of a scalar type '%s'.\" %\n (name, str(datatype)))\n else:\n print (\"'%s' is a scalar type.\" % name)\n\n if is_child:\n Explorer.return_to_enclosing_type_prompt()\n Explorer.return_to_enclosing_type()\n\n return False" ]
[ "0.7144111", "0.62963384", "0.612206", "0.60689634", "0.6064693", "0.5982979", "0.5965407", "0.5920263", "0.5882332", "0.58719504", "0.58253753", "0.5759133", "0.57490194", "0.5722805", "0.57056916", "0.5703249", "0.5655178", "0.56458294", "0.5625231", "0.56242913", "0.56235576", "0.5580695", "0.55696446", "0.5542184", "0.5504451", "0.54849184", "0.54831946", "0.54722685", "0.5467157", "0.544266" ]
0.7976685
0
Checks for a semicolon
def _is_semicolon(self, symbol): if symbol.type == self.scanner.SEMICOLON: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_semicolon_else_skip(self, symbol):\n if symbol.type == self.scanner.SEMICOLON:\n pass\n else:\n self._display_syntax_error(\"semicolon\")\n # Skip to semicolon at end of line\n self._semicolon_skipper()", "def _check_semicolon(line_index, input_line):\n global _total_lines_of_code\n if input_line.endswith(';'):\n _code_lines.append(line_index)\n _total_lines_of_code += 1", "def _semicolon_skipper(self):\n while (\n not self._is_semicolon(\n self.symbol)) and (\n not self._is_eof(\n self.symbol)):\n self.symbol = self.scanner.get_symbol()\n if self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n return None", "def check_for_extra_semicolon(sql_str):\r\n try:\r\n if len(sql_str.split(';')) > 2:\r\n raise sqlErr(\"Extra Semi-Colon Detected!\")\r\n except Exception as e:\r\n raise e", "def validate_semicolon(s):\n positions = identify_create_table_view(s)\n validation = {\n \"exit_code\": 0,\n \"total_lines\": count_lines(s)\n }\n if len(positions) > 1:\n validation[\"exit_code\"] = 1\n validation[\"val_lines\"] = positions\n return validation", "def is_skippable(line: str) -> bool:\n return len(line) == 0 or line[0] == ';'", "def checkForNewLineAndSemiColon(string):\r\n\t\tnew_string = \"\"\r\n\t\tfor i in string:\r\n\t\t\tif i != \"\\n\" and i != \";\":\r\n\t\t\t\tnew_string += i\r\n\t\treturn new_string", "def unnecessary_colon(self):\n if self.line.endswith(':'):\n return True", "def issafe(arg):\n return arg.find(';') == -1 and arg.find('|') == -1", "def separate_semicolon(s):\n return s.split(';')", "def test_comma_bef_semicolon(self):\n nt = NewickTokenizer(newick='(a,(b,c),(d,e)),;')\n self.assertRaises(ValueError, nt.tokens)", "def require_separator(self):\n return False", "def count_semi_colons(txt):\n \n count = 0\n for c in txt:\n if c == ';':\n count += 1\n return count", "def condense_semicolons(css):\n log.debug(\"Condensing all unnecessary multiple adjacent semicolons.\")\n return re.sub(r\";;+\", \";\", css)", "def dash_semicolon(dataset, idx):\n bert_ints = dataset.bert_tokens[idx].tolist()\n bert_tokens = dataset.bert_tokenizer.convert_ids_to_tokens(bert_ints)\n return \"-\" in bert_tokens or \";\" in bert_tokens", "def line_is_valid(line):\n if '-' in map(lambda item: item.strip(), line.strip().split(\";\")):\n return False\n else:\n return True", "def isseparator(token):\n\n # Token is a comma\n return token == \",\"", "def test_missing_delim(self):", "def validate_csv_seq(sequence):\n if sequence.find(',') != -1 or sequence.find(';') != -1:\n return True\n else:\n return False", "def _is_comma(self, symbol):\n if symbol.type == self.scanner.COMMA:\n return True\n else:\n return False", "def _check_fixed_others(self, symbol_id):\n\n # Get the next symbol\n self.symbol = self.scanner.get_symbol()\n if self.symbol.type == self.scanner.KEYWORD and \\\n self.symbol.id == symbol_id:\n self.symbol = self.scanner.get_symbol()\n self._check_semicolon_else_skip(self.symbol)\n elif self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n else:\n # Error in symbol\n self._display_syntax_error(symbol_id)\n # Skip to semicolon at end of line\n self._semicolon_skipper()", "def _read_colon(self):\n if self._peek_char() == ':':\n self._skip_and_peek_char()\n return TOKEN_DOUBLECOLON\n return TOKEN_COLON", "def semicolon_to_newline(text):\n # Convert semicolons to newlines.\n # Return converted text.\n return '\\n'.join(x for x in text.split(';'))", "def __is_quote(cls, char):\n return char in (\"'\", '\"')", "def newline_to_semicolon(text):\n # 
Convert newlines to semicolons.\n # Return converted text.\n return ';'.join(x for x in text.split('\\n'))", "def finalfrase(self, word):\n if word == '.' or word == ';' or word == ',' or word == '?' or word == '!':\n return True\n return False", "def test_get_separator_semi():\n # GIVEN a line with commas as delimiter\n line = \"one;two;three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert None is returned\n assert sep is None", "def test_escape_semicolon(self):\n card = Card(\n \"mocking\",\n \"ˈmɑkɪŋ\",\n \"Making fun of someone or something in a cruel way; derisive.\",\n \"The ruthless scientist changed from mocking to sad.\",\n )\n\n expected = (\n \"The ruthless scientist changed from <b>mocking</b> to sad.; \"\n '\"<b>mocking /ˈmɑkɪŋ/</b><br> Making fun of someone or '\n 'something in a cruel way; derisive.\"'\n )\n\n self.assertEqual(card.to_anki_txt_format(), expected)", "def check(self): # full program\n r = re.compile('(?!(^(((?!;)[A-Z][+-]?\\d+(\\.\\d+)?\\s?)*(\\s*;\\s.*)?)$))')\n for line in self.blocks:\n if r.match(line) and line and line != '\\r' and line != '\\n':\n return False\n return True", "def isfixline(number):\n if number[0] == '(':\n return True\n return False" ]
[ "0.77693564", "0.7576656", "0.73017335", "0.7250093", "0.67261046", "0.63885087", "0.63797534", "0.635322", "0.6284039", "0.615116", "0.6051766", "0.6046793", "0.60242265", "0.58955777", "0.5878949", "0.585406", "0.58402747", "0.57678705", "0.5749202", "0.5675741", "0.5645079", "0.56220293", "0.5590282", "0.55257285", "0.5520318", "0.55011344", "0.5495702", "0.54689986", "0.54195327", "0.53910446" ]
0.7972013
0
Check for a semicolon, otherwise skip to end of line
def _check_semicolon_else_skip(self, symbol): if symbol.type == self.scanner.SEMICOLON: pass else: self._display_syntax_error("semicolon") # Skip to semicolon at end of line self._semicolon_skipper()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _semicolon_skipper(self):\n while (\n not self._is_semicolon(\n self.symbol)) and (\n not self._is_eof(\n self.symbol)):\n self.symbol = self.scanner.get_symbol()\n if self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n return None", "def _check_semicolon(line_index, input_line):\n global _total_lines_of_code\n if input_line.endswith(';'):\n _code_lines.append(line_index)\n _total_lines_of_code += 1", "def is_skippable(line: str) -> bool:\n return len(line) == 0 or line[0] == ';'", "def _is_semicolon(self, symbol):\n if symbol.type == self.scanner.SEMICOLON:\n return True\n else:\n return False", "def checkForNewLineAndSemiColon(string):\r\n\t\tnew_string = \"\"\r\n\t\tfor i in string:\r\n\t\t\tif i != \"\\n\" and i != \";\":\r\n\t\t\t\tnew_string += i\r\n\t\treturn new_string", "def unnecessary_colon(self):\n if self.line.endswith(':'):\n return True", "def is_eof(line):\n return line == \"\"", "def check_for_extra_semicolon(sql_str):\r\n try:\r\n if len(sql_str.split(';')) > 2:\r\n raise sqlErr(\"Extra Semi-Colon Detected!\")\r\n except Exception as e:\r\n raise e", "def maybe_eol(self):\n if self.current == CR:\n self.next()\n if self.current == LF:\n self.next()\n elif self.current == LF:\n self.next()", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def skipComment(self):\r\n\t\tch = self.nextChar()\r\n\t\twhile ch and ch != \"\\n\":\r\n\t\t\tch = self.nextChar()", "def validate_semicolon(s):\n positions = identify_create_table_view(s)\n validation = {\n \"exit_code\": 0,\n \"total_lines\": count_lines(s)\n }\n if len(positions) > 1:\n validation[\"exit_code\"] = 1\n validation[\"val_lines\"] = positions\n return validation", "def eol(self):\n return self.pos == len(self.tokens)", "def do_eof(self, line):\n print \"\"\n return True", "def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')", "def do_EOF(self, line):\n print(\"\")\n return True", "def _check_fixed_others(self, symbol_id):\n\n # Get the next symbol\n self.symbol = self.scanner.get_symbol()\n if self.symbol.type == self.scanner.KEYWORD and \\\n self.symbol.id == symbol_id:\n self.symbol = self.scanner.get_symbol()\n self._check_semicolon_else_skip(self.symbol)\n elif self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n else:\n # Error in symbol\n self._display_syntax_error(symbol_id)\n # Skip to semicolon at end of line\n self._semicolon_skipper()", "def test_line(line):\r\n if not line.strip():\r\n return False # if the last line is blank\r\n if line.startswith(\"#\"):\r\n return False # comment line\r\n if line.startswith(\" #\"):\r\n return False # comment line\r\n return line", "def statements(self):\n\n while self.token.value not in ('EOF', 'else', 'end'):\n\n with self.resync('\\n', consume=True):\n self.statement()\n\n if not self.match(Tokens.SYMBOL, \";\"):\n self.error(\"expected ';' after statement \", token=self.prev_token, after_token=True)\n\n # consume the 'end' token if there is one\n self.match(Tokens.KEYWORD, 'end')", "def require_separator(self):\n return False", "def consume_endmarker(self) -> None:\n line = self.fetch(1, allow_endmarker=True)\n if self.pattern.match(line):\n self.step(1)", "def eol(self):\n if self.current not in EOL:\n self.on_parser_error(\"EOL expected\")\n self.maybe_eol()", "def do_EOF(self, line):\n print()\n return True", "def have_end_quote(quote, line):\n\tclose_quote_match = 
close_quote_re.search(line)\n\treturn close_quote_match is not None and \\\n\t\tclose_quote_match.group(1) == quote", "def semicolon_to_newline(text):\n # Convert semicolons to newlines.\n # Return converted text.\n return '\\n'.join(x for x in text.split(';'))", "def test_missing_delim(self):", "def condense_semicolons(css):\n log.debug(\"Condensing all unnecessary multiple adjacent semicolons.\")\n return re.sub(r\";;+\", \";\", css)", "def line_is_valid(line):\n if '-' in map(lambda item: item.strip(), line.strip().split(\";\")):\n return False\n else:\n return True" ]
[ "0.78582406", "0.76281416", "0.73624676", "0.7034011", "0.6740908", "0.65985113", "0.6566954", "0.649407", "0.62372047", "0.60137314", "0.60137314", "0.60137314", "0.5960441", "0.5945319", "0.58354354", "0.5831994", "0.58205914", "0.5817536", "0.5817003", "0.5816331", "0.5751225", "0.57369584", "0.57125634", "0.566617", "0.5665698", "0.56542385", "0.5649318", "0.56447923", "0.5610962", "0.5573762" ]
0.7773585
1
Checks for a comma
def _is_comma(self, symbol): if symbol.type == self.scanner.COMMA: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_comma(text):\n err = \"style-guide.serial-comma\"\n msg = \"Use serial comma after penultimate item.\"\n regex = \"\\,\\s[a-zA-Z0-9]*\\sand\\s\"\n\n return existence_check(text, [regex], err, msg, require_padding=False)", "def contains_comma(self, *args):\n return _ida_hexrays.cexpr_t_contains_comma(self, *args)", "def comma_detector(self) -> bool:\n curr_pos = self.fileobject.tell()\n line = self.nextline()\n comma = False\n # A bold presumption, perhaps\n if ',' in line:\n comma = True\n self.fileobject.seek(curr_pos)\n return comma", "def check_path_for_comma(path: str) -> None:\n if \",\" in path:\n raise Exception(f\"There should not be ',' in the path {path}\")", "def isseparator(token):\n\n # Token is a comma\n return token == \",\"", "def handle_commas(x):\n if type(x) != str:\n print (\"Please enter a string\")\n return False\n else:\n x = float(x.replace(\",\",\"\"))\n return x", "def valid_value(self, value):\n for val in value.split(','):\n valid = super(MultiSelectField, self).valid_value(val)\n if not valid:\n return False\n return True", "def validate_csv_seq(sequence):\n if sequence.find(',') != -1 or sequence.find(';') != -1:\n return True\n else:\n return False", "def comma_filter(value):\n return '{:,}'.format(value)", "def contains_comma_or_insn_or_label(self, *args):\n return _ida_hexrays.cexpr_t_contains_comma_or_insn_or_label(self, *args)", "def uniqueCheckString(aList):\r\n check = ','\r\n for v in aList:\r\n if f',{v},' in check:\r\n return True\r\n check = f'{check}{v},'\r\n return False", "def comma_code(items):\n item_len = len(items)\n \n if item_len == 0:\n return ''\n elif item_len == 1:\n return items[0]\n\n return ', '.join(items[:-1]) + ', and ' + items[-1]", "def separate_comma(s):\n return s.split(',')", "def test_sans_comma(self):\n nt = NewickTokenizer(newick='(a,(b,c)(d,e));')\n self.assertRaises(ValueError, nt.tokens)", "def is_number(value):\n try:\n float(value.replace(',', ''))\n except ValueError:\n return False\n return True", "def comma_separated_validator(**kwargs):\n for name, param in kwargs.items():\n if param is not None:\n try:\n param.split(',')\n except AttributeError:\n raise PyYouTubeException(ErrorMessage(\n status_code=ErrorCode.INVALID_PARAMS,\n message=f'Parameter {name} must be str or comma-separated list str'\n ))", "def test_unexpected_comma(self):\n nt = NewickTokenizer(newick='(a,(b,c),,(d,e));')\n self.assertRaises(ValueError, nt.tokens)", "def clean_name(name):\n return \",\".join(name.split(\",\")[:1])", "def clean_name(name):\n return \",\".join(name.split(\",\")[:1])", "def count_commas(txt):\n \n count = 0\n for c in txt:\n if c == ',':\n count += 1\n return count", "def comma(block):\n if ',' in block:\n block = '\"' + block + '\"'\n return block", "def check_for_value(tokens):\n ntoks = 0\n for tok in tokens:\n if tok.isspace() or tok == ',':\n continue\n elif tok in ('=', '/', '$', '&'):\n break\n else:\n ntoks += 1\n\n # If ntoks reaches 2, then there must be at least one value.\n if ntoks > 1:\n break\n\n return ntoks > 0", "def split_by_comma(s):\n return s.strip().split(\",\")", "def intcomma(value):\r\n try:\r\n if isinstance(value, compat.string_types):\r\n float(value.replace(',', ''))\r\n else:\r\n float(value)\r\n except (TypeError, ValueError):\r\n return value\r\n orig = str(value)\r\n new = re.sub(\"^(-?\\d+)(\\d{3})\", '\\g<1>,\\g<2>', orig)\r\n if orig == new:\r\n return new\r\n else:\r\n return intcomma(new)", "def _handle_separator(sep):\n if sep is None or sep == \"\":\n return \",\"\n 
else:\n return str(sep)", "def test_get_separator_csv():\n # GIVEN a line with commas as delimiter\n line = \"one,two,three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert comma is returned\n assert sep == \",\"", "def comma_for_space(x):\n x = strip_stoich_wrapper(x)\n x = x.replace(\" \", \",\")\n return x", "def __comma_case(self, content: list, line: str) -> bool:\n if line[12].isdigit():\n final_line = line[13:].strip()\n if 'Opening Remarks' in final_line:\n self.__double_add(content, final_line, 15)\n else:\n content.append(final_line)\n return True\n return False", "def check_valid_csv_data(self, row):\n obj = re.match(re.compile('^[0-9]{4}\\,[A-Z]{1}[a-z]{2}\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Data String must be like `1990` `Jan` Check Sample file\")", "def valid_format(s):\n if len(s) > 7:\n return False\n elif '0' in s and len(s) == 1:\n return True\n elif s[0] == '0' and len(s) > 1:\n return False\n elif introcs.isalpha(s):\n return False\n elif (len(s) > 3) and (introcs.count_str(s, ',') == 0):\n return False\n elif introcs.count_str(s, ',') == 0:\n return introcs.isdecimal(s)\n elif introcs.count_str(s, ',') > 1:\n return False\n elif ',' in s and introcs.count_str(s,',') == 1:\n comma_check = s[introcs.find_str(s, ',')+1:]\n before_comma_check = s[:introcs.find_str(s, ',')]\n introcs.isdecimal(before_comma_check)\n return (True if len(comma_check) == 3 else False) and introcs.isdecimal(before_comma_check)" ]
[ "0.78176993", "0.7652777", "0.75575286", "0.739962", "0.6958344", "0.68487954", "0.67709404", "0.6737014", "0.654276", "0.6426066", "0.6025317", "0.60239697", "0.590207", "0.5873435", "0.5853992", "0.58382696", "0.5835906", "0.579905", "0.579905", "0.5768135", "0.5726377", "0.5719298", "0.5666592", "0.56149256", "0.56139493", "0.56096756", "0.55499035", "0.55434257", "0.55158836", "0.550232" ]
0.78183836
0
Checks if symbol is =
def _is_equal(self, symbol): if symbol.type == self.scanner.EQUALS: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_equality(s):\n return s == \"=\"", "def have_equal_symbol(l):\r\n if \"=\" in str(l):\r\n return 1\r\n else:\r\n return 0", "def __eq__(self, other):\n if isinstance(other, Symbol):\n return self.arg == other.arg\n else:\n return False", "def is_prop_symbol(s):\n return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE'", "def true(symbol):\n return True", "def dummy_eq(self, other, symbol=None):\n s = self.as_dummy()\n o = _sympify(other)\n o = o.as_dummy()\n\n dummy_symbols = [i for i in s.free_symbols if i.is_Dummy]\n\n if len(dummy_symbols) == 1:\n dummy = dummy_symbols.pop()\n else:\n return s == o\n\n if symbol is None:\n symbols = o.free_symbols\n\n if len(symbols) == 1:\n symbol = symbols.pop()\n else:\n return s == o\n\n tmp = dummy.__class__()\n\n return s.xreplace({dummy: tmp}) == o.xreplace({symbol: tmp})", "def _is_symbol(s):\n\tif (type(s) == types.StringType and s >= 'A' and s[0] <= 'Z'\n\t\t\tand (len(s) < 2 or s[1] < '0' or s[1] > '9')):\n\t\treturn 1\n\treturn 0", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def is_symbol(p):\n return len(p) == 1 and p.isalpha()", "def isSetSymbol(self):\n return _libsbml.InitialAssignment_isSetSymbol(self)", "def _is_equal_to_atom(self, atom):\n\n return False", "def has_assignment_for(self, var):\n return self.variable_to_value.get(var) != None", "def is_operator(self, symbol: str) -> bool:\n return symbol in self.operators", "def _is_equal_to_atom(self, atom):\n\n return (self.type == atom.type and self.shape == atom.shape\n and self.itemsize == atom.itemsize\n and np.all(self.dflt == atom.dflt))", "def is_other_symbol(self, symbol: str) -> bool:\n return symbol in self.other_symbols", "def has_symbol(self, sym):\n return self.symbols.has_key(sym)", "def is_assign_to_name(statement):\n return isinstance(statement, ast.Assign) and \\\n len(statement.targets) == 1 and \\\n isinstance(statement.targets[0], ast.Name)", "def is_symbol(obj):\n return isinstance(obj, Symbol)", "def __contains__(self, symbol):\n return str(symbol) in self.__alphabet", "def is_var (string):\n if (string[0] == '?'):\n return True\n return False", "def check_symbol(s,next_index,symbol):\n try:\n next_index = jump_over_space(s,next_index)\n if s[next_index:next_index + len(symbol)] == symbol:\n return next_index + len(symbol) # We must ignore the symbol\n except IndexError:\n return False\n else:\n return False", "def _check_validparam(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validparamids:\n return True\n else:\n return False", "def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False", "def __eq__(self, other):\n if other is self:\n return True\n if isinstance(other, CoordFunctionSymb):\n if other.parent() != self.parent():\n return False\n else:\n return bool(other._express == self._express)\n else:\n return bool(self._express == other)", "def __eq__(self,other):\n if type(other)==type(\"\"):\n return self.word == other\n return self.word == other.word", "def _check_paramindevice(self):\n if self._check_validparam(self.symbol):\n param = self.symbol\n 
self.symbol = self.scanner.get_symbol()\n # Check if '=' is used\n if self._is_equal(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if value is valid\n if self._is_number(self.symbol):\n value = self.symbol\n self.symbol = self.scanner.get_symbol()\n return param, value\n else:\n # The parameter value is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '='\n self._display_syntax_error(\"equal\")\n self._semicolon_skipper()\n return None, None\n else:\n # The parameter type is not valid\n self._display_syntax_error(\"parameter\")\n self._semicolon_skipper()\n return None, None", "def _is_equal_to_enumatom(self, enumatom):\n\n return (self.enum == enumatom.enum and self.shape == enumatom.shape\n and np.all(self.dflt == enumatom.dflt)\n and self.base == enumatom.base)", "def is_accepted_symbol(self, symbol: str) -> bool:\n return symbol in self.accepted_symbols", "def is_comparison_op(self):\r\n return self.value in [\"=\", \"!=\", \"<\", \"<=\", \">\", \">=\"]", "def isSympy(val):\n properties = dir(val)\n return (\"is_symbol\" in properties) or (\"evalf\" in properties)" ]
[ "0.7069563", "0.7053939", "0.6365846", "0.6041129", "0.6032814", "0.6008303", "0.5979109", "0.5968912", "0.59604686", "0.5947024", "0.5919421", "0.5879708", "0.5776344", "0.56683415", "0.5635651", "0.5520605", "0.5435992", "0.5415546", "0.54125285", "0.5401831", "0.5359267", "0.53586733", "0.53525686", "0.5341978", "0.5324488", "0.532332", "0.53186744", "0.52939206", "0.52861446", "0.52684784" ]
0.7887802
0
Checks if symbol is a number
def _is_number(self, symbol): if symbol.type == self.scanner.NUMBER: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_numeral(self, symbol: str) -> bool:\n return symbol in self.numerals", "def is_number(symbol):\n return isa(symbol, complex) or is_rational(symbol)", "def is_int(symbol):\n return isa(symbol, int)", "def _is_num(w):\n symbols = list(w)\n for s in symbols:\n if s in string.digits:\n return '<NUM>'\n return w", "def isnum(self, x):\n\n return x in '1234567890.-'", "def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(G):\n return True", "def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True", "def is_num(var):\n try:\n int(var)\n return True\n except ValueError:\n return False", "def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def isCSymbolNumber(self, *args):\n return _libsbml.ASTBasePlugin_isCSymbolNumber(self, *args)", "def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def is_number(c):\n return '0' <= c <= '9'", "def is_num_char(x):\n return ord('0') <= ord(x) <= ord('9')", "def isnumeric(self):\n return isnumeric(self)", "def is_number(self) -> bool:\n return False", "def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def IsNumber(s):\n try:\n v = float(s)\n return True\n except ValueError:\n return False", "def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False", "def is_numeric(self) -> bool:\n return False", "def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n return False", "def is_number(num):\n try:\n float(num)\n return True\n except ValueError:\n return False", "def is_number(self,s):\n try:\n float(s.replace(\" \", \"\"))\n return True\n except ValueError:\n return False", "def is_numberish(G):\n return True" ]
[ "0.7981778", "0.796459", "0.7456952", "0.71271473", "0.7109606", "0.7074188", "0.7070969", "0.70643014", "0.7049879", "0.70249534", "0.7018031", "0.701756", "0.701756", "0.701756", "0.7005208", "0.69720167", "0.6957312", "0.69454503", "0.69375134", "0.6913471", "0.6888762", "0.6881911", "0.6866895", "0.68619585", "0.68616503", "0.6836739", "0.6834825", "0.682347", "0.6806474", "0.679946" ]
0.87825435
0
Checks if symbol is END
def _is_end(self, symbol): if symbol.id == self.scanner.END_ID: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_eof(self, symbol):\n if symbol.type == self.scanner.EOF:\n return True\n else:\n return False", "def is_eof(eof):\n return eof == Symbol('#!eof')", "def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF", "def end_marker(data):\n if ord(data[-1]) == 10 and data[-2] == '}':\n return True", "def isEnd(self):\n return _libsbml.XMLToken_isEnd(self)", "def end(self): # type: () -> bool\n return self._idx >= len(self._src) or self._current == \"\\0\"", "def is_chunk_end(prev_tag, tag):\n prefix1, chunk_type1 = split_tag(prev_tag)\n prefix2, chunk_type2 = split_tag(tag)\n\n if prefix1 == 'O':\n return False\n if prefix2 == 'O':\n return prefix1 != 'O'\n\n if chunk_type1 != chunk_type2:\n return True\n\n return prefix2 in ['B', 'S'] or prefix1 in ['E', 'S']", "def has_end(self):\n return bool(self._end)", "def is_eof(self) -> bool:\n ...", "def is_end_node():\n return False", "def end(event: EventType, widget: WidgetType) -> bool:\n return event.key == _locals.K_END", "def have_end_quote(quote, line):\n\tclose_quote_match = close_quote_re.search(line)\n\treturn close_quote_match is not None and \\\n\t\tclose_quote_match.group(1) == quote", "def is_end(self, activity) -> bool:\n return activity == self.activity_concept_name(TRACE_END)", "def end(self, e):\n if e is not None:\n if self.charMap[e[0]][e[1]].c in [\"1\", \"3\"]: # wall or start\n print(\"[Error] Invalid end position.\", file=sys.stderr)\n raise UserInputException\n try:\n self.charMap[e[0]][e[1]] = CharMapCell(4)\n except IndexError:\n print(\"[Error] Invalid end position.\", file=sys.stderr)\n raise UserInputException\n self.__end = e", "def end_of_chunk(prev_tag, tag, prev_type, type_):\n chunk_end = False\n\n if prev_tag == 'E': chunk_end = True\n if prev_tag == 'S': chunk_end = True\n\n if prev_tag == 'B' and tag == 'B': chunk_end = True\n if prev_tag == 'B' and tag == 'S': chunk_end = True\n if prev_tag == 'B' and tag == 'O': chunk_end = True\n if prev_tag == 'I' and tag == 'B': chunk_end = True\n if prev_tag == 'I' and tag == 'S': chunk_end = True\n if prev_tag == 'I' and tag == 'O': chunk_end = True\n\n if prev_tag != 'O' and prev_tag != '.' and prev_type != type_:\n chunk_end = True\n\n return chunk_end", "def _is_end_of_label(self, prev_top: str, now_top: str, prev_type: str, now_type: str) -> bool:\n\n if prev_top in ['E', 'S']:\n return True\n elif prev_top == 'B' and now_top in ['B', 'O']:\n return True\n elif prev_top == 'I' and now_top in ['B', 'O', 'S']:\n return True\n elif prev_top != 'O' and prev_type != now_type:\n return True\n return False", "def test_sv_end_svend():\n # Example:\n # 2 321682 . 
T <DEL> 6 PASS SVTYPE=DEL;END=321887;SVLEN=-205;CIPOS=-56,20;CIEND=-10,62 GT:GQ 0/1:12\n end = sv_end(pos=321682, alt=\"<DEL>\", svend=321887, svlen=-205)\n assert end == 321886", "def is_eof(line):\n return line == \"\"", "def test_end(self):\n return self._endTest('\\x05')", "def endOfChunk(prevTag, tag, prevType, type_):\r\n return ((prevTag == \"B\" and tag == \"B\") or\r\n (prevTag == \"B\" and tag == \"O\") or\r\n (prevTag == \"I\" and tag == \"B\") or\r\n (prevTag == \"I\" and tag == \"O\") or\r\n\r\n (prevTag == \"E\" and tag == \"E\") or\r\n (prevTag == \"E\" and tag == \"I\") or\r\n (prevTag == \"E\" and tag == \"O\") or\r\n (prevTag == \"I\" and tag == \"O\") or\r\n\r\n (prevTag != \"O\" and prevTag != \".\" and prevType != type_) or\r\n (prevTag == \"]\" or prevTag == \"[\"))\r\n # corrected 1998-12-22: these chunks are assumed to have length 1\r", "def get_end_brace(self):\n # Find the code to run\n\n brack_num, found_first = 0, False\n for iline, line in enumerate(self.file_ltxt[self.line_num:]):\n if '{' in line: brack_num += 1\n if '}' in line: brack_num -= 1\n\n if not found_first:\n if brack_num > 0: found_first = True\n else: continue\n\n if brack_num == 0: break\n\n else:\n self.print_error(\"Can't find the closing brace\")\n\n end_line = self.line_num + iline\n return end_line", "def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def handle_close_tag(self, token, lexer):\n prefix, tag=self._split_tagname(token.text[1:])\n if self.tagName==tag and self.prefix==prefix:\n self._token=t=lexer.next()\n if t.tokenType==t_END_TAG:\n return True\n else:\n self.handle_error(\"malformed tag\")\n # if we get here\n self.handle_error('close tag not expected in this context')", "def is_sentence_end(mystem_element):\n word = mystem_element.get('text', '')\n return word == '\\\\s' or word == '\\n'", "def check_end_block_height_of_term(self, context: 'IconScoreContext') -> bool:\n return self.term.end_block_height == context.block.height", "def isEndFor(self, *args):\n return _libsbml.XMLToken_isEndFor(self, *args)" ]
[ "0.7342705", "0.7149009", "0.70896596", "0.70774", "0.67542297", "0.6487307", "0.63351965", "0.6282967", "0.6278268", "0.6223454", "0.6220154", "0.6128539", "0.6115099", "0.6088857", "0.60728556", "0.60151976", "0.5978687", "0.5965214", "0.5928278", "0.5921364", "0.5912482", "0.590478", "0.59034353", "0.59034353", "0.59034353", "0.59034353", "0.5894372", "0.586769", "0.58575004", "0.58462507" ]
0.8258185
0
Checks if symbol is period
def _is_period(self, symbol): if symbol.type == self.scanner.PERIOD: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_dot(f):\n return f.startswith('.')", "def contains_only_digit_period(cell):\n # Check if empty\n if check_empty(cell):\n return True\n return not bool(re.match(\"^[\\d\\.]+$\", str(cell)))", "def test_assert_does_not_contain_period(self):\n\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a (.) character\")):\n api._assert_does_not_contain(\"There is a period in this string.\", \".\", \"quote\")", "def validPeriod(period):\r\n try:\r\n i = float(period)\r\n except ValueError:\r\n return False\r\n else:\r\n if i>0:\r\n return True\r\n else:\r\n return False", "def is_symbol(p):\n return len(p) == 1 and p.isalpha()", "def test_trailing_period(self):\r\n self.assertEqual(4.0, calc.evaluator({}, {}, '4.'))", "def test_period(self):\r\n with self.assertRaises(ParseException):\r\n calc.evaluator({}, {}, '.')\r\n with self.assertRaises(ParseException):\r\n calc.evaluator({}, {}, '1+.')", "def must_be_alphanumeric_space_period(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n # If it's not nan, check it's a number\n return not bool(re.match(r\"^[a-zA-Z .0-9]+$\", str(cell)))", "def _is_symbol(s):\n\tif (type(s) == types.StringType and s >= 'A' and s[0] <= 'Z'\n\t\t\tand (len(s) < 2 or s[1] < '0' or s[1] > '9')):\n\t\treturn 1\n\treturn 0", "def is_number_char(c: str) -> bool:\n return c.isdigit() or c == \".\"", "def _read_dot(self):\n if self._next_char() != '.':\n raise SyntaxError(self.build_error_string(\"Unknown token '.'\"))\n return TOKEN_INTERVAL", "def is_numeral(self, symbol: str) -> bool:\n return symbol in self.numerals", "def period(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[0]", "def count_periods(txt):\n count = 0\n for c in txt:\n if c == '.':\n count += 1\n return count", "def is_und_symbol(self):\n return self.__und", "def is_fraction(s: Any) -> bool:\n return bool(re.match(r\"^-?\\d+/\\d+$\", s))", "def have_mod_symbol(l):\r\n if \"%\" in str(l):\r\n return 1\r\n else:\r\n return 0", "def check_pra_symbol(symbol):\n # Platts\n if len(symbol) == 7 and symbol[:2] in [\n 'PC', 'PA', 'AA', 'PU', 'F1', 'PH', 'PJ', 'PG', 'PO', 'PP', ]:\n return True\n\n # Argus\n if '.' in symbol:\n sm = symbol.split('.')[0]\n if len(sm) == 9 and sm.startswith('PA'):\n return True\n\n return False", "def is_prop_symbol(s):\n return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE'", "def isvalid(str_input):\r\n if str_input == \"\":\r\n return True\r\n if str_input[0] not in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"-\", \".\"]:\r\n return False\r\n dot_count = 0\r\n for ch in str_input[1:]:\r\n if ch == \".\":\r\n dot_count += 1\r\n if dot_count == 2:\r\n return False\r\n if ch not in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \".\"]:\r\n return False\r\n return True", "def fix_missing_period(line):\n if \"@highlight\" in line: return line\n if line==\"\": return line\n if line[-1] in END_TOKENS: return line\n # print line[-1]\n return line + \" .\"", "def _isDecimalNumber(strWord):\n return NumberFormula.DECIMALNUMBERREGEX.match(strWord) != None", "def period(self) -> int:", "def fix_missing_period(line):\n if \"@highlight\" in line:\n return line\n if line == \"\":\n return line\n if line[-1] in END_TOKENS:\n return line\n return line + \" .\"", "def is_punctuation(ch):\n if (ch == '.'): return False\n if (ch >= '!' 
and ch <= '/'): return True\n if (ch >= ':' and ch <= '@'): return True\n if (ch >= '\\u2010' and ch <= '\\u2014'): return True # various dashes\n if (is_quote_mark(ch)): return True\n return False", "def check_extension(f):\n parts = f.split('.')\n last = parts[len(parts) - 1]\n return last in allowed_extensions", "def match1(p, text):\n if not text: return False\n return p == '.' or p == text[0]", "def period(self, value: int, /) -> None:", "def isNumber(st):\n\treturn st.replace('.','',1).isdigit()", "def parse_period(self, default=None): # TODO: this method is absolutly useless here...\n\t\tcfg_period = self.cfg_root.find('period')\n\t\tif cfg_period and cfg_period.text.isnumeric():\n\t\t\tcfg_period = int(cfg_period.text)\n\t\telse: # period not specified or not numerical value\n\t\t\tcfg_period = default\n\n\t\treturn cfg_period" ]
[ "0.67990226", "0.66136557", "0.6595603", "0.6524375", "0.65057904", "0.62380713", "0.6233876", "0.61981905", "0.6056237", "0.6032704", "0.5983063", "0.59301597", "0.5924762", "0.57949024", "0.57834405", "0.5684264", "0.5677624", "0.56559867", "0.5553411", "0.55044025", "0.54918545", "0.5485322", "0.54816276", "0.54769325", "0.54549664", "0.5421861", "0.54068285", "0.53944623", "0.53672606", "0.5363251" ]
0.84950036
0
Checks if symbol is arrow
def _is_arrow(self, symbol): if symbol.type == self.scanner.ARROW: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_arrow(arrow):\r\n if arrow == '->':\r\n return u'\\u2192'\r\n if arrow == '<->':\r\n return u'\\u2194'\r\n\r\n # this won't be reached unless we add more arrow types, but keep it to avoid explosions when\r\n # that happens.\r\n return arrow", "def arrow(m) -> str:\n return m.arrow", "def is_symbol(p):\n return len(p) == 1 and p.isalpha()", "def is_operator(self, symbol: str) -> bool:\n return symbol in self.operators", "def _is_equal(self, symbol):\n if symbol.type == self.scanner.EQUALS:\n return True\n else:\n return False", "def standalone_arrow(m) -> str:\n return m.standalone_arrow", "def is_other_symbol(self, symbol: str) -> bool:\n return symbol in self.other_symbols", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def have_anchor_symbol(l):\r\n if \"<\" in str(l) or \">\" in str(l):\r\n return 1\r\n else:\r\n return 0", "def next_symbol(self):\r\n try:\r\n return self.rule.rightside[self.position]\r\n except IndexError:\r\n return None", "def check_symbol(s,next_index,symbol):\n try:\n next_index = jump_over_space(s,next_index)\n if s[next_index:next_index + len(symbol)] == symbol:\n return next_index + len(symbol) # We must ignore the symbol\n except IndexError:\n return False\n else:\n return False", "def isNavigationKey(event_string):\n\n navigationKeys = [ \"Left\", \"Right\", \"Up\", \"Down\" ]\n\n reply = (event_string in navigationKeys) or _orcaModifierPressed\n\n debug.println(debug.LEVEL_FINEST,\n \"orca.isNavigationKey: returning: %s\" % reply)\n return reply", "def _is_symbol(s):\n\tif (type(s) == types.StringType and s >= 'A' and s[0] <= 'Z'\n\t\t\tand (len(s) < 2 or s[1] < '0' or s[1] > '9')):\n\t\treturn 1\n\treturn 0", "def is_connective(char):\n return char in [u\"¬\", u\"∧\", u\"∨\", u\"→\", u\"↔\"]", "def arrows(m) -> str:\n return m.arrow_list", "def checkkey(self, k):\r\n if k == self.shortcut:\r\n return True\r\n return False", "def isLeFormula(formula):\r\n return '->' in formula", "def handle_arrowkeys(self, key):\n return key", "def _is_end(self, symbol):\n if symbol.id == self.scanner.END_ID:\n return True\n else:\n return False", "def arrow_direction(prev_val, current_val):\n if current_val > prev_val:\n return \"up-big\"\n if current_val < prev_val:\n return \"down-big\"\n return \"minus\"", "def _is_comma(self, symbol):\n if symbol.type == self.scanner.COMMA:\n return True\n else:\n return False", "def true(symbol):\n return True", "def is_und_symbol(self):\n return self.__und", "def __isDirection__(self, word):\n self.directions = ('north', 'south', 'east', 'west', 'down', 'up', 'left', 'right', 'back')\n for direction in self.directions:\n if direction == word:\n return ('direction', word), True\n return None, False", "def get_arrow_type(self) -> str:\n return ARROW_TYPES.inv[self.arrowType()]", "def hasShortcuts(self):\n return '.' in self.columnName()", "def is_prop_symbol(s):\n return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE'", "def is_pressed(self) -> bool:", "def _is_semicolon(self, symbol):\n if symbol.type == self.scanner.SEMICOLON:\n return True\n else:\n return False", "def should_lex(cls, char):\n return char == '{' or char == '}'" ]
[ "0.6579438", "0.62176615", "0.6158133", "0.61140186", "0.56991667", "0.56856215", "0.5675952", "0.56478024", "0.56177944", "0.56067", "0.5604297", "0.55857825", "0.5528179", "0.549328", "0.549197", "0.5484557", "0.54716647", "0.5453517", "0.54349774", "0.54213643", "0.5419489", "0.5397471", "0.5382525", "0.53502846", "0.5325364", "0.5324871", "0.5324486", "0.52891594", "0.52742416", "0.52660394" ]
0.8980897
0
Checks if symbol is EOF
def _is_eof(self, symbol): if symbol.type == self.scanner.EOF: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_eof(eof):\n return eof == Symbol('#!eof')", "def _is_end(self, symbol):\n if symbol.id == self.scanner.END_ID:\n return True\n else:\n return False", "def isEOF(self):\n return _libsbml.XMLToken_isEOF(self)", "def is_eof(self) -> bool:\n ...", "def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def at_eof(self) -> bool:\n ...", "def eof(self):\r\n\t\treturn self.index == len(self.data)", "def is_eof(line):\n return line == \"\"", "def do_EOF(self, arg):\n \treturn True", "def do_EOF(self, arg):\n return True", "def isEOF(self):\n return _libsbml.XMLInputStream_isEOF(self)", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def _check_fixed_start(self):\n self.symbol = self.scanner.get_symbol()\n if self.symbol.type == self.scanner.KEYWORD and \\\n self.symbol.id == self.scanner.START_ID:\n pass\n elif self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n else:\n self._display_syntax_error(\"start\")", "def match(self, token):\n try:\n if token == 'S' and is_symbol(self.the_input[self.index]) \\\n or self.the_input[self.index] == token:\n self.index += 1\n return True\n except IndexError:\n print 'Error on checking \\'' + token + \\\n '\\': the next token is empty'\n exit(1)\n print 'No' # there is improper grammar\n exit(1)", "def at_eof(self):\n return self._eof and not self._buffer", "def do_EOF(self, args):\n return True", "def do_EOF(self, args):\n return True", "def do_EOF(self, args):\n return True", "def do_EOF(self):\n return self.do_exit()", "def at_eof(self):\n return self.tell() == len(self)", "def handle_eof_in_block(self):\n self.handle_error(\"hit EOF, expected close tag\")", "def _check_fixed_others(self, symbol_id):\n\n # Get the next symbol\n self.symbol = self.scanner.get_symbol()\n if self.symbol.type == self.scanner.KEYWORD and \\\n self.symbol.id == symbol_id:\n self.symbol = self.scanner.get_symbol()\n self._check_semicolon_else_skip(self.symbol)\n elif self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n else:\n # Error in symbol\n self._display_syntax_error(symbol_id)\n # Skip to semicolon at end of line\n self._semicolon_skipper()", "def is_symbol(p):\n return len(p) == 1 and p.isalpha()", "def trata_EOF(self):\n pass", "def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')" ]
[ "0.81757635", "0.75523627", "0.72510964", "0.7247274", "0.72082675", "0.6746605", "0.6746605", "0.6746605", "0.6746605", "0.6720954", "0.6688368", "0.6590599", "0.6573672", "0.6370021", "0.62316614", "0.62316614", "0.62316614", "0.6191076", "0.61427915", "0.6137312", "0.61229044", "0.61229044", "0.61229044", "0.6102183", "0.6099963", "0.6088348", "0.608147", "0.6014212", "0.5962607", "0.5954708" ]
0.89874196
0
Checks the entire devices list until END DEVICE is reached
def _check_devicelist(self): self.symbol = self.scanner.get_symbol() # Repeatedly call _check_deviceline() until END DEVICE while ( not self._is_end( self.symbol)) and ( not self._is_eof( self.symbol)): self._check_deviceline() if self._is_eof(self.symbol): # In case file ends prematurely pass return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scan_devices(self):\n _LOGGER.debug(\"Scan_devices invoked.\")\n if self._update_info() == False:\n # self.hass.data[DOMAIN]['devices'] = None\n self.hass.data[DOMAIN]['scanning'] = False\n _LOGGER.warning(\"Can't update device list\")\n return []\n else:\n clients = [client.mac for client in self.last_results]\n self.hass.data[DOMAIN]['devices'] = clients\n self.hass.data[DOMAIN]['scanning'] = True\n return clients", "def poll_device(self):\n #self.logger.info(\"poll_device: Checking online status\")\n for tasmota_topic in self.tasmota_devices:\n if self.tasmota_devices[tasmota_topic].get('online', None) is not None:\n if self.tasmota_devices[tasmota_topic]['online_timeout'] < datetime.now():\n self.tasmota_devices[tasmota_topic]['online'] = False\n self.set_item_value(tasmota_topic, 'item_online', False, 'poll_device')\n self.logger.info(f\"poll_device: {tasmota_topic} is not online any more - online_timeout={self.tasmota_devices[tasmota_topic]['online_timeout']}, now={datetime.now()}\")", "def update_device_list(self):\n\n # Update devices via HTTP request (basic device data - no status)\n self.__http_update_device_list()\n\n # Fetch status for each known device via MQTT\n for gdev in self.__devices.values():\n gdev.request_status()", "def find_devices (devicelist):\n vprint(\"\\nFind known devices:\")\n for device in devicelist:\n if find_device(device) is not None :\n vprint(\"\\tFound :\", device)\n else:\n vprint(\"\\tNOT found:\", device )\n vprint(\"..........\") \n return", "def update(self):\n self._devices_list = []\n self.sendto(\"FIND%-8s\" % (self.search_password,))\n\n start = time.time()\n while start + self.timeout > time.time():\n rfds, _, _ = select.select([self.device_s], [], [], 0.5)\n\n for sock in rfds:\n data = self.recvfrom()\n if data[0:4] in (\"IMIN\", \"SETC\"):\n try:\n dev = WizSearch.DEVICE_TYPES[self.device_type](data[4:])\n # devices.append(self.extract_IMIN(data, wiztype))\n if not self.allowed_mac or dev.mac in self.allowed_mac:\n self._devices_list.append(dev)\n except:\n logger.exception(\"parsing error.\")\n\n if not self._devices_list:\n logger.error(\"Timeout, no devices found\")\n return self._devices_list", "def listDevices(self):\n count = 0\n for device in self:\n count += 1\n printLog(\"Device \" + str(count) + \": '%s %s (%s, %s, %s)'\" % (\n device.make, device.model, device.deviceId, device.androidVersion, device.operator))\n if device.idle:\n printLog(\"[Idle]\")\n else:\n printLog(\"[Busy]\")", "async def _async_has_devices(hass) -> bool:\n gree_discovery = Discovery(DISCOVERY_TIMEOUT)\n devices = await gree_discovery.scan(wait_for=DISCOVERY_TIMEOUT)\n return len(devices) > 0", "async def async_scan_devices(self):\n await self.async_update_info()\n return list(self.last_results.keys())", "def check_device_state(self):", "def refreshData(self):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"refreshData() method called.\")\n\n try:\n # Check to see if there have been any devices created.\n if indigo.devices.itervalues(filter=\"self\"):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Updating data...\")\n\n for dev in indigo.devices.itervalues(filter=\"self\"):\n self.refreshDataForDev(dev)\n\n else:\n indigo.server.log(u\"No Emby Client devices have been created.\")\n\n return True\n\n except Exception as error:\n self.errorLog(u\"Error refreshing devices. 
Please check settings.\")\n self.errorLog(unicode(error))\n return False", "def devicelist(self):\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.DEVICES_ID):\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.LEFT_CURLY):\n self.symbol = self.scanner.get_symbol()\n self.device()\n while (self.symbol.type == self.scanner.NAME):\n self.device()\n # Check right curly bracket ends device block\n if (self.symbol.type == self.scanner.RIGHT_CURLY):\n self.symbol = self.scanner.get_symbol()\n else:\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.CONNECT_ID):\n # Error Type: missing '}'\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END'\n self.error(self.MISSING_RIGHT_CURLY,\n [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Bad name terminated devices incorrectly\n # Error type: Invalid name\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END'\n self.error(self.DEVICE_NAME, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Left curly needed after 'DEVICE'\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END' KEYWORD\n self.error(self.NO_CURLY_DEVICE, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: 'DEVICE' keyword required\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END' KEYWORD\n self.error(self.NEED_DEVICE_KEYWORD, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])", "def test_get_devices(self):\n pass", "def test_get_devices(self):\n pass", "def check_devices():\n\n cur = g.db.execute('select node from slaves')\n NODES=[r[0] for r in cur.fetchall()]\n dev = {}\n for node in NODES:\n try:\n print \"Getting Device and status from \"+ node\n logger.info('Getting Devices and status from ' +node)\n url = Request(\"http://\"+ node +\":5000/api/all/status\")\n data = json.load(urllib2.urlopen(url))\n dev[node] = data\n except URLError, e:\n print e\n logger.error(e)\n dev[node] = e\n if not dev:\n dev[\"error\"] = \"error No devices found please add\"\n return dev\n else:\n return dev", "def test_verify_list_of_devices_in_my_network():", "def __build_device_list(self):\r\n if self.__lib.TLI_BuildDeviceList() != 0:\r\n raise FailedException(\"build the device list.\")", "def test_get_devices(self):\n print(\"Test Device List\")\n self.mock_api.return_value = call_json.DeviceList.device_list_response()\n self.manager.get_devices()\n all_kwargs = parse_args(self.mock_api)\n assert assert_test(self.manager.get_devices, all_kwargs, None,\n self.write_api, self.overwrite)\n assert len(self.manager.bulbs) == call_json_bulbs.BULBS_NUM\n assert len(self.manager.outlets) == call_json_outlets.OUTLETS_NUM\n assert len(self.manager.fans) == call_json_fans.FANS_NUM\n assert len(self.manager.switches) == call_json_switches.SWITCHES_NUM", "def getDeviceList(self):\r\n\r\n self._logger.debug(\"In getDeviceList()...\")\r\n\r\n # update the security token if needed \r\n if self._checkToken():\r\n\r\n response = self._callAPI(_API_GET_DEVICE_LIST, useSession=True)\r\n\r\n if response is not None:\r\n\r\n deviceInfo = response.json()\r\n \r\n if response.status_code == 200 and \"items\" in deviceInfo:\r\n\r\n deviceList = []\r\n\r\n for dev in deviceInfo[\"items\"]:\r\n\r\n # pull out common attributes\r\n deviceID = dev[\"serial_number\"]\r\n deviceType = 
dev[\"device_family\"]\r\n description = dev.get(\"name\", deviceType + \" \" + deviceID[-4:])\r\n\r\n # uncomment the next line to inspect the devices returned from the MyQ service\r\n self._logger.debug(\"Device Found - Device ID: %s, Device Type: %s, Description: %s\", deviceID, deviceType, description)\r\n\r\n # add device to the list with properties based on type\r\n if deviceType == API_DEVICE_TYPE_GATEWAY:\r\n\r\n # get gateway attributes\r\n online = dev[\"state\"][\"online\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add gateway device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"description\": description,\r\n \"online\": online,\r\n \"last_updated\": lastUpdated\r\n })\r\n\r\n elif deviceType == API_DEVICE_TYPE_OPENER:\r\n \r\n # get the door attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"door_state\"]\r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add garage door opener device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n elif deviceType == API_DEVICE_TYPE_LAMP:\r\n\r\n # get the lamp attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"lamp_state\"] \r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add lamp device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n return deviceList\r\n \r\n elif response.status_code == 401:\r\n \r\n self._logger.error(\"There was an authentication error with the MyQ account: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n \r\n self._logger.error(\"Error retrieving device list: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n # Error logged in _callAPI function\r\n return None\r\n\r\n else:\r\n # Check token failed - wait and see if next call successful\r\n return None", "async def async_get_devices(self):\n if self.token is None:\n await self.async_initialize_token()\n\n self.devices.clear()\n raw = await self._async_ws_get_function(CMD_DEVICES)\n\n try:\n xml_root = element_tree.fromstring(raw)\n mac_adresses: List[str] = [mac.text for mac in xml_root.iter(\"MACAddr\")]\n hostnames: List[str] = [mac.text for mac in xml_root.iter(\"hostname\")]\n ip_addresses: List[str] = [mac.text for mac in xml_root.iter(\"IPv4Addr\")]\n interfaces: List[str] = [mac.text for mac in xml_root.iter(\"interface\")]\n speeds: List[str] = [mac.text for mac in xml_root.iter(\"speed\")]\n interface_ids: List[str] = [\n mac.text for mac in xml_root.iter(\"interfaceid\")\n ]\n methods: List[str] = [mac.text for mac in xml_root.iter(\"method\")]\n lease_times: List[str] = [mac.text for mac in xml_root.iter(\"leaseTime\")]\n\n for (\n mac_address,\n hostname,\n ip_address,\n interface,\n speed,\n interface_id,\n method,\n lease_time,\n ) in zip(\n mac_adresses,\n hostnames,\n ip_addresses,\n interfaces,\n speeds,\n interface_ids,\n methods,\n lease_times,\n ):\n self.devices.append(\n Device(\n mac_address,\n hostname,\n ip_address.partition(\"/\")[0],\n interface,\n speed,\n interface_id,\n 
method,\n lease_time,\n )\n )\n except (element_tree.ParseError, TypeError):\n _LOGGER.warning(\"Can't read device from %s\", self.host)\n self.token = None\n raise exceptions.ConnectBoxNoDataAvailable() from None", "def check_usb(self):\n print(\"This will read from the station console repeatedly to see if\")\n print(\"there are errors in the USB communications. Leave this running\")\n print(\"for an hour or two to see if any bad reads are encountered.\")\n print(\"Bad reads will be reported in the system log. A few bad reads\")\n print(\"per hour is usually acceptable.\")\n ptr = data_start\n total_count = 0\n bad_count = 0\n while True:\n if total_count % 1000 == 0:\n active = self.station.current_pos()\n while True:\n ptr += 0x20\n if ptr >= 0x10000:\n ptr = data_start\n if active < ptr - 0x10 or active >= ptr + 0x20:\n break\n result_1 = self.station._read_block(ptr, retry=False)\n result_2 = self.station._read_block(ptr, retry=False)\n if result_1 != result_2:\n log.info('read_block change %06x' % ptr)\n log.info(' %s' % str(result_1))\n log.info(' %s' % str(result_2))\n bad_count += 1\n total_count += 1\n print(\"\\rbad/total: %d/%d \" % (bad_count, total_count), end=' ')\n sys.stdout.flush()", "def test_gwservice_listdevices(self, setup_controller):\n resp = setup_controller.request(\"gw\", \"devices\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw list devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)", "async def _check_api(self) -> None:\n await self._api_request(\"devices\")", "def pop_adv_devices(self):\r\n if self.localSDK.devList:\r\n del self.localSDK.devList[:]\r\n try:\r\n self.localSDK.get_devices() # Get list of boards from KSDK manifest file\r\n except IOError:\r\n self.localSDK.devList = ['None']\r\n return", "def check_expected_devices():\n\n res = devices()\n error = extract_error_from(res)\n if error:\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"Check expected devices got error result: {}\".format(res))\n return\n\n expected_usb_devices = __opts__.get(\"expected_usb_devices\", [])\n vendors_products = [ \"{}:{}\".format(dev[\"vendor\"], dev[\"product\"]) for dev in res[\"values\"] ]\n\n for dev in expected_usb_devices:\n if dev not in vendors_products:\n vendor, product = dev.split(\":\")\n tag = \"system/usb/{}/{}/not_connected\".format(vendor, product)\n __salt__[\"minionutil.trigger_event\"](tag)", "async def get_device_list(self):\n self.logger.debug(\"Retrieving device list information.\")\n #url = 'https://{}/api/user/device'.format(self.apiHost) #suddenly stopped worrking, so use\n '''\n #full version\n url = 'https://{}/api/user/device?lang=en&apiKey={}&getTags=1&version={}&ts={}&nonce={}&appid={}&imei={}&os={}&model={}&romVersion={}&appVersion={}'.format(self.apiHost,\n self.apikey,\n self.timestamp,\n self._version,\n self._nonce,\n self._appid,\n self._imei,\n self._os,\n self._model,\n self._romVersion,\n self._appVersion)\n '''\n url = 'https://{}/api/user/device?version={}&appid={}'.format(self.apiHost, self._version, self._appid)\n headers = {\n 'Authorization': 'Bearer %s' % self.authenticationToken,\n }\n self.logger.debug('url: %s, headers: %s' % (url, headers))\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n json_response = await response.json()\n \n self.logger.debug('received response status: %s' % response.status) \n self.logger.debug('received 
response: %s' % self.pprint(json_response))\n if response.status != 200:\n self.logger.error('error: %s received' % response.status)\n return\n \n if json_response.get(\"devicelist\"):\n self.logger.info('New response format found')\n json_response = json_response[\"devicelist\"]\n \n self.logger.debug('number of device(s) is: %d' % len(json_response))\n \n self._devices = json_response #list of devices and current configurations\n \n self._create_client_devices()\n \n '''\n Example Response:\n [\n {\n \"__v\": 0,\n \"_id\": \"5becffa6d2b4a3c34cb79b38\",\n \"apikey\": \"530303a6-cf2c-4246-894c-xxxxxxxxxxx\",\n \"brandName\": \"AUTOSLIDE\",\n \"createdAt\": \"2018-11-15T05:09:58.341Z\",\n \"deviceStatus\": \"\",\n \"deviceUrl\": \"\",\n \"deviceid\": \"100050xxxxx\",\n \"devicekey\": \"4123ec79-d2c3-4d32-930a-xxxxxxxxxxxxx\",\n \"extra\": {\n \"_id\": \"xxxxxxxxxxxxxxxx\",\n \"extra\": {\n \"apmac\": \"xx:xx:xx:xx:xx:xx\",\n \"brandId\": \"5a6fcf00f620073c67efc280\",\n \"description\": \"20180813001\",\n \"mac\": \"xx:xx:xx0:xx:xx:xx\",\n \"manufacturer\": \"\\u9752\\u5c9b\\u6fb3\\u601d\\u5fb7\\u667a\\u80fd\\u95e8\\u63a7\\u7cfb\\u7edf\\u6709\\u9650\\u516c\\u53f8\",\n \"model\": \"PSA-BTA-GL\",\n \"modelInfo\": \"5af3f5332c8642b001540dac\",\n \"ui\": \"\\u63a8\\u62c9\\u5ba0\\u7269\\u95e8\",\n \"uiid\": 54\n }\n },\n \"group\": \"\",\n \"groups\": [],\n \"ip\": \"xxx.xx.xx.xxx\",\n \"location\": \"\",\n \"name\": \"Patio Door\",\n \"offlineTime\": \"2018-12-31T07:23:31.018Z\",\n \"online\": true,\n \"onlineTime\": \"2018-12-31T12:19:33.216Z\",\n \"params\": {\n \"a\": \"3\",\n \"b\": \"3\",\n \"c\": \"1\",\n \"d\": \"1\",\n \"e\": \"1\",\n \"f\": \"1\",\n \"fwVersion\": \"2.0.2\",\n \"g\": \"0\",\n \"h\": \"1\",\n \"i\": \"0\",\n \"j\": \"00\",\n \"k\": \"0\",\n \"l\": \"1\",\n \"m\": \"2\",\n \"n\": \"0\",\n \"rssi\": -53,\n \"staMac\": \"xx:xx:xx:xx:xx:xx\"\n },\n \"productModel\": \"WFA-1\",\n \"settings\": {\n \"alarmNotify\": 1,\n \"opsHistory\": 1,\n \"opsNotify\": 0\n },\n \"sharedTo\": [\n {\n \"note\": \"\",\n \"permit\": 15,\n \"phoneNumber\": \"[email protected]\",\n \"shareTime\": 1542259546087\n }\n ],\n \"showBrand\": true,\n \"type\": \"10\",\n \"uiid\": 54\n }\n ]\n \n or New format:\n {\n \"devicelist\": [\n {\n \"__v\": 0,\n \"_id\": \"5c3665d012d28ae6ba4943c8\",\n \"apikey\": \"530303a6-cf2c-4246-894c-50855b00e6d8\",\n \"brandLogoUrl\": \"https://us-ota.coolkit.cc/logo/KRZ54OifuGmjoEMxT1YYM3Ybu2fj5K2C.png\",\n \"brandName\": \"Sonoff\",\n \"createdAt\": \"2019-01-09T21:21:20.402Z\",\n \"devConfig\": {},\n \"devGroups\": [],\n \"deviceStatus\": \"\",\n ... as before\n '''", "def checkIsAlive(devices):\n while True:\n\n for device in devices:\n isAlive = checkIP(device[\"ip\"])\n\n if isAlive:\n msg = f'O dispositivo {device[\"name\"]} está na rede!'\n print(msg)\n playMessage(msg)\n else:\n print(f'O dispositivo {device[\"name\"]} está morto!')\n \n sleep(30*60)", "def InventoryDevices(self):\n self.logger.debug(\"Start Inventory...\")\n \n # Find our desired usb devices. These should be present in /dev somewhere.\n osDevices = os.listdir(\"/dev\")\n osDevices.sort()\n\n # Loop through all devices in /dev asking them what they are.\n for anOSDevice in osDevices:\n \n deviceName = \"/dev/\" + anOSDevice\n # We're making use of the unix command \"udevadm\". 
Read up on it!\n cmd = [\"udevadm\", \"info\", \"-q\", \"all\", \"-n\", deviceName]\n #print(cmd)\n pid=\"\"\n vid=\"\"\n uid=\"\"\n \n # Launch udevadm for the current device name.\n FNULL = open(os.devnull, 'w')\n proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=FNULL)\n while True:\n line = proc.stdout.readline()\n if len(line) != 0:\n #print(line.rstrip())\n # Parse out the pieces of the output lines looking for the relavent information.\n parts = re.split(\"[ ]\", line.__str__())\n #print(parts)\n if len(parts) > 1:\n kvParts = re.split(\"[=]\", parts[1].__str__())\n #print(kvParts)\n # We care about procuct id, vendor id and serial number.\n if (kvParts[0] == \"ID_VENDOR_ID\"):\n vid = kvParts[1][:-1]\n if (kvParts[0] == \"ID_MODEL_ID\"):\n pid = kvParts[1][:-1]\n if (kvParts[0] == \"ID_SERIAL\"):\n uid = kvParts[1][:-1]\n if (kvParts[0] == \"ID_SERIAL_SHORT\"):\n uid = kvParts[1][:-1]\n else:\n break\n\n # We found a device with a Product ID and Vendor ID. Is it one were expecting?\n if len(pid) > 0 and len(vid) > 0:\n self.logger.info( \"Checking if device with ProductID: \" + pid + \" and VendorID: \" + vid + \" on \" + deviceName + \" is needed...\") \n foundItem = next((x for x in self.expectedDevices if isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) and \n x.pid == pid and\n x.vid == vid and\n x.uid == uid and\n x.inventoried == False), None)\n \n if foundItem is not None:\n if isinstance(foundItem, usb_serial_device.USBSerialDevice) == True:\n if anOSDevice.startswith( 'tty') == True:\n # Device is a Serial USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n else:\n #Device is a plain USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n \n FNULL.close()\n\n\n # At this point, we may still not have all the found devices. So we'll fall back to using \"lsub\" to look for devices.\n # The reason they are not found is that some devices do not add an entry to /dev. However, lsusb does not give a\n # serial number\n cmd = [\"lsusb\"]\n # print(cmd)\n pid = \"\"\n vid = \"\"\n uid = \"\"\n\n # Launch udevadm for the current device name.\n FNULL = open(os.devnull, 'w')\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=FNULL)\n while True:\n line = proc.stdout.readline()\n if len(line) != 0:\n # print(line.rstrip())\n # Parse out the pieces of the output lines looking for the relavent information.\n parts = re.split(\"[ ]\", line.__str__())\n # print(parts)\n if len(parts) > 1:\n kvParts = re.split(\"[:]\", parts[5].__str__())\n # print(kvParts)\n # We care about procuct id, vendor id.\n vid = kvParts[0]\n pid = kvParts[1]\n\n # We found a device with a Product ID and Vendor ID. 
Is it one were expecting?\n if len(pid) > 0 and len(vid) > 0:\n self.logger.info(\n \"Checking if device with ProductID: \" + pid + \" and VendorID: \" + vid + \" is needed...\")\n foundItem = next((x for x in self.expectedDevices if\n isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) and\n x.pid == pid and\n x.vid == vid and\n x.uid == uid and\n x.inventoried == False), None)\n\n if foundItem is not None:\n if isinstance(foundItem, usb_serial_device.USBSerialDevice) == True:\n if anOSDevice.startswith('tty') == True:\n # Device is a Serial USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n else:\n # Device is a plain USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n\n\n else:\n break\n\n\n FNULL.close()\n\n # Here, we probe to see if any ethernet connected devices are up and listening for connections.\n while True:\n foundItem = next((x for x in self.expectedDevices if isinstance(x, (ethernet_device.EthernetDevice)) and \n x.inventoried == False and x.checked == False), None)\n if foundItem is not None:\n #socket.setdefaulttimeout(10.0)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(10.0)\n try:\n s.connect((foundItem.host, foundItem.port))\n foundItem.inventoried = True;\n except:\n foundItem.inventoried = False;\n # Okay to swallow!\n pass\n finally:\n s.close()\n foundItem.checked = True;\n else:\n break\n \n # Record what we found.\n self.logger.info(\"The following devices were inventoried:\")\n for x in self.expectedDevices:\n if x.inventoried == True:\n if isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) == True:\n self.logger.info(x.name + \" Device Node: \" + x.devPath)\n else:\n self.logger.info(x.name)\n self.foundDevices.append(x)", "def scan():\n debug(\"CBA4.scan()\")\n num = MpOrLibUsb.get_device_count()\n devices = []\n i = 0\n while i < num:\n cba = CBA4(interface=MpOrLibUsb(i))\n i += 1\n sn = cba.get_serial_number()\n if sn:\n devices.append(sn)\n cba.close()\n #end loop\n return devices\n #end scan()", "def check_device_changes(self):\n\n #---------------------------------------------------------------------------\n # USB ports\n current_serial_devices = self.enumerate_serial_devices()\n\n for device in self.old_serial_devices:\n if device not in current_serial_devices:\n print(\"Removed USB port: \", device)\n self.removed_serial_devices.append(device)\n\n self.arduino_change_signal.emit('OFF')\n\n for device in current_serial_devices:\n if device not in self.old_serial_devices:\n print(\"Added USB port: \", device)\n self.added_serial_devices.append(device)\n\n self.arduino_change_signal.emit('ON')\n\n self.old_serial_devices = current_serial_devices\n\n #---------------------------------------------------------------------------\n # MIDI port detection\n current_midi_devices = self.enumerate_midi_devices()\n\n for device in self.old_midi_devices:\n if device not in current_midi_devices:\n print(\"Removed MIDI port: \", device)\n self.removed_midi_devices.append(device)\n\n self.piano_change_signal.emit('OFF')\n\n for device in current_midi_devices:\n if device not in self.old_midi_devices:\n print(\"Added MIDI port: \", device)\n self.added_midi_devices.append(device)\n\n self.piano_change_signal.emit('ON')\n\n self.old_midi_devices = current_midi_devices", "def test_get_devices1(self):\n pass" ]
[ "0.6879687", "0.660112", "0.65719104", "0.6563505", "0.6508826", "0.6344587", "0.62265825", "0.6206864", "0.6176136", "0.61714685", "0.61622983", "0.61597127", "0.61597127", "0.6158857", "0.61501837", "0.6118743", "0.6103006", "0.6098765", "0.60909706", "0.6088176", "0.60640395", "0.603752", "0.60330534", "0.6021458", "0.60148245", "0.600745", "0.5997087", "0.599504", "0.5977212", "0.5977058" ]
0.7390407
0
Checks the entire connections list until END CONNECTIONS is reached
def _check_connectionlist(self):
    self.symbol = self.scanner.get_symbol()
    # Repeatedly call _check_connectionline() until END CONNECTIONS
    while (not self._is_end(self.symbol)) and (not self._is_eof(self.symbol)):
        self._check_connectionline()
    if self._is_eof(self.symbol):
        # In case file ends prematurely
        pass
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def check_connection_status(self):\n while True:\n if not self.connected:\n self.log.error(\"Lost connection to spa, attempting reconnect.\")\n await self.connect()\n await asyncio.sleep(10)\n continue\n if (self.lastupd + 5 * self.sleep_time) < time.time():\n self.log.error(\"Spa stopped responding, requesting panel config.\")\n await self.send_panel_req(0, 1)\n await asyncio.sleep(self.sleep_time)", "def check_connection(self):\n connections = [self.google.check_connection(), self.dbx.check_connection(), self.box.check_connection()]\n\n if connections.count(True) == 3:\n logging.warning(' All connections OK. System can be used for reads and writes.')\n return []\n elif connections.count(True) == 2:\n logging.critical(\"\\nOnly two connections available. System only usable for reads\")\n down = [i for i in enumerate(connections) if i == False ]\n if 0 in down:\n pass\n #logging.critical(\"Cannot connect to Google.\")\n if 1 in down:\n pass\n #logging.critical(\"Cannot connect to Dropbox\")\n if 2 in down:\n pass\n ##logging.critical(\"Cannot connect to Box\")\n return down\n elif connections.count(True) < 2:\n logging.critical(\"Sufficient connections could not be made. System unsuitable for reads or writes.\")\n down = [i for i in enumerate(connections) if i[1] == False]\n for entry in down:\n if 0 == entry[0]:\n down[0] += ('Google',)\n #logging.critical(\"Cannot connect to Google.\")\n if 1 == entry[0]:\n down[1] += ('Dropbox',)\n #logging.critical(\"Cannot connect to Dropbox\")\n if 2 == entry[0]:\n down[2] += ('Box',)\n #logging.critical(\"Cannot connect to Box\")\n return down", "def clients_done(self):\r\n if not self.client_list:\r\n return False\r\n elif len(asyncore.socket_map) > 1:\r\n return False\r\n else:\r\n return True", "def remove_connections(self):\n while self.qsize() > 0:\n try:\n cnx = self.get(block=True, timeout=self.queue_timeout)\n except queue.Empty:\n pass\n else:\n try:\n cnx.close_connection()\n except (RuntimeError, OSError, InterfaceError):\n pass\n finally:\n self.remove_connection(cnx)", "def test_connection_is_established(self):\n for conn in self.connections:\n assert conn.is_connected is True", "def check(self):\n\t\tif self.deaduntil and self.deaduntil > time.time():\n\t\t\traise StopIteration(False)\n\n\t\ttry:\n\t\t\tyield self.connect()\n\t\texcept tcp.ConnectionException, exc:\n\t\t\tself.mark_dead(exc)\n\t\t\traise StopIteration(False)\n\n\t\traise StopIteration(True)", "def accept_connections(self):\n for c in self.all_connections:\n c.close()\n self.all_connections = []\n # self.all_addresses = []\n while 1:\n\n try:\n # lock.acquire()\n conn, address = self.socket.accept()\n # If set blocking is 0 server does not wait for message and this try block fails.\n conn.setblocking(1)\n\n # This is a special message since it is authentication\n json_string = self.read_message_from_connection(conn).decode(\"utf-8\")\n\n print(\"Accepting connection \" + str(json_string))\n\n # new_message = Message.json_string_to_message(json_string)\n json_package = json.loads(json_string)\n username = json_package[\"username\"]\n password = json_package[\"password\"]\n hostname = json_package[\"hostname\"]\n host_system_username = json_package[\"host_system_username\"]\n\n # Ping timer checks whether the client is alive or not by pinging it\n t = Timer(self.ping_timer_time, self.ping, [username])\n t.start()\n\n except Exception as e:\n ColorPrint.print_message(\"ERROR\", \"accept_connections\", 'Error accepting connections: %s' % str(e))\n # Loop 
indefinitely\n continue\n # lock.release()\n # we need setblocking 0 so that select works in server_controller.\n conn.setblocking(0)\n self.all_connections.append(conn)\n # Put the newly connected client to the list\n self.all_clients[username] = conn\n # Push a message to the queue to notify that a new client is connected\n client_connected_message = Message(username, \"server\", \"event\", \"Connected\")\n\n client_received.put(client_connected_message)", "def _check_connection(self):\n for _ in range(3):\n try:\n r = get(f\"http://{self.ip}/student/{self.user}\")\n if r.ok:\n break \n except OSError as e:\n print(f\"Connection error:\\n{e}\")\n sleep(2)\n else:\n raise ConnectionError(f\"Can not connect to server with params ip: {self.ip}, user: {self.user}\")", "def _outgoing_check_cb(self, timer):\n\n for peer_id, remote in self._conf.items():\n if peer_id in self._outgoing:\n continue\n\n # A None value means that we're currently attempting a\n # connection.\n self._outgoing[peer_id] = None\n\n remote_host, remote_port = remote\n self._logger.info(\n \"attempting connection to %s:%d\",\n remote_host,\n remote_port,\n )\n\n remote_ip = socket.gethostbyname(remote_host)\n client = pyuv.TCP(self._loop)\n client.connect(\n (remote_ip, remote_port),\n partial(self._outgoing_connect_cb, peer_id),\n )\n\n for peer_id, outgoing in self._outgoing.items():\n if not outgoing:\n continue\n\n if outgoing.remote_addr == self._conf.get(peer_id):\n continue\n\n # if we get here then we have an outgoing connection that\n # doesn't belong\n self._logger.info(\n \"closing unncessary connection to %s\",\n outgoing.remote_addr,\n )\n self._outgoing_read_cb(\n peer_id,\n outgoing.handle,\n None,\n \"force close\",\n )", "def validate_connection(self):\n for hostInfo in self.client.transport.hosts:\n host = hostInfo.get('host')\n port = hostInfo.get('port')\n self.validate_server_connection(host, port)", "def check_connection(self):\n pass", "def serviceConnects(self):\n #log.debug(f\"{self.name}: servicing new connections for.\")\n for ca, ix in list(self.server.ixes.items()):\n if ix.cutoff:\n self.closeConnection(ca)\n continue\n\n if ca not in self.connections:\n log.debug(f\"Adding new connection for {ix}.\")\n self.connections[ca] = Requester(self.dhtdoer, remoter=ix, name=ca)\n\n if ix.timeout > 0.0 and ix.tymer.expired:\n self.closeConnection(ca)", "def killconnections(self):\n for conn in self._connections:\n try:conn.close()\n except:pass\n self._connections=[]", "async def checkNewLoop(self):\n pass", "def handle_conns(self, read_sockets):\n for notified_socket in read_sockets:\n print(\"Checking sockets...\")\n if notified_socket == self.server_listen_socket:\n if not self.handle_new_conn():\n continue\n \n else:\n try:\n if not self.handle_existing_conn(notified_socket):\n continue\n except Exception as e:\n print(f\"Error handling message from socket {e}\")", "def update_connections():\n try:\n connection_cache.update_connection_ddb_items()\n except ClientError as error:\n print(error)\n return True", "def run(self):\n try:\n self.is_activated=True\n while self.is_activated:\n try:\n #make sure all of the connections are saved.\n connection = self._socket.accept()\n self._connections.append(connection)\n except timeout: pass\n except Exception as e:\n logging.exception(e)\n finally:\n self.deactivate()", "def listen_connections(self):\n self.MAIN_CONNECTION.listen(server.MAX_CONNECTIONS)", "def _runner_loop(self):\n while not self._stop_loop_runner.is_set():\n with 
self._connection_observer_lock:\n if self._copy_of_connections_observers != self._connections_observers:\n self._copy_of_connections_observers = copy_list(self._connections_observers, deep_copy=False)\n # ConnectionObserver is feed by registering data_received in moler connection\n self._check_last_feed_connection_observers()\n self._check_timeout_connection_observers()\n self._remove_unnecessary_connection_observers()\n time.sleep(self._tick)", "def reconnecting(self) -> bool:", "def getSyncFor (self, conn) :\r\n for pw, _conn in self.clients :\r\n if _conn and _conn.getSyncInfo(conn) :\r\n self.ongoing_sync_count += 1\r\n return True\r\n \r\n return False", "def node_check():\n while True:\n if LAST_RECEIVED:\n for loc in dict(LAST_RECEIVED):\n if (datetime.datetime.utcnow() - LAST_RECEIVED[loc]).total_seconds() >= 60:\n ROLLING_TEMPS.pop(loc)\n ROLLING_HUMS.pop(loc)\n LAST_RECEIVED.pop(loc)\n print(\"connection to\", loc, \"has been lost\")\n else:\n print(\"No nodes found. Please check that nodes are running.\")\n\n time.sleep(6)", "def Connected(self) -> bool:", "def Connected(self) -> bool:", "def run(self):\n while True:\n # Get the list sockets which are ready to be read through select\n all_connections = list(self.connection_dict.values()) + self.new_connections\n read_sockets, _, _ = select.select(all_connections, [], [])\n\n for sock in read_sockets:\n #New connection\n if sock == self.server_socket:\n # Handle the case in which there is a new connection recieved through\n # server_socket\n sockfd, addr = self.server_socket.accept()\n self.new_connections.append(sockfd)\n self.log(\"Client (%s, %s) connected\" % addr)\n\n #Some incoming message from a client\n else:\n # Data recieved from client, process it\n try:\n #In Windows, sometimes when a TCP program closes abruptly,\n # a \"Connection reset by peer\" exception will be thrown\n data = sock.recv(self.recv_buffer)\n if data:\n self.parse(sock, data)\n except:\n self.log(\"Client (%s, %s) is offline\" % addr)\n traceback.print_exc()\n sock.close()\n self.remove_socket(sock)\n continue\n\n self.server_socket.close()", "def _check_last_feed_connection_observers(self):\n current_time = time.time()\n for connection_observer in self._copy_of_connections_observers:\n life_status = connection_observer.life_status\n if (life_status.inactivity_timeout > 0.0) and (life_status.last_feed_time is not None):\n expected_feed_timeout = life_status.last_feed_time + life_status.inactivity_timeout\n if current_time > expected_feed_timeout:\n try:\n connection_observer.on_inactivity()\n except Exception as ex:\n self.logger.exception(msg=r'Exception \"{}\" (\"{}\") inside: {} when on_inactivity.'.format(\n ex, repr(ex), connection_observer))\n connection_observer.set_exception(exception=ex)\n finally:\n connection_observer.life_status.last_feed_time = current_time", "def run(self):\n while True:\n socks = select.select(self.sockets.values(), [], [], 0.1)[0]\n for conn in socks:\n try:\n k = conn.recv(65535)\n except:\n # either died on a connection reset, or was SIGTERM's by parent\n return\n if k:\n for sock in self.sockets:\n if self.sockets[sock] == conn:\n srcif = sock\n msg = json.loads(k)\n\n # DEBUG STATEMENTS\n if False:\n print(\"<--------------------------------->\")\n print(\"[NEW MESSAGE INCOMING]\")\n print(\"SOURCE:\", srcif)\n print(\"MSG:\", msg)\n print(\"<--------------------------------->\")\n\n if not self.handle_packet(srcif, msg):\n self.send_error(conn, msg)\n else:\n print (\"ROUTES:\", self.routes)\n return", "def 
isconnected(self) -> bool:", "def callback_connect_client(self, connection_object):\n # Assign a new connection object to the address (as a key value pair)\n if not self.client_exists(connection_object.address[0]):\n if len(self.client_list) <= 6:\n self.client_list[connection_object.address[0]] = connection_object\n logger.info(f\"Client at {connection_object.address} is connected\")\n else:\n self.accepting_disallow()\n logger.info(\"Limit reached, stop accepting connections\")\n return super(MastermindServerUDP, self).callback_connect_client(connection_object)", "def wait_until_all_nodes_connected(\n ethereum_manager_connect_at_start,\n ethereum,\n timeout: int = NODE_CONNECTION_TIMEOUT,\n):\n connected = [False] * len(ethereum_manager_connect_at_start)\n try:\n with gevent.Timeout(timeout):\n while not all(connected):\n for idx, node_name in enumerate(ethereum_manager_connect_at_start):\n if node_name in ethereum.web3_mapping:\n connected[idx] = True\n\n gevent.sleep(0.1)\n except gevent.Timeout:\n names = [\n str(x) for idx, x in enumerate(ethereum_manager_connect_at_start) if not connected[idx]\n ]\n log.info(\n f'Did not connect to nodes: {\",\".join(names)} due to '\n f'timeout of {NODE_CONNECTION_TIMEOUT}',\n )" ]
[ "0.6651885", "0.66485655", "0.658435", "0.65760034", "0.65238696", "0.65040714", "0.645094", "0.63097805", "0.63036615", "0.628794", "0.6277836", "0.6257548", "0.6239977", "0.62278676", "0.6214399", "0.61141676", "0.61134064", "0.6110468", "0.6097771", "0.60918623", "0.6063635", "0.60604775", "0.6057014", "0.6057014", "0.60334", "0.6026384", "0.6022626", "0.60221153", "0.5999971", "0.59933525" ]
0.7198108
0
Checks validity of each line in the connections list
def _check_connectionline(self):
    self.connection_first_device, self.connection_first_port = \
        self._check_validconnectionoutput()
    if self._is_arrow(self.symbol):
        # Get next symbol
        self.symbol = self.scanner.get_symbol()
        self.connection_second_device, self.connection_second_port = \
            self._check_validconnectioninput()
        if len(self.semantic_errors_list) == 0 and \
                len(self.syntax_errors_list) == 0:
            # Only create connection if no previous errors
            connection_error = self._connection_maker(
                self.connection_first_device,
                self.connection_first_port,
                self.connection_second_device,
                self.connection_second_port)
            # Send the returned error ID for error reporting
            self._display_semantic_error(connection_error)
        # Run a while loop to check for possible multiple connections from
        # same output
        while (not self._is_semicolon(self.symbol)) and \
                (not self._is_eof(self.symbol)):
            if self._is_comma(self.symbol):
                self.symbol = self.scanner.get_symbol()
                self.connection_second_device, self.connection_second_port = \
                    self._check_validconnectioninput()
                if len(self.semantic_errors_list) == 0 and \
                        len(self.syntax_errors_list) == 0:
                    # Only create connection if no previous errors
                    connection_error = self._connection_maker(
                        self.connection_first_device,
                        self.connection_first_port,
                        self.connection_second_device,
                        self.connection_second_port)
                    # Send the returned error ID for error reporting
                    self._display_semantic_error(connection_error)
            else:
                # No comma
                self._display_syntax_error("comma")
                self._semicolon_skipper()
        self.symbol = self.scanner.get_symbol()
    elif self._is_semicolon(self.symbol):
        self.symbol = self.scanner.get_symbol()
    else:
        # No '->'
        self._display_syntax_error("arrow")
        self._semicolon_skipper()
        self.symbol = self.scanner.get_symbol()
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_connectionlist(self):\n self.symbol = self.scanner.get_symbol()\n # Repeatedly call _check_connectionline() until END CONNECTIONS\n while (\n not self._is_end(\n self.symbol)) and (\n not self._is_eof(\n self.symbol)):\n self._check_connectionline()\n if self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n return None", "def validate(self):\n valid = True\n \n # Check that link information is valid\n for ij in self.link:\n valid = valid and self.link[ij].head in self.node\n valid = valid and self.link[ij].tail in self.node\n if not valid:\n print(\"Error: Link tail/head not found: %s %s\" % (self.link[ij].tail, self.link[ij].head))\n raise utils.BadFileFormatException\n valid = valid and self.link[ij].capacity >= 0\n valid = valid and self.link[ij].length >= 0\n valid = valid and self.link[ij].freeFlowTime >= 0\n valid = valid and self.link[ij].alpha >= 0\n valid = valid and self.link[ij].beta >= 0\n valid = valid and self.link[ij].speedLimit >= 0\n valid = valid and self.link[ij].toll >= 0\n if not valid:\n print(\"Link %s has negative parameters.\" % ij)\n \n # Then check that all OD pairs are in range\n for ODpair in self.ODpair:\n (origin, destination) = (self.ODpair[ODpair].origin, self.ODpair[ODpair].destination)\n valid = valid and origin in self.node\n valid = valid and destination in self.node\n if not valid:\n print(\"Error: Origin/destination %s not found\" % ODpair)\n raise utils.BadFileFormatException\n valid = valid and self.node[origin].isZone == True\n valid = valid and self.node[destination].isZone == True\n if not valid:\n print(\"Error: Origin/destination %s does not connect two zones\" % str(ODpair))\n raise utils.BadFileFormatException\n valid = valid and self.ODpair[ODpair].demand >= 0\n if not valid:\n print(\"Error: OD pair %s has negative demand\" % ODpair)\n raise utils.BadFileFormatException\n \n # Now error-check using metadata\n if self.numNodes != None and len(self.node) != self.numNodes:\n print(\"Warning: Number of nodes implied by network file %d different than metadata value %d\" % (len(self.node), self.numNodes))\n self.numNodes = len(self.node)\n if self.numLinks != None and len(self.link) != self.numLinks:\n print(\"Warning: Number of links given in network file %d different than metadata value %d\" % (len(self.link), self.numLinks))\n self.numLinks = len(self.link)\n if self.numZones != None and len([i for i in self.node if self.node[i].isZone == True]) != self.numZones:\n print(\"Warning: Number of zones given in network file %d different than metadata value %d\" % (len([i for i in self.node if self.node[i].isZone == True]), self.numZones))\n self.numLinks = len(self.link)\n if self.totalDemandCheck != None:\n if self.totalDemand != self.totalDemandCheck:\n print(\"Warning: Total demand is %f compared to metadata value %f\" % ( self.totalDemand, self.totalDemandCheck))", "def check_Lines(self):\n\n pass", "def validate_connection(self):\n for hostInfo in self.client.transport.hosts:\n host = hostInfo.get('host')\n port = hostInfo.get('port')\n self.validate_server_connection(host, port)", "def validate_conn(self, solution):\r\n\r\n active_nodes = [idx for idx, value in enumerate(solution) # remove not included nodes in solution\r\n if value != 0 and idx not in self.dead_nodes and self.network.get_node(idx).energy >= cf.COMMUNICATION_ENERGY]\r\n active_nodes.append(-1) # add a sink node \r\n visited = self.DFS(self.network_graph, active_nodes[0], active_nodes)\r\n if len(visited) == len(active_nodes):\r\n return True\r\n 
else:\r\n return False", "def __validate_conn_pattern(conns:str)->str:\n pattern1 = re.compile(r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n # pattern2 = re.compile(r'^\\w+:\\w+@\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n\n for conn in conns.split(\",\"):\n if not pattern1.match(conn) and not pattern2.match(conn):\n raise argparse.ArgumentTypeError(f'Invalid connection format: {conn}. Supported formats: 127.0.0.1:32049 or user:[email protected]:32049')\n\n return conns", "def line_is_valid(line):\n if '-' in map(lambda item: item.strip(), line.strip().split(\";\")):\n return False\n else:\n return True", "def check(self):\n\n constrains = pm.ls(type='constraint')\n uselessConstrains = []\n\n for const in constrains:\n connections = const.listConnections(scn=True, s=False, d=True)\n if const in connections:\n connections.remove(const)\n\n if len(connections) == 0:\n uselessConstrains.append(const)\n\n if not uselessConstrains:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = uselessConstrains\n for obj in uselessConstrains:\n self.addError(\"%s doesn't have outgoing connections.\" % obj)\n self.errorMessage = \"%s useless constrains\" % (\n len(uselessConstrains))", "def check_line(self):\n if not self.hosts and not self.line:\n self.msg(\"There is no line here. You can create one with +line/createline.\")\n return\n return True", "def valid_connection(\n graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]\n) -> bool:\n\n # 1. Validate that path exists between current and next vertices\n if graph[path[curr_ind - 1]][next_ver] == 0:\n return False\n\n # 2. Validate that next vertex is not already in path\n return not any(vertex == next_ver for vertex in path)", "def input_file_check(router_list):\n if os.path.exists(router_list):\n print(\"Found device file ({}): OK\".format(router_list))\n else:\n print('''\nUnable to find device list >>>{}<<<, please verify it exists and/or update the\nvariable ___router_list___ at the top of this script file to point to a new one.\n\nScript error, exiting.'''.format(router_list))\n sys.exit(1)\n\n with open(router_list) as f:\n for line in f:\n if \";\" not in line:\n if \"r\" in line:\n if \"p\" in line:\n print(\"Processing line:\", line.strip())\n else:\n print(\"ERROR with line:\", line.strip())\n print('''\nYour {} file may contain invalid entries, please double check it.\n\nExamples:\n\nOne Juniper router with one peer\nr10.10.10.10, p3.3.3.3\n\nTwo Juniper routers, one with one peer, the other with multiple\nr10.20.30.40, p4.4.4.4\nr192.168.1.22, p5.5.5.5, p6.6.6.6, p7.7.7.7\n\n'''.format(router_list))\n sys.exit(1)\n\n print(\"Line check: OK\")\n return", "def check_connection(self):\n connections = [self.google.check_connection(), self.dbx.check_connection(), self.box.check_connection()]\n\n if connections.count(True) == 3:\n logging.warning(' All connections OK. System can be used for reads and writes.')\n return []\n elif connections.count(True) == 2:\n logging.critical(\"\\nOnly two connections available. System only usable for reads\")\n down = [i for i in enumerate(connections) if i == False ]\n if 0 in down:\n pass\n #logging.critical(\"Cannot connect to Google.\")\n if 1 in down:\n pass\n #logging.critical(\"Cannot connect to Dropbox\")\n if 2 in down:\n pass\n ##logging.critical(\"Cannot connect to Box\")\n return down\n elif connections.count(True) < 2:\n logging.critical(\"Sufficient connections could not be made. 
System unsuitable for reads or writes.\")\n down = [i for i in enumerate(connections) if i[1] == False]\n for entry in down:\n if 0 == entry[0]:\n down[0] += ('Google',)\n #logging.critical(\"Cannot connect to Google.\")\n if 1 == entry[0]:\n down[1] += ('Dropbox',)\n #logging.critical(\"Cannot connect to Dropbox\")\n if 2 == entry[0]:\n down[2] += ('Box',)\n #logging.critical(\"Cannot connect to Box\")\n return down", "def check_line(self, line):\n line = line.rstrip('\\r\\n')\n try:\n line = line.decode('utf8')\n except:\n pass\n return self.rules['all'].validate(line)", "def check(self): # full program\n r = re.compile('(?!(^(((?!;)[A-Z][+-]?\\d+(\\.\\d+)?\\s?)*(\\s*;\\s.*)?)$))')\n for line in self.blocks:\n if r.match(line) and line and line != '\\r' and line != '\\n':\n return False\n return True", "def _check_coms(self):\n self.com._check_rep()", "def connectlist(self):\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.CONNECT_ID):\n self.symbol = self.scanner.get_symbol()\n\n if (self.symbol.type == self.scanner.LEFT_CURLY):\n self.symbol = self.scanner.get_symbol()\n\n while (self.symbol.type == self.scanner.NAME):\n self.connection()\n # Each connection decrements pin count by one\n self.num_input_pin -= 1\n\n # Check right curly bracket ends connections block\n if (self.symbol.type == self.scanner.RIGHT_CURLY):\n self.symbol = self.scanner.get_symbol()\n else:\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.MONITOR_ID):\n # Error Type: missing '}'\n # Stopping Symbols: MONITOR' or 'END' KEYWORD\n self.error(self.MISSING_RIGHT_CURLY,\n [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Bad name terminated connections incorrectly\n # Error type: Invalid name\n # Stopping Symbols: MONITOR' or 'END' KEYWORD\n self.error(self.NAME_STRING, [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Left curly needed after 'CONNECT'\n # Stopping Symbols: MONITOR' or 'END' KEYWORD\n self.error(self.NO_CURLY_CONNECT, [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID, self.scanner.END_ID])\n\n else:\n # Error: 'CONNECT' keyword required\n # Stopping Symbols: MONITOR' or 'END' KEYWORD\n self.error(self.NEED_CONNECT_KEYWORD, [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID, self.scanner.END_ID])\n\n # Check all input pins have been connected\n if self.error_count == 0:\n if self.num_input_pin != 0:\n # Error: Floating inputs pins\n # Stopping Symbols: MONITOR' or 'END' KEYWORD\n self.error(self.FLOATING_INPUT_PIN, [self.scanner.KEYWORD],\n [self.scanner.MONITOR_ID, self.scanner.END_ID])", "def is_valid(self):\n num_lines = len(self.lines)\n if self.statement.conclusion != self.lines[num_lines-1].conclusion:\n return False\n for line_num in range(num_lines):\n line = self.lines[line_num]\n if line.rule is None:\n if line.conclusion not in self.statement.assumptions:\n return False\n if (line.rule is not None and not self.instance_for_line(line_num).is_instance_of(self.rules[line.rule])) \\\n or (line.justification is not None and any(i >= line_num for i in line.justification)):\n return False\n\n return True", "def validate(self) -> None:\n super().validate()\n if self.pipe_mode.value is SocketMode.CONNECT and self.pipe_format.value is None:\n raise Error(\"'pipe_format' required for CONNECT pipe mode.\")", "def checkLineStandardCompliance(line):\n if len(line) != 5:\n print(line + \" HAS WRONG NUMBER OF COLUMNS: \" + str(len(line)))\n exit(5)", 
"def test_validate_connector(self):\n connector = {'wwpns': [\"not empty\"],\n 'wwnns': [\"not empty\"]}\n self.volume.driver.validate_connector(connector)", "def validate_graph(self, entry):\n check_fields(entry, ['name', 'nodes'])\n for node_name in entry['nodes']:\n node = entry['nodes'][node_name]\n if len(node) < 1 or 'components' not in node:\n raise exceptions.BadInputError(f\"invalid entry for {node_name}: {node}\")\n\n self.graphs.append(entry)", "def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True", "def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True", "def validate_line(self, line):\n splitline = line.split('\\t')\n if len(splitline) is not 9:\n return []\n if not \"ID\" in splitline[8]:\n return []\n if not int(splitline[3]) <= int(splitline[4]):\n return []\n # Everything except genes must have parent id\n if not \"Parent\" in splitline[8] and not splitline[2] == \"gene\":\n return []\n return splitline", "def valid_state(line): # line is an instance of Order class\n state = line.o_state \n invalid_state = state in app.config.get('ALLOWED_STATES')\n if invalid_state:\n rule = 'Allowed states'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True", "def check(self, connection):\n return True", "def validate_relays(mapping):\n if not unique_list([ x['id'] for x in mapping ]):\n return (False, \"relay id's not unique\")\n\n chip_lines = [ (x['chip'],x['line']) for x in mapping ]\n for idx, val in enumerate(chip_lines):\n if chip_lines[idx] in chip_lines[idx+1:]:\n return (False, \"relay chip,line duplication\")\n\n return (True, None)", "def is_valid(self):\n for lineedit in self.lineedits:\n if lineedit in self.validate_data and lineedit.isEnabled():\n validator, invalid_msg = self.validate_data[lineedit]\n text = to_text_string(lineedit.text())\n if not validator(text):\n QMessageBox.critical(self, self.get_name(),\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\n QMessageBox.Ok)\n return False\n return True", "def check_all_lines(self):\n self.E_str = \"check_all_lines\"\n variables = []\n lines = self.file_ltxt\n while self.line_num < len(lines):\n line = lines[self.line_num].strip()\n if line == \"}\": self.line_num += 1; continue\n\n # Error check any variables\n if self.line_declarations['variable'](line):\n self.check_variable_line(line)\n name, _ = self.parse_variable_line(line)\n variables.append(name)\n\n # # Error check any splice commands\n # elif self.line_declarations['splice'](line):\n # self.check_splice_command(line)\n\n # Error check any file loading commands\n elif 
self.line_declarations['load'](line):\n var = self.check_load_command(line)\n variables.append(var)\n\n elif self.line_declarations['plot'](line):\n var = self.check_plot_command(line)\n if var is not None: variables.append(var)\n\n # Error check any file loading commands\n elif self.line_declarations['write'](line):\n self.check_write_command(line)\n\n # Error check any file loading commands\n elif self.line_declarations['math'](line):\n var = self.check_math_line(line)\n variables.append(var)\n\n # Error check any echo commands\n elif self.line_declarations['echo'](line):\n self.check_echo_command(line)\n\n # Error check any calc commands\n elif self.line_declarations['calc'](line):\n var = self.check_calc_command(line)\n if var != \"\": variables.append(var)\n\n # Error check any calc commands\n elif self.line_declarations['set'](line):\n var = self.check_set_command(line)\n # if var != \"\": variables.append(var)\n\n # Error check any for loop commands\n elif self.line_declarations['for'](line):\n self.check_for_command(line)\n\n # Error check any for script commands\n elif self.line_declarations['script'](line):\n _vars = self.check_script_command(line)\n for var in _vars:\n if var not in variables: variables.append(var)\n\n # Error checking the glue command\n elif self.line_declarations['glue'](line):\n var = self.check_glue_command(line)\n variables.append(var)\n\n # Error check any python commands\n elif self.line_declarations['inline_code'](line):\n if self.line_declarations['python'](line):\n _vars = self.check_python_command(line)\n for var in _vars:\n if var not in variables: variables.append(var)\n else:\n # Run the check_{script_type}_command() fnc\n getattr(self, f\"check_{line.split()[0]}_command\")(line)\n\n # Error check any python commands\n elif self.line_declarations['if'](line):\n self.check_if_statement(line)\n\n elif self.line_declarations['exit'](line):\n break\n\n self.line_num += 1\n\n # Reset the inp file variables and line number\n self.line_num = 0\n for var in set(variables):\n delattr(self, var)\n self.variables.remove(var)\n\n self.files_written = []" ]
[ "0.68231344", "0.6541388", "0.62784106", "0.6260675", "0.60896343", "0.60538226", "0.60501355", "0.597213", "0.59351665", "0.5903994", "0.58946735", "0.58336484", "0.5699847", "0.5687605", "0.567442", "0.5646546", "0.56359893", "0.56153196", "0.56123793", "0.5572138", "0.5549895", "0.55445236", "0.5537989", "0.55333453", "0.5516283", "0.5502432", "0.5470385", "0.54646146", "0.5449395", "0.5448208" ]
0.6729478
1
Create own make_connection to handle the fact that first device may sometimes not have a port specified.
def _connection_maker(self, first_device, first_port,
                      second_device, second_port):
    if first_port is None:
        return self.network.make_connection(
            first_device.id, None, second_device.id, second_port.id)
    else:
        return self.network.make_connection(
            first_device.id, first_port.id,
            second_device.id, second_port.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_connection( hostname, port = 4663 ):\n \tconnection = socket.socket();", "def connect(self, device_ip, device_port=DEFAULT_PORT):\n return", "def init_tcp_conn(target: str, port: int) -> socket.socket:\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.settimeout(5)\n try:\n conn.connect((target, port))\n return conn\n except socket.timeout as e:\n print(e)\n return None", "def open(device, **kwargs):\n if len(device.split(':')) == 6:\n from .bluepy import BluepyConnection\n return BluepyConnection(device, **kwargs)\n else:\n from .serial import SerialConnection\n return SerialConnection(device, **kwargs)", "def open(self):\n # Move all of the connection arguments into connect_args\n connect_args = {}\n\n # check for mode\n if self.get_option('port') is None:\n if self.get_option('mode') == 'telnet':\n connect_args['port'] = 23\n elif self.get_option('mode') == 'serial':\n connect_args['port'] = '/dev/ttyUSB0'\n else:\n connect_args['port'] = 830\n else:\n connect_args['port'] = self.get_option('port')\n\n if (self.get_option('mode') == 'telnet' or\n self.get_option('mode') == 'serial'):\n if self.get_option('baud') is None:\n # Default baud if serial or telnet mode\n connect_args['baud'] = 9600\n if self.get_option('attempts') is None:\n # Default attempts if serial or telnet mode\n connect_args['attempts'] = 10\n\n connect_args['host'] = self.get_option('host')\n # connect_args['port'] = self.get_option('port')\n connect_args['user'] = self.get_option('remote_user')\n connect_args['passwd'] = self.get_option('password')\n connect_args['ssh_private_key_file'] = self.get_option('private_key_file')\n connect_args['ssh_config'] = self.get_option('pyez_ssh_config')\n connect_args['timeout'] = self.get_option('persistent_connect_timeout')\n try:\n log_connect_args = dict(connect_args)\n log_connect_args[\"passwd\"] = \"NOT_LOGGING_PARAMETER\"\n\n self.queue_message(\"vvvv\", \"Creating device parameters: %s\" % log_connect_args)\n timeout = connect_args.pop(\"timeout\")\n self.dev = jnpr.junos.device.Device(**connect_args)\n self.queue_message(\"vvvv\", \"Opening device.\")\n self.dev.open()\n self.queue_message(\"vvvv\", \"Device opened.\")\n\n self.dev.timeout = self.get_option('persistent_command_timeout')\n self.queue_message(\"vvvv\", \"Setting default device timeout to %d.\" % timeout)\n # Exceptions raised by close() or open() are all sub-classes of\n # ConnectError, so this should catch all connection-related exceptions\n # raised from PyEZ.\n except pyez_exception.ConnectError as ex:\n raise AnsibleError(\"Unable to make a PyEZ connection: %s\" % (str(ex)))", "def ConnectDevice(\n self, *args, **kwargs\n ): # pylint: disable=invalid-name, no-self-use\n raise socket_error", "def connect(from_tuple, to_tuple):\n\n from_unit, from_port = from_tuple\n to_unit, to_port = to_tuple\n\n to_unit.connect(from_unit, from_port, to_port)", "def connect(self, host, port):\n pass", "def createConnection(addr):\r\n\r\n # cast port number to integer\r\n addr = (addr[0],int(addr[1]))\r\n\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.settimeout(5)\r\n try:\r\n s.connect(addr)\r\n except (socket.timeout, ConnectionRefusedError):\r\n return None\r\n return s", "def make_data_port(self):\n err = None\n sock = None\n for res in socket.getaddrinfo(None, 0, socket.AF_INET, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):\n af, socktype, proto, canonname, sa = res\n try:\n sock = socket.socket(af, socktype, proto)\n sock.bind(sa)\n except OSError as _:\n err = _\n if 
sock:\n sock.close()\n sock = None\n continue\n break\n if sock is None:\n if err is not None:\n raise err\n else:\n raise OSError(\"getaddrinfo returns an empty list\")\n sock.listen(1)\n port = sock.getsockname()[1]\n host = self.sock.getsockname()[0]\n response = self._send_port_command(host, port)\n return sock, response", "def make_port(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"0.0.0.0\", 0))\n return s.getsockname()[1]", "def test_make_connection(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n\n or1 = devices.get_device(OR1_ID)\n\n # or1 inputs are initially unconnected\n assert or1.inputs == {I1: None,\n I2: None}\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # or1 inputs should now be connected\n assert or1.inputs == {I1: (SW1_ID, None),\n I2: (SW2_ID, None)}", "def port_connection(self, sock):\n sock.bind(('', 0)) # Bind to OS-assigned available & random port.\n sock.listen(1)", "def tryconnect(name, port):\n return port_talker.TCPTalk(name, port, 2, '', None, 0, 1) # use ext. resolver", "def _setup_serial_connection(self):\n conn_set = False\n serial_port_num = 0\n\n attempts = 0\n delay = 5\n\n LOGGER.info('Attempting to esablish connection with sensors...')\n\n while not conn_set:\n try:\n serial_port = self.serial_port_pattern.format(port_num=serial_port_num)\n ser = Serial(port=serial_port, baudrate=self.baudrate)\n self.serial_port_num = serial_port_num\n conn_set = True\n except SerialException:\n attempts += 1\n msg = 'Could not establish connection w/ SCB via serial port %s. 
'\n msg += 'Attempts: %s'\n\n LOGGER.debug(msg, serial_port, attempts)\n\n if serial_port_num < 2:\n serial_port_num += 1\n else:\n serial_port_num = 0\n\n time.sleep(delay)\n\n msg = 'Connection established on port %s'\n LOGGER.info(msg, serial_port)\n return ser", "def build_connection(\r\n self,\r\n socket,\r\n address = None,\r\n datagram = False,\r\n ssl = False\r\n ):\r\n\r\n return Connection(\r\n owner = self,\r\n socket = socket,\r\n address = address,\r\n datagram = datagram,\r\n ssl = ssl\r\n )", "def __init__(self, host=\"127.0.0.1\", port=5037):\n self._devices = []", "def __init__(self, host=\"127.0.0.1\", port=5037):\n self._devices = []", "def _build_connection(self,\n node,\n source_port: PipelineNodeIO = None,\n target_port: PipelineNodeIO = None,\n filters: entities.Filters = None,\n action: str = None) -> PipelineConnection:\n if source_port is None and self.outputs:\n source_port = self.outputs[0]\n\n if target_port is None and node.inputs:\n target_port = node.inputs[0]\n\n if node.is_root():\n self._pipeline.set_start_node(self)\n\n source_connection = PipelineConnectionPort(node_id=self.node_id, port_id=source_port.port_id)\n target_connection = PipelineConnectionPort(node_id=node.node_id, port_id=target_port.port_id)\n if action is None and source_port.actions is not None and source_port.actions != []:\n action = source_port.actions[0]\n connection = PipelineConnection(source=source_connection, target=target_connection, filters=filters,\n action=action)\n return connection", "def __init__(self, port):\n self.port = port\n self.connection = serial.Serial(timeout=1)\n self.connection.port = self.port", "def get_socket():\n return socket.create_connection((HOST, PORT))", "async def connection_factory(*args, **kwargs):\n if args[1] == \"5L\":\n protocol.telegram = {\n LUXEMBOURG_EQUIPMENT_IDENTIFIER: CosemObject(\n [{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject(\n [{\"value\": \"123456789\", \"unit\": \"\"}]\n ),\n }\n if args[1] == \"5S\":\n protocol.telegram = {\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n if args[1] == \"Q3D\":\n protocol.telegram = {\n Q3D_EQUIPMENT_IDENTIFIER: CosemObject(\n [{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n }\n\n return (transport, protocol)", "def _connect_to_hardware(self):\n if False: # !!!TEMP:need to validate config...\n if len(self.config['ports']) > 1:\n self.log.fatal(\"only one slave com port is supported\")\n if len(self.config['ports']) == 0:\n self.log.warning(\"no communication port setted!\")\n return\n port = self.config['ports'][0]\n self.communicator = RaspSerialCommunicator(\n platform=self, port=port,\n baud=self.config['baud'])\n self.communicator = RaspSerialCommunicator(\n platform=self, port='/dev/ttyAMA0',\n baud=115200)", "def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: TCP port opening started...\" % \\\n self.__class__.__name__)\n errno = 115\n while errno == 115:\n try:\n self._tcp_socket.connect(self._addr)\n errno = 0\n except socket.error as fx:\n if fx.args[0] != 115:\n raise fx\n self._socket = HorizonTransport_Socket(sock = self._tcp_socket,\n host = self._addr[0],\n port = self._addr[1],\n name = \"%s:%d\" % self._addr,\n encryption =self._encryption,\n key = self._key,\n store_timeout = self.store_timeout,\n version = self.version)\n self._socket.opened = True\n logger.debug(\"%s: ...TCP port opening complete.\" % \\\n self.__class__.__name__)\n \n # Open failed\n except Exception as ex:\n 
logger.error(\"%s: ...TCP port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"TCP Port open failed!\\n\" + str(ex))", "def makeConnection(self, transport):\n pass", "def connect_device(device):\n return ConnectHandler(**device)", "def connect_device(uri):\n d = urlparse(uri)\n platform = d.scheme\n host = d.netloc\n uuid = d.path.lstrip(\"/\")\n params = dict(parse_qsl(d.query))\n if host:\n params[\"host\"] = host.split(\":\")\n dev = init_device(platform, uuid, **params)\n return dev", "def connect(self):\n\n log.info('Connecting to device \"{0}\" using {1} at \"{2}\".'.format(\n self.name, self.driver, self.connection_resource))\n\n if self.driver == drivers.pyvisa:\n try:\n if not (legacyVisa):\n rm = pyvisa.ResourceManager()\n self.device = rm.open_resource(**self.connection_resource)\n else:\n self.device = pyvisa.Instrument(**self.connection_resource)\n except pyvisa.VisaIOError as e:\n raise DeviceNotFoundError(\n 'Could not open device at \"{0}\".'.format(self.connection_resource), e)\n\n elif self.driver == drivers.telnet:\n self.device = telnetlib.Telnet(\n timeout=2, **self.connection_resource)\n elif self.driver == drivers.requests:\n r = requests.get(self.request_address)\n if r.status_code != 200:\n raise DeviceNotFoundError(\n 'Could not connect to device at \"{0}\".'.format(self.connection_resource), e)\n\n elif self.driver == drivers.lgpib:\n try:\n self.device = Gpib.Gpib(**self.connection_resource)\n except gpib.GpibError as e:\n raise DeviceNotFoundError(\n 'Could not open device at \"{0}\".'.format(self.connection_resource), e)\n elif self.driver == drivers.pyvisa_usb:\n try:\n if not (legacyVisa):\n rm = pyvisa.ResourceManager()\n self.device = rm.open_resource(**self.connection_resource)\n else:\n class USBDevice(pyvisa.Instrument):\n \"\"\"\n Using USB devices with PyVISA requires a small hack: the object must be an Instrument, but we can't call Instrument.__init__.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # Bypass the initialization in visa.Instrument, due to \"send_end\" not being valid for USB.\n pyvisa.ResourceTemplate.__init__(\n self, *args, **kwargs)\n\n self.device = USBDevice(**self.connection_resource)\n\n except pyvisa.VisaIOError as e:\n raise DeviceNotFoundError(\n 'Could not open device at \"{0}\".'.format(self.connection_resource), e)\n\n try:\n self._connected()\n except Exception as e:\n raise DeviceNotFoundError('Could not finish connection to device at \"{0}\".'.format(\n self.connection_resource), e)", "def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: TCP port opening started...\" % \\\n self.__class__.__name__)\n self._tcp_socket.bind(tuple(['',self._port]))\n self._tcp_socket.listen(self._max)\n logger.debug(\"%s: ...TCP port opening complete.\" % \\\n self.__class__.__name__)\n \n # Instantiate router\n self._platform = router.HorizonRouteable()\n self._platform._version = self._version\n self._platform.message_routed = self.message_received\n def tmp():\n return self.__str__()\n self._platform.__str__ = tmp\n self._router = router.HorizonRouter(platform = self._platform, \n clients = [], \n send_all = self._send_all)\n \n # Open failed\n except Exception as ex:\n logger.error(\"%s: ...TCP port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"TCP Port open failed!\\n\" + str(ex))", "def sequential_connect(self):\r\n\r\n # Connect to rendezvous server.\r\n try:\r\n mappings = sequential_bind(self.mapping_no + 
1, self.interface)\r\n con = self.server_connect(mappings[0][\"sock\"])\r\n except Exception as e:\r\n log.debug(e)\r\n log.debug(\"this err\")\r\n return None\r\n\r\n # First mapping is used to talk to server.\r\n mappings.remove(mappings[0])\r\n\r\n # Receive port mapping.\r\n msg = \"SOURCE TCP %s\" % (str(mappings[0][\"source\"]))\r\n con.send_line(msg)\r\n reply = con.recv_line(timeout=2)\r\n remote_port = self.parse_remote_port(reply)\r\n if not remote_port:\r\n return None\r\n\r\n # Generate port predictions.\r\n predictions = \"\"\r\n if self.nat_type != \"random\":\r\n mappings = self.predict_mappings(mappings)\r\n for mapping in mappings:\r\n predictions += str(mapping[\"remote\"]) + \" \"\r\n predictions = predictions.rstrip()\r\n else:\r\n predictions = \"1337\"\r\n\r\n return [con, mappings, predictions]" ]
[ "0.6627861", "0.6501172", "0.63174254", "0.6263077", "0.6138852", "0.6062171", "0.6033618", "0.6004392", "0.59724677", "0.59683317", "0.5954122", "0.5947945", "0.59181684", "0.59173775", "0.58790517", "0.58762664", "0.58634245", "0.58634245", "0.5857357", "0.5852349", "0.5844449", "0.5837568", "0.5832908", "0.5831868", "0.58263963", "0.58176285", "0.581365", "0.58119076", "0.57984334", "0.57971805" ]
0.79061735
0
Use network's check_network() to test all connections.
def _check_whole_network(self):
    if not self.network.check_network():
        # check_network has failed, issue error
        self._display_semantic_error("network")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_network(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n\n # Inputs are unconnected, check_network() should return False\n assert not network.check_network()\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # Inputs are now connected, check_network() should return True\n assert network.check_network()", "def test_connected(self):\n networktables_mock = unittest.mock.Mock()\n networktables_mock.isConnected.side_effect = [True, False]\n\n network_instance = network.Network(networktables_mock, None, None)\n self.assertTrue(network_instance.connected())\n self.assertFalse(network_instance.connected())", "def test_get_network(self):\n pass", "def check_network(config_name, urls = ''):\n\n logging.info(\"calling obsolete network diagnotic. Use '-interactive' instead\")\n\n config = config_namespace.ConfigNameSpace({})\n config.ExecFile(config_name)\n # get relevant parameters from config file:\n dns_servers = string.split(config.namespace['BOT_DNS_SERVERS'], ',')\n\n if Check_Gateway(config.namespace['EXTERNAL_DEFAULT_ROUTE']) != 0:\n return 1\n\n good_dns_servers = 0\n for s in dns_servers:\n if Check_DNS(s) != 4: # all other errors are non-fatal\n good_dns_servers = good_dns_servers + 1\n # if no DNS servers are up, we give up:\n if good_dns_servers == 0:\n return 1\n\n # First check the SMTP server\n logging.info(\"testing SMTP server %s\" % config.namespace['SMTP_SERVER'] )\n Check_SMTP(config.namespace['SMTP_SERVER'],\n config.namespace['EXTERNAL_CRAWL_IP'])\n\n # what about NTP:\n logging.info(\"testing NTP server %s\" % config.namespace['NTP_SERVERS'])\n for s in config.namespace['NTP_SERVERS']:\n Check_NTP(s)\n\n # SYSLOG server:\n logging.info(\"testing SYSLOG server %s\" % config.namespace['SYSLOG_SERVER'] )\n Check_SYSLOG(config.namespace['SYSLOG_SERVER'])\n\n # OK, now walk over all collections and try to get starturls\n for u in urls:\n check_url(u, dns_servers)\n\n return 0", "def test_get_networks(self):\n pass", "def main():\n return run_network_interface_check()", "def test_connection(self):\n\n self.speed_test()\n if self.runs >= self.min_runs:\n self.lg.debug('Minimum number of speed tests performed.')\n self.check_performance()\n if self.bad_performance:\n self.lg.debug('Performance is below tolerance level.')\n self.notify_ISP()\n self.results_up.pop(0)\n self.results_down.pop(0)\n self.results_timestamp.pop(0)\n self.runs += 1", "def _check_connection(self):\n for _ in range(3):\n try:\n r = get(f\"http://{self.ip}/student/{self.user}\")\n if r.ok:\n break \n except OSError as e:\n print(f\"Connection error:\\n{e}\")\n sleep(2)\n else:\n raise ConnectionError(f\"Can not connect to server with params ip: {self.ip}, user: {self.user}\")", "def test_check_connection(self):\n self.assertIsNotNone(app.check_connection())", "def test_connection(self):\n r = main.List.connection()\n self.assertTrue(r.ping(), \"Connection failed.\")", "def test_get_valid_networks_for_virtualization_realm(self):\n pass", "def test_01_verify_ipv6_network(self):\n\n self.createIpv6NetworkOffering()\n self.createIpv6NetworkOfferingForUpdate()\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n 
self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()", "def cmd_net_contest():\n\n print(\"DNS: %s\" % contest.check_dns())\n print(\"FTP: %s\" % contest.check_ftp())\n print(\"SSH: %s\" % contest.check_ssh())\n print(\"HTTP: %s\" % contest.check_http())\n print(\"HTTPS: %s\" % contest.check_https())", "def test_connection_is_established(self):\n for conn in self.connections:\n assert conn.is_connected is True", "def _check_consistency(self):\n\n # Run forward inference with n_sim=2 and catch any exception\n try:\n _, sim_data = self._forward_inference(n_sim=2, n_obs=10)\n except Exception as err:\n raise SimulationError(repr(err))\n\n # Run summary network check\n if self.summary_stats is not None:\n try:\n _ = self.summary_stats(sim_data)\n except Exception as err:\n raise SummaryStatsError(repr(err))\n\n # TODO: Run checks whether the network works with the data format\n\n # TODO: Run checks that loss works with the provided network", "def check_connection(self):\n pass", "def test_connection_is_established(self):\n assert self.connection_node_1.is_connected is True\n assert self.connection_node_2.is_connected is True\n assert self.connection_client_1.is_connected is True\n assert self.connection_client_2.is_connected is True", "def test_external_networks(self):\n network_list = self.neutron_operations.find_networks(router_external=True)\n self.assertNotEqual(len(network_list), 0, \"No external networks found\")", "def test_verify_list_of_devices_in_my_network():", "def _check_and_set_network(self) -> None:\n from hathor.transaction.storage.exceptions import WrongNetworkError\n\n network = settings.NETWORK_NAME\n stored_network = self.get_network()\n\n if stored_network is None:\n # no network is set, let's try to infer it\n self._checked_set_network(network)\n elif stored_network != network:\n # the stored network does not match, something is wrong\n raise WrongNetworkError(f'Databases created on {stored_network}, expected {network}')\n else:\n # the network is what is expected, nothing to do here\n pass", "def wifi_connectivity_verify(self):\n self.sendline(\"iw %s link\" % self.iface_wifi)\n matched = self.expect([\"Connected\", \"Not connected\", pexpect.TIMEOUT])\n if matched == 0:\n return True\n else:\n return False", "def test_verify_connection_to_a_device():", "def test_02_verify_ipv6_network_redundant(self):\n\n self.createIpv6NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()", "def checkonly(self):\n OTHER_WSREP.append(socket.gethostbyname(socket.gethostname()))\n for hostitem in ALL_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n for wsrephost in OTHER_WSREP:\n checkwsrep(wsrephost)\n print ''", "def test_add_network(self):\n pass", "def test_register_network(self):\n pass", "def test_check_workers(self):\r\n\r\n workers, client_sockets = self._setup_server_and_clients()\r\n\r\n self.assertTrue(check_workers(workers, 
client_sockets))\r\n\r\n # Now close and terminate a client, wait and check again\r\n client_sockets[0].close()\r\n self.server_socket.close()\r\n sleep(1)\r\n self.assertFalse(check_workers(workers, client_sockets))", "def check_connection(self):\n connections = [self.google.check_connection(), self.dbx.check_connection(), self.box.check_connection()]\n\n if connections.count(True) == 3:\n logging.warning(' All connections OK. System can be used for reads and writes.')\n return []\n elif connections.count(True) == 2:\n logging.critical(\"\\nOnly two connections available. System only usable for reads\")\n down = [i for i in enumerate(connections) if i == False ]\n if 0 in down:\n pass\n #logging.critical(\"Cannot connect to Google.\")\n if 1 in down:\n pass\n #logging.critical(\"Cannot connect to Dropbox\")\n if 2 in down:\n pass\n ##logging.critical(\"Cannot connect to Box\")\n return down\n elif connections.count(True) < 2:\n logging.critical(\"Sufficient connections could not be made. System unsuitable for reads or writes.\")\n down = [i for i in enumerate(connections) if i[1] == False]\n for entry in down:\n if 0 == entry[0]:\n down[0] += ('Google',)\n #logging.critical(\"Cannot connect to Google.\")\n if 1 == entry[0]:\n down[1] += ('Dropbox',)\n #logging.critical(\"Cannot connect to Dropbox\")\n if 2 == entry[0]:\n down[2] += ('Box',)\n #logging.critical(\"Cannot connect to Box\")\n return down", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def check(self):\n\n constrains = pm.ls(type='constraint')\n uselessConstrains = []\n\n for const in constrains:\n connections = const.listConnections(scn=True, s=False, d=True)\n if const in connections:\n connections.remove(const)\n\n if len(connections) == 0:\n uselessConstrains.append(const)\n\n if not uselessConstrains:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = uselessConstrains\n for obj in uselessConstrains:\n self.addError(\"%s doesn't have outgoing connections.\" % obj)\n self.errorMessage = \"%s useless constrains\" % (\n len(uselessConstrains))" ]
[ "0.77178425", "0.6734853", "0.67199486", "0.66826373", "0.6626964", "0.6584825", "0.65507597", "0.649634", "0.6491266", "0.6462607", "0.6385012", "0.6379867", "0.6345907", "0.6305116", "0.6298622", "0.6279326", "0.6273643", "0.62547606", "0.62362325", "0.6226283", "0.62199765", "0.6198212", "0.61738783", "0.61323065", "0.61252725", "0.61089677", "0.61059374", "0.6038301", "0.6023384", "0.601174" ]
0.7808896
0
Checks the entire monitors list until END MONITORS is reached
def _check_monitorlist(self): self.symbol = self.scanner.get_symbol() # Repeatedly call _check_monitorline() until END MONITORS while ( not self._is_end( self.symbol)) and ( not self._is_eof( self.symbol)): self._check_monitorline() if self._is_eof(self.symbol): # In case file ends prematurely pass return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reDetectMonitors(self):\n if self.os_type == 'Windows8':\n try:\n self.notifyPut('Running a quick monitor detect')\n self.checkMetrics()\n pythoncom.CoInitialize() # Initialize COM lib on thread\n shell = win32com.client.Dispatch('WScript.Shell')\n time.sleep(0.1)\n subprocess.Popen(['control','desk.cpl'])\n time.sleep(2)\n shell.SendKeys(\"%C\", 0)\n time.sleep(1)\n shell.SendKeys(\"%{F4}\", 0)\n except Exception, e:\n self.logQ.put('{0} - Unable to redetect display(s)'.format(e))\n if self.os_type == 'WindowsXP':\n pass\n '''\n try:\n self.checkMetrics()\n expected_total = len(self.saved_metrics) \n missing_screens = len(self.saved_metrics) - len(self.current_metrics)\n \n if missing_screens > 0:\n self.notifyPut('Attempting to redetect {0} missing monitors'.format(missing_screens))\n pythoncom.CoInitialize() # Initialize COM lib on thread\n shell = win32com.client.Dispatch('WScript.Shell')\n time.sleep(1)\n subprocess.Popen(['control','desk.cpl'])\n time.sleep(2)\n shell.SendKeys(\"^+{TAB}\", 0)\n \n time.sleep(0.5)\n for x in range(expected_total + 1 - missing_screens , expected_total + 1):\n time.sleep(0.5)\n key = \"{\"+str(x)+\"}\"\n time.sleep(0.1)\n shell.SendKeys(key , 0)\n for x in range(4):\n time.sleep(0.5)\n shell.SendKeys(\"{TAB}\", 0)\n time.sleep(0.5)\n shell.SendKeys(\" \", 0)\n shell.SendKeys(\"%A\", 0)\n time.sleep(0.5)\n shell.SendKeys(\"%{F4}\", 0)\n time.sleep(0.5)\n \n except Exception, e:\n self.logQ.put('{0} - Unable to redetect display(s)'.format(e))\n '''\n return", "def monitor(self, rms):\n pass", "def monitorlist(self):\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.MONITOR_ID):\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.LEFT_CURLY):\n self.symbol = self.scanner.get_symbol()\n self.monitor_point()\n while (self.symbol.type == self.scanner.NAME):\n self.monitor_point()\n\n # Check right curly bracket ends monitors block\n if (self.symbol.type == self.scanner.RIGHT_CURLY):\n self.symbol = self.scanner.get_symbol()\n else:\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.END_ID):\n # Error: missing '}'\n # Stopping Symbols: END' KEYWORD\n self.error(self.MISSING_RIGHT_CURLY,\n [self.scanner.KEYWORD],\n [self.scanner.END_ID])\n else:\n # Bad name terminated monitors incorrectly\n # Error: Invalid name\n # Stopping Symbols: END' KEYWORD\n self.error(self.NAME_STRING, [self.scanner.KEYWORD],\n [self.scanner.END_ID])\n else:\n # Error: Curly needed after 'MONITOR'\n # Stopping Symbols: END' KEYWORD\n self.error(self.NO_CURLY_MONITOR, [self.scanner.KEYWORD],\n [self.scanner.END_ID])\n else:\n # Error: 'MONITOR' keyword required\n # Stopping Symbols: END' KEYWORD\n self.error(self.NEED_MONITOR_KEYWORD, [self.scanner.KEYWORD],\n [self.scanner.END_ID])", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n 
icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def _monitor_loop(self):\n while self._continue_running():\n for wl in self._workloads:\n if not wl.running():\n self.log.info('%-20s FAILED', wl.name())\n self._restart_workload(wl)\n else:\n self.log.info('%-20s OK', wl.name())\n\n time.sleep(self._monitor_delay)", "async def monitor():\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())", "def monitor(self):", "def _wait_for_schedule_changes_to_apply(monitor):\n for _ in retries(\n max_retry_count=100,\n exception_message_prefix=\"Waiting for schedule to leave 'Pending' status\",\n seconds_to_sleep=5,\n ):\n schedule_desc = monitor.describe_schedule()\n if schedule_desc[\"MonitoringScheduleStatus\"] != \"Pending\":\n break", "def wait_for_signal_monitor(self, mons, timeout=None):\n if not isinstance(mons,(list,tuple)):\n mons=[mons]\n for mon in mons:\n if mon not in self._monitored_signals:\n raise KeyError(\"signal monitor {} doesn't exist\".format(mon))\n ctd=general.Countdown(timeout)\n while True:\n for mon in mons:\n if self._monitored_signals[mon].messages:\n return self.TWaitResult(mon,self._monitored_signals[mon].messages.pop(0))\n self.wait_for_any_message(ctd.time_left())", "def getMonitors(self):\n return [self.monitor]", "def monitor(self):\n while True:\n complete = True\n for thread in self._running:\n if not thread.complete:\n complete = False\n\n if thread.complete:\n thread.join()\n elif thread.failed:\n pass\n\n if complete:\n break\n time.sleep(Threadable.THREAD_SLEEP)", "def __monitor_enum_proc_callback(hmonitor_: wintypes.HMONITOR, hdc, lprect, lparam) -> bool:\n 
all_hmonitor.append(hmonitor_)\n return True", "def click_on_monitors(self: object) -> object:\n monitors = self.driver.find_element(*BasePageLocators.MONITORS)\n wait = WebDriverWait(self.driver, 10)\n wait.until(expected_conditions.element_to_be_clickable(BasePageLocators.MONITORS))\n monitors.click()\n return self", "def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))", "def monitor(self, seconds=1):\r\n\r\n for ts in range(0, seconds):\r\n self.listenFiles = self.listen_files_list(self.listenDir)\r\n FoI = list(set(self.listenFiles)-set(self.logFiles))\r\n if len(FoI) == 0:\r\n time.sleep(1)\r\n else:\r\n self.CHANGE_appendAll() # Can be probamatic for first iter..\r\n return True\r\n\r\n return False", "def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()", "def monitor(self):\n import curses\n import inspect\n\n stdscr = curses.initscr()\n curses.curs_set(0)\n curses.noecho()\n curses.cbreak()\n width_split = curses.COLS//3-1\n win_done = curses.newwin(curses.LINES-1, width_split, 0, 0)\n win_running = curses.newwin(curses.LINES-1, width_split,\n 0, width_split+1)\n win_pending = curses.newwin(curses.LINES-1, width_split,\n 0, 2*width_split+1)\n stdscr.addstr(curses.LINES-1, 0,\n 'Monitoring started. 
Press Ctrl+C to stop.')\n stdscr.refresh()\n win_done.addstr(0, 0, 'DONE')\n win_pending.addstr(0, 0, 'PENDING')\n while True:\n try:\n win_done.addstr(1, 0,\n f'{len(self.done)} jobs done')\n list_done = list(self.done)[:curses.LINES-3]\n for idx, fut in enumerate(list_done, start=2):\n fmt_str = f'{id(fut):x} {fut._state}'\n win_done.addstr(idx, 0, fmt_str)\n win_done.refresh()\n\n win_running.clear()\n win_running.addstr(0, 0, 'RUNNING')\n win_running.addstr(1, 0,\n f'{self.running.qsize()} jobs running')\n list_running = list(self.running.items())[:curses.LINES-3]\n for idx, (fut, coro) in enumerate(list_running, start=2):\n coro_state = inspect.getcoroutinestate(coro)\n fmt_str = f'{id(fut):x} {coro_state}'\n win_running.addstr(idx, 0, fmt_str)\n win_running.refresh()\n\n win_pending.clrtoeol()\n win_pending.addstr(1, 0,\n f'{self.pending.qsize()} jobs pending')\n win_pending.refresh()\n time.sleep(.1)\n except KeyboardInterrupt:\n break\n\n curses.nocbreak()\n curses.echo()\n curses.endwin()", "def monitor_and_terminate(self):\n import time\n import datetime\n\n keep_running = True\n\n while keep_running:\n\n print()\n print(datetime.datetime.now().replace(microsecond=0))\n print(self.get_monitor_string())\n\n time.sleep(30)\n\n _, status = self.reporter.get_job_status(self.info)\n if status[\"active\"]+status[\"running\"] == 0:\n keep_running = False\n\n print(\"All tasks done.\")", "def test_cycle_monitor(hlwm, mon_num, focus_idx, delta, command):\n for i in range(1, mon_num):\n hlwm.call('add tag' + str(i))\n hlwm.call('add_monitor 800x600+' + str(i * 10))\n hlwm.call(['focus_monitor', str(focus_idx)])\n assert hlwm.get_attr('monitors.focus.index') == str(focus_idx)\n assert hlwm.get_attr('monitors.count') == str(mon_num)\n\n hlwm.call([command, delta])\n\n new_index = (focus_idx + int(delta) + mon_num) % mon_num\n assert hlwm.get_attr('monitors.focus.index') == str(new_index)", "def monitor(self):\n while not self.terminated:\n try:\n if (time.time() - self.updated_time) < 5:\n messages = self.messages.copy()\n # procs = np.min([ len(messages), 9 ]) + 1\n # pool = ThreadPool(procs)\n # pool.map(self.process, messages)\n # pool.close()\n # pool.join()\n for message in messages:\n self.process(message)\n elif self.ws:\n self.updated_time += 10\n self.ws.close()\n except Exception as e:\n self.on_error(None, \"Monitoring Error: {}\".format(e))\n continue\n finally:\n time.sleep(0.1)", "def clear_monitor(self):\n self._monitored_patients = PatientList()", "def monitor(self):\n logging.debug(\"monitor entered\")\n # monitor machines...\n # first, get a list of machine IDs\n res = progress_table(self.machines)\n return res", "def run(self):\n run = True\n while run:\n statuses = self.check_status()\n unhealthy = [ups for ups in statuses.keys() if not statuses[ups]]\n drivers_to_bounce = set()\n for ups in unhealthy:\n driver = self.ups_confs[ups].get('driver', 'usbhid-ups')\n drivers_to_bounce.add(driver)\n if drivers_to_bounce:\n self.bounce_drivers(drivers_to_bounce)\n time.sleep(self.monitor_cycle/1000)", "def start_continuous_scans(self):\n monitor = self.monitor\n laser = self.devices[monitor['laser']['name']]\n \n for d in self.daqs:\n daq = self.daqs[d]\n daq_driver = self.devices[d].driver\n #daq_driver.reset_device()\n if len(daq['monitor'])>0:\n devs_to_monitor = daq['monitor'] # daqs dictionary groups the channels by daq to which they are plugged \n if daq_driver.is_task_complete(daq['monitor_task']):\n daq_driver.trigger_analog(daq['monitor_task'])\n sleep(.1)\n 
laser.driver.execute_sweep()", "def run( self ):\n while True:\n try:\n time.sleep( 5 )\n self._monitorProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )", "def monitor(instance=\"default\"):\n global logger_ic\n while True:\n try:\n with open(\"{}/{}/.{}-bmc.pid\".format(\n config.infrasim_home, instance, instance), \"r\") as f:\n pid = f.readline().strip()\n if not os.path.exists(\"/proc/{}\".format(pid)):\n logger_ic.warning(\"Node {} vBMC {} is not running, \"\n \"ipmi-console is ready to quit\".\n format(instance, pid))\n break\n time.sleep(3)\n except IOError:\n logger_ic.warning(\"Node {} workspace is possibly destroyed, \"\n \"ipmi-console is ready to quit\".format(instance))\n break\n stop(instance)", "def monitoredRun(self, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def monitoredRun(self, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def wait_until_all_activity_stops():\n if main_greenlet is None:\n return\n while other_threads_are_active():\n fake_sleep(1)", "def noMatchLaunch(self):\n self.retry_count += 1 # keep count of retry attempts\n if self.retry_count <= self.retry_limit:\n for process in range(self.redetect_limit): # only run for MAX detect limit\n time.sleep(3)\n self.checkMetrics()\n if self.metrics_match == False:\n self.logQ.put('Display Metrics do not match, attempting to redetect')\n self.reDetectMonitors()\n time.sleep(2)\n else:\n self.logQ.put('Success: Display Metrics match')\n break\n if self.metrics_match == False:\n self.logQ.put('Display Metrics still do not match, restarting system')\n self.restartSystem()\n else:\n if self.startup_flag == True:\n if self.slave_unit == False:\n self.startGsdmc()\n self.startup_flag = False\n self.startGsdm()\n else:\n self.logQ.put('Retry limit of {0} reached!'.format(self.retry_limit))\n self.logQ.put('Starting GSDMC & GSDM with current screen configuration... ')\n if self.startup_flag == True:\n if self.slave_unit == False:\n self.startGsdmc()\n self.startup_flag = False\n if self.system_awake:\n self.startGsdm()" ]
[ "0.64343464", "0.64005023", "0.636382", "0.6222358", "0.61567485", "0.61138576", "0.6028731", "0.590527", "0.5892099", "0.58482844", "0.58335686", "0.57457507", "0.5730217", "0.56420094", "0.56305265", "0.5628375", "0.55960834", "0.5589741", "0.5579765", "0.5569071", "0.55687344", "0.5545561", "0.5467073", "0.5463794", "0.5460805", "0.53668547", "0.5365453", "0.5365453", "0.53631985", "0.5350416" ]
0.68545663
0
Function that gets run once every watchdog_period_seconds.
def watchdog(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AutonomousPeriodic(self):\n Scheduler.GetInstance().Run()", "def reset_timer(self, *_) -> \"ALL\":\n self.last = time.time()\n delta = time.time() - self.last\n if delta > 180:\n print(\n \"!!! Warning: Watchdog failure detected, spawning a fallback \"\n \"thread.\"\n )\n self.watchdog = FallbackWatchdog(self)\n self.watchdog.start()", "def __periodic_maintenance__(self):\n pass", "def watchdog(self, loop):\n while True:\n if (self.reconnect_time > 0 and \n time.time() - self.reconnect_time > 60):\n\n self.logger.error(\n \"Could not reconnect to MQTT broker after 60s\")\n loop.stop()\n break\n else:\n yield from asyncio.sleep(1)", "def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)", "def watchdog(sleep_interval):\n mtimes = {}\n worker_process = restart_with_reloader()\n signal.signal(\n signal.SIGTERM, lambda *args: kill_program_completly(worker_process)\n )\n signal.signal(\n signal.SIGINT, lambda *args: kill_program_completly(worker_process)\n )\n while True:\n for filename in _iter_module_files():\n try:\n mtime = os.stat(filename).st_mtime\n except OSError:\n continue\n\n old_time = mtimes.get(filename)\n if old_time is None:\n mtimes[filename] = mtime\n continue\n elif mtime > old_time:\n kill_process_children(worker_process.pid)\n worker_process.terminate()\n worker_process = restart_with_reloader()\n mtimes[filename] = mtime\n break\n\n sleep(sleep_interval)", "def run_forever(self):\n while True:\n self.run_once()\n\n self.logger.info(f\"Sleeping for {self.config.sleep()} seconds\")\n time.sleep(self.config.sleep())", "def every_day():\n logger.info('[ EVERY_DAY ] [ %s ]' % str(datetime.now().time()))", "def periodic(self) -> None:\n self._on_iteration(self.timer.get())", "def start(self):\n self._watchdog_thread.start()", "def run(self):\n while True:\n try:\n sleep(influx_settings.write_freq)\n self._redis2influx()\n except InterruptExceptions as ex:\n raise ex", "def autonomousPeriodic(self):\n self.teleopPeriodic()", "def run_checks():\n while True:\n if datetime.now() > core.misc_data.check_date+timedelta(minutes=45):\n for stuff in stuff_to_do:\n threading.Thread(target=stuff).start()\n core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every\n time.sleep(5*60*60)", "def start_periodic_update(self) -> None:\n self._stop_periodic_update = async_track_time_interval(\n self._hass, self.async_update, timedelta(seconds=SCAN_INTERVAL)\n )", "def watch_runs(run_storage_base, options, tight_loop_timer=1):\n import time\n import crython\n\n # @crython.job(minute=range(0, 60, frequency), second=0)\n # @crython.job(second=[5]) # once every minute\n # @crython.job(second=range(0, 60, 5)) # once every 5 seconds\n @crython.job(minute=range(0, 60, 15)) # once every 15 mins\n def _process_all_cron():\n logging.info('Running scheduled autoprocessing on: %s', run_storage_base)\n process_all_runs(run_storage_base, options)\n\n crython.start()\n\n try:\n while True:\n time.sleep(tight_loop_timer)\n except KeyboardInterrupt:\n logging.info(\"Stopped watching: %s\" % run_storage_base)\n pass", "def TeleopPeriodic(self):\n Scheduler.GetInstance().Run()\n LiveWindow.Run()", "def sleep(self):\n # Just spending cycles of sleep till next date\n timeTarget = self.startTime + timedelta(hours=int(self.newsFrequency))\n while datetime.now() < timeTarget:\n # sleep for 30 min\n # TODO move time to sleep into config\n logging.info(f\"Sleep for 30 min target to wakeup {timeTarget}\")\n time.sleep(60*30)", "def _loop(self):\n 
while True:\n if GameLoop.getInstance()._cancelation_token==True:\n break\n self._update_signal.notify_all()\n sleep(1/60)", "def setup_period(self):\n for p in self.processes:\n if self.time % p.period == 0:\n p.ready = True\n p.working_time = 0", "def testPeriodic(self):\n wpilib.LiveWindow.run()", "def run(self):\n while True:\n print(\"I'm running in the background\")\n time.sleep(self.interval)", "def periodic_tasks(self, context, raise_on_error=False):\n return self.run_periodic_tasks(context, raise_on_error=raise_on_error)", "def periodic_tasks(self, context, raise_on_error=False):\n return self.run_periodic_tasks(context, raise_on_error=raise_on_error)", "def periodic_tasks(self, context, raise_on_error=False):\n return self.run_periodic_tasks(context, raise_on_error=raise_on_error)", "def _async_initialized_callback(*_: Any):\n return event.async_call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)", "def register_background_task_reporter():\n from tornado import ioloop\n\n cb = ioloop.PeriodicCallback(log_background_task_status, 60 * 3 * 1000)\n cb.start()\n return cb", "def watch(self, func, seconds=3600):\n func\n time.sleep(seconds)", "def bus_watchdog(self, value):\n self._write(MX_BUS_WATCHDOG, value)", "def periodicUpdate(self):\n\n if not self.currentStep or not self.currentStepSpace:\n # We're probably between steps\n return\n\n # Check for events\n if self.cmsswFile:\n run, event = searchForEvent(self.cmsswFile)\n if run and event:\n # Then we actually found something, otherwise do nothing\n # Right now I don't know what to do\n pass\n\n # Do timeout\n if not self.softTimeOut:\n return\n\n if time.time() - self.startTime > self.softTimeOut:\n # Then we have to kill the process\n\n # First, get the PID\n stepPID = getStepPID(self.currentStepSpace, self.currentStepName)\n\n # Now kill it!\n msg = \"\"\n msg += \"Start Time: %s\\n\" % self.startTime\n msg += \"Time Now: %s\\n\" % time.time()\n msg += \"Timeout: %s\\n\" % self.softTimeOut\n msg += \"Killing Job...\\n\"\n msg += \"Process ID is: %s\\n\" % stepPID\n if time.time() - self.startTime < self.hardTimeOut or not self.killFlag:\n msg += \"WARNING: Soft Kill Timeout has Expired:\"\n logging.error(msg)\n os.kill(stepPID, signal.SIGUSR2)\n self.killFlag = True\n elif self.killFlag:\n msg += \"WARNING: Hard Kill Timeout has Expired:\"\n logging.error(msg)\n os.kill(stepPID, signal.SIGTERM)\n killedpid, stat = os.waitpid(stepPID, os.WNOHANG)\n if killedpid == 0:\n os.kill(stepPID, signal.SIGKill)\n killedpid, stat = os.waitpid(stepPID, os.WNOHANG)\n if killedpid == 0:\n logging.error(\"Can't kill job. Out of options. Waiting for system reboot.\")\n # Panic! It's unkillable!\n pass\n\n\n # logging.error(msg)", "def startFactory(self):\n self.watchdog.start()\n super().startFactory()" ]
[ "0.64125323", "0.63243026", "0.62250376", "0.61276174", "0.60985696", "0.6009293", "0.59422404", "0.58473533", "0.57617235", "0.5760029", "0.563189", "0.56076306", "0.55893135", "0.5549261", "0.5547748", "0.5533392", "0.5522406", "0.551928", "0.54789424", "0.54440325", "0.54221445", "0.5419253", "0.5419253", "0.5419253", "0.54117006", "0.54059744", "0.5399837", "0.53902483", "0.53755164", "0.53686696" ]
0.69782615
0
Computes the L2 error of the adapted solution compared to the solution on the finest grid. The tree_adapted must have been projected to the finest grid first.
def global_error_to_finest_grid(tree_adapted, tree_finest): error = 0 for index in tree_finest.tree_leaves: error += (tree_finest.nvalue[index] - tree_adapted.nvalue[index])**2 error = math.sqrt(error) if tree_finest.dimension == 1: dx = mesh.space_step(tree_finest, tree_finest.max_level, 0) return error*dx elif tree_finest.dimension == 2: dx = mesh.space_step(tree_finest, tree_finest.max_level, 0) dy = mesh.space_step(tree_finest, tree_finest.max_level, 1) return error*dx*dy elif tree_finest.dimension == 3: dx = mesh.space_step(tree_finest, tree_finest.max_level, 0) dy = mesh.space_step(tree_finest, tree_finest.max_level, 1) dz = mesh.space_step(tree_finest, tree_finest.max_level, 2) return error*dx*dy*dz
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def potentialSolver2(self, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def getL2Error(self,exactSolution):\n value = 0\n error = np.array(self.solution)-np.array([exactSolution(x) for x in self.triangulation.points])\n for ele,triPoints in enumerate(self.triangulation.simplices):\n transformMatrix,translateVector = self.calculateTransform(ele)\n determinant = abs(np.linalg.det(transformMatrix))\n #Last vector is the precalculated integral of the basisfunctions over a reference element\n value+=determinant*np.dot(error[triPoints]**2,np.array([1/6.,1/3.,1/3.]))\n return(math.sqrt(value))", "def l2_error(dist_orig, dist_proj):\n return torch.mean((dist_orig - dist_proj) ** 2).item()", "def test_linear_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # 
determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='linear')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='linear')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred(method='linear')\n\n assert np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)", "def potentialSolver(self, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # solve potential\n for it in np.arange(1,max_it+1):\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n\n R = -self.phi[i][j][k]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1])\n\n sum += R*R;\n\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n \n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)", "def optimize(self, trial):\r\n num_leaves = trial.suggest_int(\"num_leaves\", 6, 50)\r\n min_child_samples = trial.suggest_int(\"min_child_samples\", 100, 500)\r\n min_child_weight = trial.suggest_uniform(\"min_child_weight\", 1, 7)\r\n subsample = trial.suggest_uniform(\"subsample\", 0.6, 1)\r\n colsample_bytree = trial.suggest_uniform(\"colsample_bytree\", 0.6, 1)\r\n reg_alpha = trial.suggest_uniform(\"reg_alpha\", 0.1, 100)\r\n reg_lambda = trial.suggest_uniform(\"reg_lambda\", 0.1, 100)\r\n\r\n model = LGBMRegressor(\r\n num_leaves=num_leaves,\r\n min_child_samples=min_child_samples,\r\n min_child_weight=min_child_weight,\r\n subsample=subsample,\r\n colsample_bytree=colsample_bytree,\r\n reg_alpha=reg_alpha,\r\n reg_lambda=reg_lambda,\r\n )\r\n\r\n model = 
ModelTrainer(file_object=self.file_object).get_trained_model(\r\n model, self.X_train, self.y_train\r\n )\r\n r_squared, rmse = ModelScorer(file_object=self.file_object).get_model_scores(\r\n model, self.X_test, self.y_test\r\n )\r\n\r\n return r_squared", "def estimate_tree(self):\n logger.info('TreeCatTrainer.estimate_tree given %d rows',\n len(self._added_rows))\n complete_grid = self._tree.complete_grid\n edge_logits = self.compute_edge_logits()\n edges = estimate_tree(complete_grid, edge_logits)\n return edges, edge_logits", "def potentialSolver5(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n 
self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. 
See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = 
np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] 
+ self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + 
self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def adjusted_r2_score(ctx, pipeline, initial_metrics, num_rows):\n\n # Only count root columns that end up in a formula with a nonzero weight.\n # (A \"root column\" in a column that appeared in the initial matrix, before\n # any preprocessing, and was used to derive other columns.)\n roots = _nonzero_root_columns(ctx, pipeline)\n num_cols = len(roots)\n ratio = (num_rows - 1) / (num_rows - num_cols - 1)\n\n if ratio > 0:\n r2 = initial_metrics['r2_score']\n return 1 - (1 - r2) * ratio\n\n raise expected('more rows than columns',\n f'{num_rows} rows and {num_cols} columns.')", "def epsilon_fit_Chang_homemade(l_onde,vl1,vl2,vt1,vt2,gl1,gl2,gt1,gt2,f_t1,f_t2,f_l1,f_l2,epsinf1,epsinf2):\n # Chang PRB38 12369\n v = 1e4/l_onde\n \n epsx = (epsinf1+epsinf2)/2 - (f_l1*(vl1**2 - vt1**2))/(-vt1**2 + v**2 + 1j*v*gt1) - (f_l2*(vl2**2 - vt2**2))/(-vt2**2 + v**2 + 1j*v*gt2)\n epsz = 1/(1/((epsinf1+epsinf2)/2) + (f_l1*(vl1**2 - vt1**2))/(-vt1**2 + v**2 + 1j*v*gt1) + (f_l2*(vl2**2 - vt2**2))/(-vt2**2 + v**2 + 1j*v*gt2))\n #epsx = (1/2)*(eps1+eps2)\n #epsz = 1/((1/2)*(1/eps1 + 1/eps2))\n \n #epsx = (1/2)*epsinf1*(1-(f_t1*(v**2 - vl1**2 + 1j*v*gl1)/(v**2 - vt1**2 + 1j*v*gt1))-\\\n # (f_t2*(v**2 - vl2**2 + 1j*v*gl2)/(v**2 - vt2**2 + 1j*v*gt2)))\n #epsz = 1/((1/2)*(1/epsinf2)*(1+(f_l1*(v**2 - vt1**2 +1j*v*gl1)/(v**2 - vl1**2 +1j*v*gl1))+\\\n # (f_l2*(v**2 - vt2**2 +1j*v*gl2)/(v**2 - vl2**2 +1j*v*gl2))))\n return (epsx.real + 1j*np.abs(epsx.imag)),(epsz.real + 1j*np.abs(epsz.imag))", "def l2_loss(obs, actual):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n return tf.reduce_sum(tf.square(obs - actual), 1)", "def test_cubic_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='cubic')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='cubic')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred()\n\n assert 
np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)", "def computeBound(A, omega, Q, omega2, Q2, k):\n A = A.todense()\n M = Q2.T.dot(A).dot(Q2)\n R = A.dot(Q2) - Q2.dot(M)\n \n normR = numpy.linalg.norm(R) \n lmbda, U = numpy.linalg.eigh(M)\n L2 = omega[k:]\n\n delta = float(\"inf\")\n \n for i in lmbda: \n for j in L2: \n if abs(i-j) < delta: \n delta = abs(i-j) \n \n #print(lmbda, L2)\n print(\"normR=\" + str(normR), \"delta=\"+ str(delta), \"bound=\"+str(normR/delta))\n \n return normR/delta", "def epsilon_fit_Chang(l_onde,vl1,vl2,vt1,vt2,gl1,gl2,gt1,gt2,f_l1,f_l2,f_t1,f_t2,epsinf1,epsinf2):\n # Chang PRB38 12369\n v = 1e4/l_onde\n \n epsx = (epsinf1+epsinf2)/2 - (f_t1*(vl1**2 - vt1**2))/(-vt1**2 + v**2 + 1j*v*gt1) - (f_t2*(vl2**2 - vt2**2))/(-vt2**2 + v**2 + 1j*v*gt2)\n epsz = 1/(((1/2)*(1/epsinf1 + 1/epsinf2)) + (f_l1*(vl1**2 - vt1**2))/(-vl1**2 + v**2 + 1j*v*gl1) + (f_l2*(vl2**2 - vt2**2))/(-vl2**2 + v**2 + 1j*v*gl2))\n \n# eps1 = epsinf1*(1 - (f_t1*(vl1**2 - vt1**2))/(vt1**2 - v**2 - 1j*v*gt1))\n# eps2 = epsinf2*(1 - (f_t2*(vl2**2 - vt2**2))/(vt2**2 - v**2 - 1j*v*gt2))\n# epsx = (1/2)*(eps1+eps2)\n# epsz = 1/((1/2)*(1/eps1 + 1/eps2))\n \n return (epsx.real + 1j*np.abs(epsx.imag)),(epsz.real + 1j*np.abs(epsz.imag))", "def itersolve(self, epsilon=0.85, maxiter=100, tol=1e-12):\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def relative_l2_error(u, U):\n return l2(u - U) / l2(u)", "def l2_loss(self, t, use_logit: bool = False):\n c = 0\n if use_logit:\n return np.mean([(self._irf[i].interpolant(t[p]) - logit(self._x[p, i])) ** 2\n for p in range(t.shape[0]) for i in range(self._x.shape[1])])\n else:\n return np.mean([(self._irf[i].interpolant(t[p]) - self._x[p, i]) ** 2\n for p in range(t.shape[0]) for i in range(self._x.shape[1])])", "def get_residuals_loo(self, tree):\n R_j = self.Y - (self.sum_trees_output - tree.predict_output(self.num_observations))\n return R_j", "def calc_error_parameter(X, y, target, dimension): #change if more parameters\n\n pos_max = np.argmax(y)\n best_parameters = X[pos_max, 0:dimension]\n best_parameters = np.reshape(best_parameters, (-1, 1))\n\n l2_errors = (\n np.power(best_parameters[0, :] - target[0], 2) +\n np.power(best_parameters[1, :] - target[1], 2) +\n np.power(best_parameters[2, :] - target[2], 2))\n\n return l2_errors.tolist(), best_parameters.tolist()", "def fit(self, X:np.ndarray, e=0, improved=False):\n if e>=self.height_limit or len(X)<=1:\n self.n_nodes = self.n_nodes + 1\n return Tree(X,None,None,None,None,'ex')\n else:\n Q = np.arange(X.shape[1], dtype='int')\n q = np.random.choice(Q)\n q_min = X[:,q].min()\n q_max = X[:,q].max()\n if improved:\n p_list = np.random.uniform(q_min,q_max,5)\n best_p = q_max\n x_len = len(X)\n for p in p_list:\n X_left = X[np.where(X[:,q] < p)]\n X_right = X[np.where(X[:,q] >= p)]\n if min(len(X_left), len(X_right))<=5:\n best_p = p\n break\n if min(len(X_left), len(X_right))<x_len:\n best_p = p\n else:\n best_p = np.random.uniform(q_min,q_max)\n X_left = X[np.where(X[:,q] < best_p)]\n X_right = X[np.where(X[:,q] >= best_p)]\n self.n_nodes = self.n_nodes + 1\n self.root = Tree(None,q, best_p, self.fit(X_left,e+1), self.fit(X_right,e+1), 'in')\n return self.root", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def test_for_convergence(self, error_tol):\n list_of_best_indvs = []\n for island in self._islands:\n best_indv = island.best_individual()\n 
list_of_best_indvs.append(best_indv)\n list_of_best_indvs.sort(key=lambda x: x.fitness)\n\n best_indv = list_of_best_indvs[0]\n converged = best_indv.fitness <= error_tol\n\n self._best_indv = best_indv\n self._converged = converged\n return converged", "def relative_L2_error(u, U, x):\n return L2(lambda x: u(x) - U(x), x) / L2(u, x)", "def standardError2(self):\n if self.count<=self.n:\n return float('inf')\n return self.residualNorm2()/self.sumWeight*(self.count / (self.count-self.n))", "def _grid_search_wl_kernel(\n k: WeisfilerLehman,\n subtree_candidates,\n train_x: list,\n train_y: torch.Tensor,\n lik: float,\n subtree_prior=None, # pylint: disable=unused-argument\n lengthscales=None,\n lengthscales_prior=None, # pylint: disable=unused-argument\n):\n # lik = 1e-6\n assert len(train_x) == len(train_y)\n best_nlml = torch.tensor(np.inf)\n best_subtree_depth = None\n best_lengthscale = None\n best_K = None\n if lengthscales is not None and k.se is not None:\n candidates = [(h_, l_) for h_ in subtree_candidates for l_ in lengthscales]\n else:\n candidates = [(h_, None) for h_ in subtree_candidates]\n\n for i in candidates:\n if k.se is not None:\n k.change_se_params({\"lengthscale\": i[1]})\n k.change_kernel_params({\"h\": i[0]})\n K = k.fit_transform(train_x, rebuild_model=True, save_gram_matrix=True)\n # self.logger.debug(K)\n K_i, logDetK = compute_pd_inverse(K, lik)\n # self.logger.debug(train_y)\n nlml = -compute_log_marginal_likelihood(K_i, logDetK, train_y)\n # self.logger.debug(f\"{i} {nlml}\")\n if nlml < best_nlml:\n best_nlml = nlml\n best_subtree_depth, best_lengthscale = i\n best_K = torch.clone(K)\n # self.logger.debug(f\"h: {best_subtree_depth} theta: {best_lengthscale}\")\n # self.logger.debug(best_subtree_depth)\n k.change_kernel_params({\"h\": best_subtree_depth})\n if k.se is not None:\n k.change_se_params({\"lengthscale\": best_lengthscale})\n k._gram = best_K # pylint: disable=protected-access" ]
[ "0.5702751", "0.56921744", "0.564493", "0.5359636", "0.5309151", "0.5170774", "0.51317436", "0.51264113", "0.50781935", "0.50201297", "0.49748135", "0.49748135", "0.4940611", "0.48971447", "0.48584917", "0.48562384", "0.48285002", "0.48250198", "0.48124108", "0.48025668", "0.47660094", "0.47640368", "0.47345546", "0.4716593", "0.46936524", "0.4685785", "0.4674281", "0.4671964", "0.46586794", "0.46563956" ]
0.6766839
0
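A minimal sketch of the residual-based convergence test that the Gauss-Seidel / SOR negatives above repeat in several variants, assuming NumPy arrays phi and rho of identical shape on a uniform grid; the function name and the vacuum-permittivity default are illustrative assumptions, not values taken from the records:

import numpy as np

def l2_residual(phi, rho, dh, eps0=8.854e-12):
    # Residual of the discretized Poisson equation, evaluated on interior
    # nodes only, then reduced to the RMS value that is compared against tol.
    dx2, dy2, dz2 = 1.0 / dh[0]**2, 1.0 / dh[1]**2, 1.0 / dh[2]**2
    r = (-phi[1:-1, 1:-1, 1:-1] * (2 * dx2 + 2 * dy2 + 2 * dz2)
         + rho[1:-1, 1:-1, 1:-1] / eps0
         + dx2 * (phi[:-2, 1:-1, 1:-1] + phi[2:, 1:-1, 1:-1])
         + dy2 * (phi[1:-1, :-2, 1:-1] + phi[1:-1, 2:, 1:-1])
         + dz2 * (phi[1:-1, 1:-1, :-2] + phi[1:-1, 1:-1, 2:]))
    return np.sqrt(np.sum(r**2) / phi.size)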
Retrieves an updated list of tickers for companies included in the S&P 500. Saves to a pickle file.
def retrieve_sp500(): source = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies' resp = requests.get(source) soup = bs.BeautifulSoup(resp.text, 'lxml') table = soup.find('table', {'class': 'wikitable sortable'}) tickers = [] for row in table.findAll('tr')[1:]: ticker = row.findAll('td')[0].text ticker = ticker.replace('.','-') tickers.append(ticker.rstrip()) with open('sp500_tickers.pickle', 'wb') as file: pickle.dump(tickers, file) return tickers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def retrieve_company_data(self):\n self.set_stock_sym_append_str('')\n self.set_stock_retrieval_type('all') #'all', watcher\n self.load_stock_symbol_fr_file()", "def Save(self, filename = 'tickers.dat'):\n if not self.changed:\n if not self.silent:\n print('no changes to save')\n return\n filename = os.path.join(os.path.expanduser('~'), filename)\n if not self.silent:\n print('saving {} stock lists to {}'.format(len(self.ticker_lists), filename))\n try:\n with open(filename, 'w') as f:\n for tick_list in self.ticker_lists:\n items = list(oDict.fromkeys(self.ticker_lists[tick_list])) # remove dups\n line = '\\t'.join(items)\n if line and tick_list:\n print(tick_list + '\\t' + line, file=f)\n except IOError as err:\n print('failed to save list data')", "def get_data(retrieve = False, start='2019-01-01', comp = False):\r\n if retrieve == True:\r\n tickers = retrieve_sp500()\r\n else:\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n if not os.path.exists('sp500_data'):\r\n os.mkdir('sp500_data')\r\n exchg_close = dt.time(16,0,0,0)\r\n # use todays date if markets have closed.\r\n if dt.datetime.today().time() > exchg_close:\r\n end = dt.datetime.now()\r\n # use yesterdays dates if markets have not yet closed.\r\n else: \r\n end = dt.datetime.now() - dt.timedelta(1)\r\n for ticker in tickers:\r\n # updates data for tickers not currently stored.\r\n if not os.path.exists('sp500_data/{}.csv'.format(ticker)):\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # updates data for tickers that have not been updated today.\r\n elif dt.datetime.fromtimestamp(os.path.getmtime('sp500_data/{}.csv'.format(ticker))).day != dt.datetime.today().day:\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # prints out data that was not and does not need udpating.\r\n else:\r\n print('{} is already saved'.format(ticker))\r\n if comp == True:\r\n compile_data()", "def save_pickle(companies):\n print(\"Saving companies.pickle...\")\n\n Path(\"output\").mkdir(parents=True, exist_ok=True)\n file_name = 'output/companies.pickle'\n\n companies_dict = {}\n i = 0\n while i < 500:\n company = companies[i]\n companies_dict[company.text] = {\n \"name\": company.text,\n \"url\": company.get_attribute('href'),\n }\n i += 1\n\n with open(file_name, 'wb') as handle:\n pickle.dump(companies_dict, handle)\n\n print('companies.pickle created')", "def retrieve_data_for_all_sp500_tickers(store_in_csv=False):\n df_sp500_info = get_SP500_info()\n symbol_list = list(df_sp500_info['Symbol'])\n symbol_list.append(SP500_INDEX_TICKER)\n symbol_count = 0\n for symbol in symbol_list:\n try:\n df_symbol_data = get_data_for_ticker(symbol)\n if store_in_csv:\n logger.debug(f'writing csv for {symbol}')\n filename = get_filename_for_ticker(symbol)\n df_symbol_data.to_csv(filename)\n except:\n logger.error(f'error processing: {symbol}')\n symbol_count += 1\n\n return symbol_count", "def fetch_markets_tickers(self):\r\n return self.__public_request('GET', '/api/v1/tickers')", "def _refresh_tickers(self):\n if self._tickers is None or (time.time() - self._tickers_age) > self.tickers_update_interval:\n res = self.get('/v1/tickers')\n self._tickers = {m['id']: m for m in res['markets']}\n self._tickers.update({m['id_hr']: m for m in res['markets']})\n self._tickers_age = time.time()", "def tickers(self):\n self._refresh_tickers()\n return self._tickers", "def save_company_names(self,reload = False):\n #this is a 
security measure such that the companies can not be reloaded by fault.\n if not reload:\n return\n\n # Get the html of the Wikipedia site to extract the table\n website_url = requests.get(\"https://en.wikipedia.org/w/index.php?title=List_of_S%26P_500_companies&oldid=895655255\").text\n html_site = BeautifulSoup(website_url, 'lxml')\n\n # Extract the table\n SP_Table = html_site.find('table',{'class':'wikitable sortable'})\n \n # Extract the rows of the table\n rows = SP_Table.findAll('tr')\n \n # Extract for each row in rows the second value as this is the wanted symbol\n df = pd.DataFrame(columns=['Symbol', 'FullName', 'CSVName', 'Sector'])\n for row in rows[1:]:\n # Extract the company names\n companyFullName = row.findAll('td')[1].text\n # Extract the company csv names\n companyCSVName = companyFullName.replace('*', ' ')\n # Extract the company symbols\n companySymbol = row.findAll('td')[0].text\n companySymbol = ''.join(companySymbol.split())\n sector = row.findAll('td')[3].text\n df1 = pd.DataFrame([[companySymbol, companyFullName, companyCSVName, sector]], columns=df.columns)\n df = df.append(df1, ignore_index=True)\n \n df['Sector'] = df['Sector'].apply(lambda x: x.replace('\\n', ''))\n df.to_csv(self.PATH_TO_COMPANY_FILES + '/Companies.csv', index=False)\n\n return", "def gather_stock_data(tickers, save=True):\n prices = pd.DataFrame()\n ts = TimeSeries(key='EY2QBMV6MD9FX9CP', output_format='pandas')\n\n for ticker in tickers:\n successful_grab = False\n ticker_daily_adj = None\n\n while successful_grab is not True:\n try:\n ticker_daily_adj = ts.get_daily_adjusted(ticker, outputsize='full')[0]\n successful_grab = True\n except ValueError:\n print('Waiting for API to let me in')\n time.sleep(10)\n\n ticker_daily_adj.loc[:, '0. ticker'] = ticker\n ticker_daily_adj = ticker_daily_adj[sorted(ticker_daily_adj.columns)]\n\n prices = pd.concat([prices, ticker_daily_adj])\n\n prices.sort_index(inplace=True)\n prices.reset_index(inplace=True)\n prices['date'] = pd.to_datetime(prices['date'])\n if save:\n prices.to_csv('stockdata.csv', index=True)\n\n return prices", "def get_tickers():\n\turl = \"https://api.iextrading.com/1.0/ref-data/symbols\"\n\t\n\ttry:\n\t\tresponse = requests.get(url)\n\t\tif str(response.status_code) == \"200\":\n\t\t\tprint(\"[UPDATE]: Downlaoding Tickers from iextrading API\")\n\t\t\tjson_stock_data = response.json()\n\n\t\t\tpd_stock = pandas.DataFrame(json_stock_data)\n\t\t\t# DataFrame Format\n\t\t\t# date iexId isEnabled name symbol type\n\t\t\t# 0 2019-02-12 2 True Agilent Technologies Inc. 
A cs\n\n\t\t\tprint(\"[SUCCESS]: Downloaded {} symbols from IEX.\".format(len(pd_stock.index)))\n\n\t\t\treturn pd_stock\n\n\t\telse:\n\t\t\tprint(\"[ERROR]: Download from IEX failed.\")\n\t\t\treturn \"ERROR\"\n\texcept Exception as e:\n\t\tprint(\"[ERROR]: {}\".format(e))\n\t\treturn \"ERROR\"", "def final_series():\n tickers = pd.read_excel(os.path.abspath(os.path.dirname(__file__)) +\"./codigos.xlsx\", \n header=[0]).values.flatten()\n # tickers = pd.read_excel(\"./codigos.xlsx\", \n # header=[0]).values.flatten()\n ls = fetch_series(list(set(tickers)))\n net_series = [s for s in ls if _cleasing(s, [\"D\", \"M\"]) is not None]\n p = os.path.abspath(os.path.dirname(__file__))\n with open(p + \"/series_bcb\", \"wb\") as f:\n pickle.dump(net_series, f)\n # with open(\"./series_bcb\", \"wb\") as f:\n # pickle.dump(net_series, f) ", "def component_tickers(self):\n\n f = os.path.join(pth, '..', 'static/data', self.filename)\n with open(f, 'rb') as fh:\n tickers = [rw.rstrip() for rw in fh.readlines()]\n\n return tickers", "def fetch_tickers(self, symbol):\r\n symbol = self.__transfer_symbol(symbol)\r\n return self.__public_request('GET', '/api/v1/ticker/%s' % symbol)", "def tickers(**params):\n endpoint = 'tickers'\n return request(authenticate=False, version=2, endpoint=endpoint, method='GET', query_params=params)", "def tickers(self, assets):\n symbols = self._get_v2_symbols(assets)\n log.debug('fetching tickers {}'.format(symbols))\n\n try:\n self.ask_request()\n response = requests.get(\n '{url}/v2/tickers?symbols={symbols}'.format(\n url=self.url,\n symbols=','.join(symbols),\n )\n )\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n if 'error' in response.content:\n raise ExchangeRequestError(\n error='Unable to retrieve tickers: {}'.format(\n response.content)\n )\n\n try:\n tickers = response.json()\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n ticks = dict()\n for index, ticker in enumerate(tickers):\n if not len(ticker) == 11:\n raise ExchangeRequestError(\n error='Invalid ticker in response: {}'.format(ticker)\n )\n\n ticks[assets[index]] = dict(\n timestamp=pd.Timestamp.utcnow(),\n bid=ticker[1],\n ask=ticker[3],\n last_price=ticker[7],\n low=ticker[10],\n high=ticker[9],\n volume=ticker[8],\n )\n\n log.debug('got tickers {}'.format(ticks))\n return ticks", "def ticker(self):\n response = self.query('ticker')\n return response", "def obtain_stock_names():\n\n url = 'https://en.wikipedia.org/wiki/List_of_S%26P_400_companies'\n stock_names = []\n response = requests.get(url, timeout=5)\n content = BeautifulSoup(response.content, \"html.parser\")\n\n # We get stock_names from the web page\n for stock in content.findAll('a', attrs={\"class\": \"external text\"}):\n if(len(stock.text)<=5):\n stock_names.append(stock.text)\n\n # We persist the Stock Names\n save_dir = Path(__file__).parent.parent\n filename = (save_dir / \"../data/stock_names.joblib\").resolve()\n PersistenceAPI.persist_stock_data(stock_names, filename)\n\n return stock_names", "def update_binance_data(tickers_intervals):\n for ticker, interval in tickers_intervals:\n try:\n path_to_file = os.path.join(DIR_PATH, interval, f\"{ticker}.csv\")\n csvfile = pd.read_csv(path_to_file, index_col=False, delimiter=\",\")\n starting_date = datetime.utcfromtimestamp(\n csvfile.iloc[-1][\"Open time\"]\n ).strftime(\"%d %b, %Y\")\n print(\n \"Getting historical data for the ticker {} with {} interval starting from {}\".format(\n ticker, interval, starting_date\n )\n )\n\n candlesticks = 
client.get_historical_klines(\n ticker,\n interval,\n starting_date,\n datetime.now().strftime(\"%d %b, %Y\"),\n limit=1000,\n )\n\n format_date = lambda t: t / 1000\n format_price = lambda p: float(f\"{float(p):.2f}\")\n\n # overriding the last row.\n candlesticks[0][0] = format_date(candlesticks[0][0])\n candlesticks[0][1] = format_price(candlesticks[0][1])\n candlesticks[0][2] = format_price(candlesticks[0][2])\n candlesticks[0][3] = format_price(candlesticks[0][3])\n candlesticks[0][4] = format_price(candlesticks[0][4])\n csvfile.loc[len(csvfile) - 1] = candlesticks[0][:-1]\n for candlestick in candlesticks[1:]:\n candlestick[0] = format_date(candlestick[0])\n candlestick[1] = format_price(candlestick[1])\n candlestick[2] = format_price(candlestick[2])\n candlestick[3] = format_price(candlestick[3])\n candlestick[4] = format_price(candlestick[4])\n csvfile.loc[len(csvfile)] = candlestick[:-1]\n\n csvfile.to_csv(path_to_file, index=False)\n except Exception as e:\n print(e)", "def get_btc_ticker(self):\n return self.execute_http_call(\"/api/ticker\", \"GET\", headers=None)", "def fetch_series(tickers: List[str]) -> List[dict]:\n with requests.Session() as session:\n c = suds.client.Client(\n 'https://www3.bcb.gov.br/sgspub/JSP/sgsgeral/FachadaWSSGS.wsdl',\n transport=suds_requests.RequestsTransport(session))\n \n def _fetch(tck):\n try:\n resp = c.service.getUltimoValorVO(tck)\n if resp is not None:\n return _process_info(resp)\n except:\n tcks_off.append(tck)\n\n with executor() as e:\n ls = list(e.map(_fetch, tickers))\n return ls", "def load_data(self):\n try:\n df = self.live_quote_arg_func(self.tickers)\n for index, ticker in enumerate(self.tickers):\n ticker_info = df.loc[index]\n self.ticker_dict[ticker].append(ticker_info['price'],\n ticker_info['volume'],\n ticker_info['amount'],\n ticker_info['time'])\n except Exception:\n raise ValueError('Polling thread exception')", "def updateAllHistorical():\n now = datetime.datetime.fromtimestamp(getTime())\n fiveDaysAgo = datetime.datetime.fromtimestamp(\n getTime() - daysToSeconds(5)\n )\n for stockName in db.STOCK_MAP.keys():\n try:\n historicalData = getHistoricalData(stockName, fiveDaysAgo)\n with open(\n \"static/data/\" + stockName.lower() + \".csv\",\n \"a\"\n ) as f:\n try:\n f.write(\",\".join(\n str(d) for d in historicalDictToList(\n historicalData[\"history_list\"][0]\n )\n ) + \"\\n\")\n except KeyError:\n pass\n except IOError as e:\n print \"I/O error({0}): {1}\".format(e.errno, e.strerror)\n db.UPDATING_HISTORICAL = False", "def get_data_logic():\r\n global input_exchange\r\n global input_symbols\r\n global all_symbols\r\n global input_timeframe\r\n\r\n # create exchange connection\r\n exchange = Exchange(input_exchange)\r\n\r\n # perform check that exchange can grab price data\r\n if exchange.connection.has['fetchOHLCV']:\r\n\r\n # user ticked 'All Symbols?', so includes all symbols in\r\n # exchange_tickers.py for the particular exchange\r\n if all_symbols:\r\n symbol_list = SymbolList(symbols='auto', exchange=exchange)\r\n # user didn't tick 'All Symbols?', so create unpopulated symbol list\r\n else:\r\n symbol_list = SymbolList(exchange=exchange)\r\n # add all symbols user inputted\r\n for s in input_symbols:\r\n symbol_list.input_symbol(s)\r\n\r\n # get auto timeframe and check it is valid\r\n timeframe = Timeframe(timeframe=input_timeframe, exchange=exchange)\r\n while not timeframe.check_timeframe():\r\n timeframe.input_timeframe() # default to asking for input\r\n\r\n print(f\"Pulling data on the {timeframe.tf} 
timeframe for...\")\r\n print(symbol_list.symbols)\r\n\r\n # get current time in UTC in milliseconds\r\n now = datetime.now().astimezone(pytz.timezone('UTC'))\r\n now = int(now.timestamp()*1000)\r\n\r\n # loop over each symbol and pull new data\r\n for sym in symbol_list.symbols:\r\n # create csv filename and path\r\n file_sym = sym.replace('/', '')\r\n file_sym = file_sym.replace('-', '')\r\n filename = f\"{exchange.name}_{file_sym}_{timeframe.tf}.csv\" # generate filename from given information\r\n csv_path = f\"{exchange.name}/{timeframe.tf}/{filename}\"\r\n\r\n # get most recent price data and append it to existing data\r\n # (if it exists)\r\n price_data = PriceData(exchange=exchange, tf=timeframe.tf,\r\n sym=sym, now=now, path=csv_path)\r\n\r\n # check if price data csv already exists\r\n if price_data.exists():\r\n price_data.get_current()\r\n # get new data as far back as possible if csv does not exist\r\n else:\r\n price_data.get_new()\r\n\r\n # keep updating price_data until current time\r\n price_data.update()\r\n\r\n # write to csv\r\n price_data.write()\r\n\r\n print(\"Finished writing files!\")", "def export_as_json(self):\n f = open(\"{}_historical_prices.json\".format(self.symbol), \"a\")\n f.write(self.stocks.json())\n f.close()", "def download_all():\r\n f = open('stock_symbols.txt', 'r')\r\n fout = open('../data/stocks_read.txt', 'w')\r\n count_max = 500\r\n count = 0\r\n for stock_symbol in f:\r\n stock_symbol = stock_symbol.strip()\r\n try:\r\n stock_download(stock_symbol)\r\n fout.write(stock_symbol + '\\n')\r\n except:\r\n print(\"was not able to read file \", stock_symbol)\r\n count = count + 1\r\n if count >= count_max:\r\n break\r\n f.close()\r\n fout.close", "def load_pickled_price_history(self, ticker: str) -> pd.DataFrame:\n try:\n price_history = pd.read_pickle(f\"../data/pickles/{ticker}.pkl\")\n return price_history\n except FileNotFoundError:\n print(f\"no pickle available for {ticker}; falling back to DB\")\n return None", "def save_as_json(self,json_path):\n data = {}\n for company in self:\n\n df = company.data.copy()\n df.index = df.index.map(str)\n data[company.ticker] = json.loads(df.to_json())\n\n with open(json_path, 'w') as file:\n json.dump(data, file,indent = 4,sort_keys = True)", "def load_db_price_history(self, ticker: str, conn: sqlite3.Connection) -> pd.DataFrame:\n sql = f\"SELECT date, closing_price FROM historical_prices WHERE ticker = '{ticker}' ORDER BY date;\"\n price_history = pd.read_sql(sql, conn)\n\n if not os.path.exists(Portfolio.PICKLE_DIR):\n os.mkdir(Portfolio.PICKLE_DIR)\n price_history.to_pickle(f\"../data/pickles/{ticker}.pkl\")\n return price_history", "def __init__(self, filename = 'tickers.dat', silent=False):\n self.indexes = {\n 'dow': IndexList.tickers_dow,\n 'nasdaq': IndexList.tickers_nasdaq,\n 'sp500': IndexList.tickers_sp500}\n\n filename = os.path.join(os.path.expanduser('~'), filename)\n self.changed = False\n self.silent = silent\n self.ticker_lists = {}\n try:\n with open(filename, 'r') as f:\n lines = f.readlines()\n except IOError as err:\n if not silent:\n print('failed to load list data')\n lines = []\n for line in lines:\n if line.strip():\n items = line.strip().split('\\t')\n if items[0].lower() not in self.indexes:\n self.ticker_lists[items[0].lower()] = items[1:]\n else:\n if not silent:\n print('loaded {} stock lists from {}'.format(len(self.ticker_lists), filename))" ]
[ "0.6478883", "0.64458394", "0.6418934", "0.63052785", "0.6192895", "0.61889976", "0.6067577", "0.5903302", "0.58534765", "0.5844709", "0.5843255", "0.5743359", "0.56932896", "0.55991876", "0.55669063", "0.5554215", "0.5516616", "0.5503813", "0.5497355", "0.5488232", "0.5449579", "0.54253876", "0.5410472", "0.53855026", "0.53809476", "0.53797835", "0.5315045", "0.5304827", "0.52904004", "0.5282936" ]
0.74970037
0
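A short usage sketch for the ticker record above, assuming the pickle written by retrieve_sp500() already exists on disk; only the filename comes from the record, the loader name is hypothetical:

import pickle

def load_sp500(path='sp500_tickers.pickle'):
    # Read back the cached ticker list instead of re-scraping Wikipedia.
    with open(path, 'rb') as file:
        return pickle.load(file)

tickers = load_sp500()
print(len(tickers), tickers[:5])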
Given the RNN outputs (presoftmax) at each time step and a target labeling, compute the negative of the CTC loss for one example, where the loss itself can be defined as the negative of the probability p(target | logits) as defined in equations 3 & 8
def compute_ctc_loss(self, logits, target): num_time_steps = logits.shape[0] num_labels = logits.shape[1] - 1 num_labels_with_blank = num_labels + 1 # sanity check to ensure targets are all right assert (target < num_labels).all() ###################### ### YOUR CODE HERE ### ###################### target_length = 2 * target.shape[0] + 1 normalized_logits = softmax(logits) alpha = self.compute_forward_variables(normalized_logits, target) return -np.log(np.sum(alpha[target_length-1, :] \ + alpha[target_length - 2, :], axis=0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CELOSS(output,label,delay = 0):\n if delay > 0:\n label.data[delay:,:] = label.data[0:-delay,:]\n label.data[0:delay, :] = -1\n _,predict = torch.max(output,1)\n correct = (predict.data == label.view(-1).data).sum()\n\n #correct = np.sum(predict_data == yt.reshape(-1))\n \n mask = (label>=0)\n output =F.log_softmax(output)\n labselect = label + (label<0).long()\n select = -torch.gather(output,1,labselect.view(-1,1))\n losses = mask.float().cuda().view(-1,1)*select\n loss = torch.sum(losses)/torch.sum(mask.float())\n return loss, correct", "def ctc_loss(inputs, padding_mask=-1, **kwargs):\n args = ArgHelper.parse(locals())\n inputs[0] = activation_ops.softmax(inputs[0], axis=2)\n op_lib = loss_ops_lib.Operator\n if context.executing_eagerly():\n raise NotImplementedError\n else:\n return op_lib.blend('CTCLoss', **args)", "def ctc_loss(input, target, padding_mask=-1, reduction='mean'):\n return FunctionLib.apply(\n 'CTCLoss', input.device, [input, target],\n padding_mask=padding_mask, reduction=reduction.upper())", "def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,\n K=10):\n\n # Sampling of indices is done for you. Do not modify this if you\n # wish to match the autograder and receive points!\n indices = [target]\n indices.extend(getNegativeSamples(target, dataset, K))\n\n ### YOUR CODE HERE\n grad = np.zeros_like(outputVectors)\n gradPred = np.zeros_like(predicted)\n cost = 0.0\n probability = 0.0\n for sample_idx in indices:\n similarity = outputVectors[sample_idx].dot(predicted.T)\n probability = sigmoid(similarity) # squash to 0 ~ 1\n if sample_idx == target: # positive sample\n #p = sigmoid(outputVectors[sample_idx].dot(predicted.T))\n cost += -np.log(sigmoid(similarity))\n else: # negative sample\n #p = sigmoid(-outputVectors[sample_idx].dot(predicted.T))\n cost += -np.log(sigmoid(-similarity)) # deduction from reference 2.\n \n if sample_idx == target:\n grad[sample_idx, :] += (probability - 1) * predicted\n gradPred += (probability - 1) * outputVectors[sample_idx]\n else:\n grad[sample_idx, :] += probability * predicted\n gradPred += probability * outputVectors[sample_idx]\n '''\n V, D = outputVectors.shape\n one_hot_target = np.zeros(V)\n one_hot_target[target] = 1\n cost = 0\n gradPred = np.zeros_like(predicted)\n grad = np.zeros_like(outputVectors)\n \n for idx in indices:\n context_vector = outputVectors[idx] # embedding vector (1, D)\n cosine_similarity = normalizeRows(predicted).dot(normalizeRows(context_vector).T)\n print('neg sample, consine_similarity={0}'.format(cosine_similarity))\n binary_class = sigmoid(cosine_similarity)\n print('neg sample, binary_class={0}'.format(binary_class))\n \n if idx == target:\n cost += binary_class - 1\n else:\n cost += binary_class\n \n dlogits = sigmoid_grad(cosine_similarity)\n #gradPred += dlogits * normalizeRows(context_vector)\n #grad += np.outer(one_hot_target, dlogits * normalizeRows(predicted))\n gradPred += dlogits\n grad += np.outer(one_hot_target, dlogits)\n '''\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def loss_fn(outputs, labels, wts):\n\n # reshape labels to give a flat vector of length batch_size*seq_len\n loss_noreduce = nn.BCEWithLogitsLoss(reduce=False)\n loss = torch.mean(loss_noreduce(outputs, labels)*wts)\n\t\n # compute cross entropy loss for all tokens\n return loss", "def _add_mxnet_ctc_loss(pred, seq_len, label):\n pred_ctc = mx.sym.Reshape(data=pred, shape=(-4, seq_len, -1, 0))\n\n loss = mx.sym.contrib.ctc_loss(data=pred_ctc, label=label)\n ctc_loss = 
mx.sym.MakeLoss(loss)\n\n softmax_class = mx.symbol.SoftmaxActivation(data=pred)\n softmax_loss = mx.sym.MakeLoss(softmax_class)\n softmax_loss = mx.sym.BlockGrad(softmax_loss)\n return mx.sym.Group([softmax_loss, ctc_loss])", "def ctc_loss(label_length, ctc_input_length, labels, probs):\n label_length = tf.cast(tf.squeeze(label_length), dtype=tf.int32)\n ctc_input_length = tf.cast(tf.squeeze(ctc_input_length), dtype=tf.int32)\n sparse_labels = tf.cast(tf.keras.backend.ctc_label_dense_to_sparse(labels, label_length), dtype=tf.int32)\n y_pred = tf.log(tf.transpose(probs, perm=[1, 0, 2]) + tf.keras.backend.epsilon())\n return tf.expand_dims(tf.nn.ctc_loss(labels=sparse_labels, inputs=y_pred, sequence_length=ctc_input_length), axis=1)", "def gen_criterion(dis_preds, ctc_loss):\n return ctc_loss - torch.mean(dis_preds)\n # return -torch.mean(dis_preds)", "def get_lr_cost(self):\n\n\t\tlabels = self.get_symbolic_expected_rewards()\n\n\t\treturn -theano.tensor.mean(\n\t\t\ttheano.tensor.log(labels)[\n\t\t\t\ttheano.tensor.arange(self.symbolic_output.shape[0]),\n\t\t\t\tself.symbolic_output])", "def maxup_loss(self, outputs, extra_labels):\n batch_size = outputs.shape[0] // self.ntrials\n correct_preds = (torch.argmax(outputs.data, dim=1) == extra_labels).sum().item() / self.ntrials\n stacked_loss = self.max_criterion(outputs, extra_labels).view(batch_size, self.ntrials, -1)\n loss = stacked_loss.max(dim=1)[0].mean()\n\n return loss, correct_preds", "def _neg_loss(outputs: torch.Tensor, targets: torch.Tensor):\n pos_inds = targets.eq(1).float()\n neg_inds = targets.lt(1).float()\n\n neg_weights = torch.pow(1 - targets, 4)\n\n loss = 0\n\n pos_loss = torch.log(outputs) * torch.pow(1 - outputs, 2) * pos_inds\n neg_loss = torch.log(1 - outputs) * torch.pow(outputs, 2) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss", "def bce_loss(input, target):\r\n neg_abs = - input.abs()\r\n loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()\r\n return loss.mean()", "def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,\n K=10):\n\n # Sampling of indices is done for you. 
Do not modify this if you\n # wish to match the autograder and receive points!\n indices = [target]\n indices.extend(getNegativeSamples(target, dataset, K))\n\n ### YOUR CODE HERE\n # Similar to softmax, we find the target matrix\n # v_c structured as 1xD matrix\n # u_o assume to be 1xD matrix\n # u_k assume to be K x D matrix\n # we pull the data assuming that each row represent one vector\n v_c = predicted\n u_o = outputVectors[target]\n u_k = outputVectors[indices]\n\n\n # The intermediary matrix outputs\n # z_o, h_o: single scalar number\n # z_k, h_k: K x 1 vector, wich each number associated with a neg sample\n z_o = np.dot(u_o, v_c)\n h_o = sigmoid(z_o)\n z_k = np.dot(u_k, v_c)\n h_k = sigmoid( - z_k)\n\n J_1 = - np.log(h_o)\n J_2 = - np.sum( np.log(h_k) )\n cost = J_1+ J_2\n\n # Return the gradient for the prediction function\n # the prediction vector interacts with both the predicted vector\n # the negative sample vectors so below are both parts of the gradient\n # here we are trying to increase the prediction matrix to maximize\n # the similarity with the predicted vector\n # output is a 1 x D matrix\n grad_pred_o = - (1 - h_o)*u_o\n\n # the second part is tyring to decrease\n # similarity with the negative sample vectors\n # K x 1 multiply be input is a k x D matrix, we will need to sum all negative samples\n # along the rows. output is a 1 x D matrix\n # reshape h_k so that it can multiple\n grad_pred_k = np.dot(( 1 - h_k).T, u_k)\n # find the predicted matrix gradient\n # output is a 1 x D matrix\n gradPred = grad_pred_o + grad_pred_k\n\n\n # Return the gradient of the output vector\n # create a matrix the same shape as outputVector\n grad = np.zeros(outputVectors.shape)\n # first find the gradient wrt to the target output vector\n # here we want to increase the similarity between\n # the target output vector and the center vector\n # outputs is a 1 x D matrix\n grad_u_o = - (1-h_o)*v_c\n\n # print('***************grad_u_o************')\n # print(grad_u_o)\n # print(grad_u_o.shape)\n # replace the target row in output vector gradient\n grad[target, ] = grad_u_o\n # then find the gradient descent of all the u_k matrices\n # K x 1 matrix multiply by 1 x 3\n # K x D\n grad_uk = - np.outer((h_k - 1), v_c)\n # print('***************grad_uk************')\n # print(grad_uk)\n # for each token (row) replace gradient\n for k in xrange(u_k.shape[0]):\n index = indices[k]\n grad[index] += grad_uk[k]\n\n ### END YOUR CODE\n return cost, gradPred, grad", "def rpn_cls_loss(*args):\n y_true, y_pred = args if len(args) == 2 else args[0]\n indices = tf.where(tf.not_equal(y_true, -1))\n target = tf.gather_nd(y_true, indices)\n output = tf.gather_nd(y_pred, indices)\n lf = tf.losses.BinaryCrossentropy()\n return lf(target, output)", "def replacement_ctc(self, model, target, output):\n\tif model.get_backend().get_name() == 'pytorch':\n\t\tpytest.xfail('Backend \"pytorch\" does not use a CTC loss function.')\n\treturn replacement_ctc.original(self, model, target, output)", "def bce_loss(input, target):\n neg_abs = -input.abs()\n loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()\n return loss.mean()", "def lossFunc(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {} # input, hidden, output, out_prob states for each time t\n hs[-1] = np.copy(hprev)\n loss = 0\n \n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) \n xs[t][inputs[t]] = 1. 
# convert input to one-hot\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)\n ys[t] = np.dot(Why, hs[t]) + by\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))\n loss += -np.log(ps[t][targets[t],0])\n \n # backward pass\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(xrange(len(inputs))):\n # backprop into y\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1\n # backprop into Why, hs, and by\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext\n # backprop through tanh activition\n dhraw = (1 - hs[t] * hs[t]) * dh\n # backprop into Wxh, Whh, hs, and bh\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n # clip gradient preventing exploding\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def test_model(preds, target):\n ### START CODE HERE (Replace instances of 'None' with your code) ###\n print(preds.shape, target.shape)\n total_log_ppx = np.sum(preds * tl.one_hot(target, preds.shape[-1]), axis= -1) # HINT: tl.one_hot() should replace one of the Nones\n print(total_log_ppx.shape)\n \n non_pad = 1.0 - np.equal(target, 0) # You should check if the target equals 0\n ppx = total_log_ppx * non_pad # Get rid of the padding\n\n log_ppx = np.sum(ppx) / np.sum(non_pad)\n ### END CODE HERE ###\n \n return -log_ppx", "def loss(self, targets, scores):\n return (2. * numpy.arctan(targets * scores) - 1.)**2", "def compute_ctc_loss(criterion, ip, tgt, tgt_lens):\n ip_lens = torch.full(size=(ip.shape[1],), fill_value=ip.shape[0])\n return criterion(ip, tgt, ip_lens, tgt_lens)", "def get_loss(self, model, target, output):\n\t\tbackend = model.get_backend()\n\n\t\tif backend.get_name() == 'keras':\n\n\t\t\timport keras.backend as K\n\n\t\t\tif 'warp' in self.variant:\n\n\t\t\t\t# Just use the built-in Keras CTC loss function.\n\t\t\t\tlogger.info('Attaching Warp-CTC loss function to model '\n\t\t\t\t\t'output \"%s\".', target)\n\n\t\t\t\tif backend.get_toolchain() != 'theano':\n\t\t\t\t\tlogger.error('If you want to use warp-ctc, you need to '\n\t\t\t\t\t\t'use the Theano backend to Keras.')\n\t\t\t\t\traise ValueError('Warp-CTC is currently only supported '\n\t\t\t\t\t\t'with the Theano backend to Keras.')\n\n\t\t\telse:\n\t\t\t\t# Just use the built-in Keras CTC loss function.\n\t\t\t\tlogger.debug('Attaching built-in Keras CTC loss function to '\n\t\t\t\t\t'model output \"%s\".', target)\n\n\t\t\tctc_scaled = 'ctc_scaled_{}'.format(self.input_length)\n\t\t\tflattened_labels = 'ctc_flattened_labels_{}'.format(target)\n\n\t\t\ttranscript_length = K.placeholder(\n\t\t\t\tndim=2,\n\t\t\t\tdtype='int32',\n\t\t\t\tname=self.output_length\n\t\t\t)\n\t\t\ttranscript = K.placeholder(\n\t\t\t\tndim=2,\n\t\t\t\tdtype='int32',\n\t\t\t\tname=flattened_labels if 'warp' in self.variant \\\n\t\t\t\t\telse self.output\n\t\t\t)\n\t\t\tutterance_length = K.placeholder(\n\t\t\t\tndim=2,\n\t\t\t\tdtype='int32',\n\t\t\t\tname=self.input_length if self.relative_to is None \\\n\t\t\t\t\telse ctc_scaled\n\t\t\t)\n\n\t\t\tif self.relative_to is not None:\n\t\t\t\tmodel.add_data_source(\n\t\t\t\t\tctc_scaled,\n\t\t\t\t\tScaledSource(\n\t\t\t\t\t\tmodel,\n\t\t\t\t\t\trelative_to=self.relative_to,\n\t\t\t\t\t\tto_this=target,\n\t\t\t\t\t\tscale_this=self.input_length\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\tif 
'warp' in self.variant:\n\t\t\t\tmodel.add_data_source(\n\t\t\t\t\tflattened_labels,\n\t\t\t\t\tFlattenSource(\n\t\t\t\t\t\tself.output,\n\t\t\t\t\t\tself.output_length\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\t\ttry:\n\t\t\t\t\timport ctc\t\t\t\t\t# pylint: disable=import-error\n\t\t\t\texcept ImportError:\n\t\t\t\t\tlogger.error('The warp-CTC loss function was requested, '\n\t\t\t\t\t\t'but we cannot find the \"ctc\" library. See our '\n\t\t\t\t\t\t'troubleshooting page for helpful tips.')\n\t\t\t\t\traise ImportError('Cannot find the \"ctc\" library, which '\n\t\t\t\t\t\t'is needed when using the \"warp\" variant of the CTC '\n\t\t\t\t\t\t'loss function.')\n\n\t\t\t\tout = ctc.cpu_ctc_th(\n\t\t\t\t\toutput.dimshuffle((1, 0, 2)),\n\t\t\t\t\tK.squeeze(utterance_length, -1),\n\t\t\t\t\ttranscript[0]+1,\n\t\t\t\t\tK.squeeze(transcript_length, -1)\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tout = K.ctc_batch_cost(\n\t\t\t\t\ttranscript,\n\t\t\t\t\toutput,\n\t\t\t\t\tutterance_length,\n\t\t\t\t\ttranscript_length\n\t\t\t\t)\n\n\t\t\tif 'loss_scale' in self.variant:\n\t\t\t\tlogger.debug('Loss scaling is active.')\n\t\t\t\tout = out * K.mean(\n\t\t\t\t\tK.cast(utterance_length, K.dtype(out))\n\t\t\t\t) / 100\n\n\t\t\treturn (\n\t\t\t\t(\n\t\t\t\t\t(self.output_length, transcript_length),\n\t\t\t\t\t(flattened_labels if 'warp' in self.variant \\\n\t\t\t\t\t\telse self.output, transcript),\n\t\t\t\t\t(self.input_length if self.relative_to is None \\\n\t\t\t\t\t\telse ctc_scaled, utterance_length)\n\t\t\t\t),\n\t\t\t\tout\n\t\t\t)\n\n\t\telif backend.get_name() == 'pytorch':\n\n\t\t\tif 'warp' not in self.variant:\n\t\t\t\tlogger.error('PyTorch does not include a native CTC loss '\n\t\t\t\t\t'function yet. However, PyTorch bindings to Warp CTC are '\n\t\t\t\t\t'available (SeanNaren/warp-ctc). Try installing that, and '\n\t\t\t\t\t'then settings variant=warp.')\n\t\t\t\traise ValueError('Only Warp CTC is supported for PyTorch '\n\t\t\t\t\t'right now.')\n\n\t\t\tctc_scaled = 'ctc_scaled_{}'.format(self.input_length)\n\t\t\tflattened_labels = 'ctc_flattened_labels_{}'.format(target)\n\t\t\ttranscript_length = model.data.placeholder(\n\t\t\t\tself.output_length,\n\t\t\t\tlocation='cpu',\n\t\t\t\tdata_type='int'\n\t\t\t)\n\t\t\ttranscript = model.data.placeholder(\n\t\t\t\tflattened_labels,\n\t\t\t\tlocation='cpu',\n\t\t\t\tdata_type='int'\n\t\t\t)\n\t\t\tutterance_length = model.data.placeholder(\n\t\t\t\tself.input_length if self.relative_to is None else ctc_scaled,\n\t\t\t\tlocation='cpu',\n\t\t\t\tdata_type='int'\n\t\t\t)\n\n\t\t\tif self.relative_to is not None:\n\t\t\t\tmodel.add_data_source(\n\t\t\t\t\tctc_scaled,\n\t\t\t\t\tScaledSource(\n\t\t\t\t\t\tmodel,\n\t\t\t\t\t\trelative_to=self.relative_to,\n\t\t\t\t\t\tto_this=target,\n\t\t\t\t\t\tscale_this=self.input_length\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\tif 'warp' in self.variant:\n\t\t\t\tmodel.add_data_source(\n\t\t\t\t\tflattened_labels,\n\t\t\t\t\tFlattenSource(\n\t\t\t\t\t\tself.output,\n\t\t\t\t\t\tself.output_length\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\ttry:\n\t\t\t\tfrom warpctc_pytorch import CTCLoss\t# pytorch: disable=import-error\n\t\t\texcept ImportError:\n\t\t\t\tlogger.error('The warp-CTC loss function was requested, '\n\t\t\t\t\t'but we cannot find the \"warpctc_pytorch\" library. 
See '\n\t\t\t\t\t'out troubleshooting page for helpful tips.')\n\t\t\t\traise ImportError('Cannot find the \"warpctc_pytorch\" library, '\n\t\t\t\t\t'which is needed when using the \"warp\" variant of the CTC '\n\t\t\t\t\t'loss function.')\n\n\t\t\tloss = model.data.move(CTCLoss())\n\n\t\t\tdef basic_ctc_loss(inputs, output):\n\t\t\t\t\"\"\" Computes CTC loss.\n\t\t\t\t\"\"\"\n\t\t\t\treturn loss(\n\t\t\t\t\toutput.transpose(1, 0).contiguous(),\n\t\t\t\t\tinputs[0][0]+1,\t\t# transcript[0]+1\n\t\t\t\t\tinputs[1].squeeze(1),\t# K.squeeze(utterance_length, -1),\n\t\t\t\t\tinputs[2].squeeze(1)\t# K.squeeze(transcript_length, -1)\n\t\t\t\t) / output.size(0)\n\n\t\t\tif 'loss_scale' in self.variant:\n\t\t\t\tlogger.debug('Loss scaling is active.')\n\n\t\t\t\tdef loss_scale(inputs, output):\n\t\t\t\t\t\"\"\" Computes CTC loss.\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tfactor = inputs[1].float().mean().data[0] / 100.\n\t\t\t\t\treturn basic_ctc_loss(inputs, output) * factor\n\n\t\t\t\tget_ctc_loss = loss_scale\n\t\t\telse:\n\t\t\t\tget_ctc_loss = basic_ctc_loss\n\n\t\t\treturn [\n\t\t\t\t[\n\t\t\t\t\t(flattened_labels if 'warp' in self.variant \\\n\t\t\t\t\t\telse self.output, transcript),\n\t\t\t\t\t(self.input_length if self.relative_to is None \\\n\t\t\t\t\t\telse ctc_scaled, utterance_length),\n\t\t\t\t\t(self.output_length, transcript_length)\n\t\t\t\t],\n\t\t\t\tget_ctc_loss\n\t\t\t]\n\n\t\telse:\n\t\t\traise ValueError('Unsupported backend \"{}\" for loss function \"{}\"'\n\t\t\t\t.format(backend.get_name(), self.get_name()))", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n y_hat = softmax(np.dot(outputVectors,predicted))\n y = np.zeros(outputVectors.shape[0])\n y[target] = 1.0\n\n cost = -np.log(y_hat[target])\n gradPred = np.dot(outputVectors.T,y_hat - y)\n grad = np.outer(y_hat - y,predicted)\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def add_loss_op(self, preds):\n ### YOUR CODE HERE (~2-4 lines)\n trans = tf.get_variable('trans',\n shape=[Config.n_classes, Config.n_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n log_likelihood, _ = crf_log_likelihood(preds,\n self.labels_placeholder,\n self.length_placeholder,\n trans)\n #log_likelihood = tf.boolean_mask(log_likelihood, self.mask_placeholder)\n loss = tf.reduce_mean(-1.0 * log_likelihood)\n \n ### END YOUR CODE\n return trans, loss", "def _add_warp_ctc_loss(pred, seq_len, num_label, label):\n label = mx.sym.Reshape(data=label, shape=(-1,))\n label = mx.sym.Cast(data=label, dtype='int32')\n return mx.sym.WarpCTC(data=pred, label=label, label_length=num_label, input_length=seq_len)", "def structured_loss(predict, frame_mask, label_weight, tp_weight, fp_weight, num_false_neg):\n # tp_weight = torch.autograd.Variable(torch.Tensor(tp_weight), requires_grad=False).cuda()\n # fp_weight = torch.autograd.Variable(torch.Tensor(fp_weight), requires_grad=False).cuda()\n tp_weight = torch.tensor(tp_weight, requires_grad=False).cuda()\n fp_weight = torch.tensor(fp_weight, requires_grad=False).cuda()\n tp_mask = torch.tensor(\n tp_weight > 0, dtype=torch.float32, requires_grad=False).cuda()\n\n # tp_cost = (tp_mask - tp_weight * predict) * label_weight * frame_mask\n tp_cost = tp_weight * -predict * label_weight * frame_mask\n fp_cost = fp_weight * predict * frame_mask\n\n tp_cost = tp_cost.sum(2).sum(0).view(-1)\n fp_cost = fp_cost.sum(2).sum(0).view(-1)\n\n fn_cost = torch.autograd.Variable(\n torch.Tensor(num_false_neg), requires_grad=False).cuda()\n\n return tp_cost, fp_cost, 
fn_cost", "def loss_fn(self, targets, outputs, model):", "def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]", "def calculate_perplexity(loss):\n return math.exp(float(loss)) if loss < 300 else float(\"inf\")", "def ctc_loss(data=None, label=None, data_lengths=None, label_lengths=None, use_data_lengths=_Null, use_label_lengths=_Null, blank_label=_Null, out=None, name=None, **kwargs):\n return (0,)", "def negSamplingCostAndGradient(predicted_vc, target, outputVectors_uk, dataset,\n K=10):\n\n # Sampling of indices is done for you. Do not modify this if you\n # wish to match the autograder and receive points!\n indices = [target]\n indices.extend(getNegativeSamples(target, dataset, K))\n\n cost = 0.0\n sigmd_uoT_vc = sigmoid(np.dot(predicted_vc.reshape(-1), outputVectors_uk[target].T))\n cost += -np.log(sigmd_uoT_vc)\n\n gradPred_dJ_vc = np.zeros_like(predicted_vc)\n gradPred_dJ_vc += (sigmd_uoT_vc - 1) * outputVectors_uk[target]\n\n grad_dJ_uw = np.zeros_like(outputVectors_uk)\n grad_dJ_uw[target:target + 1] = (sigmd_uoT_vc - 1) * predicted_vc\n\n neg_samples = []\n for i in range(K):\n j = dataset.sampleTokenIdx()\n if j == target or (j in neg_samples):\n i -= 1 # if negative sample is same with target or already sampled, then resample.\n continue\n neg_samples.append(j)\n\n sigmd_ukT_vc = sigmoid(-np.dot(predicted_vc.reshape(-1), outputVectors_uk[j].T))\n cost += -np.log(sigmd_ukT_vc) # cost for negative sample\n\n grad_dJ_uw[j:j + 1] = (1 - sigmd_ukT_vc) * predicted_vc # gradient for negative sample\n gradPred_dJ_vc += (1 - sigmd_ukT_vc) * outputVectors_uk[j]\n\n return cost, gradPred_dJ_vc, grad_dJ_uw" ]
[ "0.7144969", "0.6606015", "0.6490216", "0.64216244", "0.6363278", "0.63588995", "0.63528866", "0.63436717", "0.63401145", "0.633253", "0.63303226", "0.631504", "0.6300099", "0.6297033", "0.6292471", "0.6279375", "0.62720025", "0.6260915", "0.62449986", "0.6224692", "0.6217328", "0.62080985", "0.61979455", "0.6187106", "0.61844385", "0.61720073", "0.6137179", "0.6133181", "0.612994", "0.6121622" ]
0.7510228
0
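The CTC record above (and the forward-variable record that follows it) call softmax() and add_blanks() without defining them; a hedged sketch of standard definitions consistent with how they are used, assuming logits of shape (T, num_labels + 1) and an integer target array:

import numpy as np

def softmax(logits):
    # Normalize each time step (row) of the (T, num_labels + 1) logit matrix.
    e = np.exp(logits - logits.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def add_blanks(target, blank_label):
    # Build l' = [blank, t_0, blank, t_1, ..., blank] of length 2*|target| + 1,
    # the label expansion the forward recursion indexes as l[s].
    expanded = np.full(2 * target.shape[0] + 1, blank_label, dtype=target.dtype)
    expanded[1::2] = target
    return expanded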
Given the normalized RNN outputs (postsoftmax) at each time step and a target labeling, compute the forward variables alpha_t(s) as defined in equation 5 in the paper
def compute_forward_variables(self, normalized_logits, target): target_length = target.shape[0] num_time_steps = normalized_logits.shape[0] ###################### ### YOUR CODE HERE ### ###################### blank_label = normalized_logits.shape[1] - 1 l = add_blanks(target, blank_label) target_length = l.shape[0] # init alpha = np.zeros((target_length, num_time_steps)) alpha[0, 0] = normalized_logits[0, blank_label] # where s = 0, t = 0 alpha[1, 0] = normalized_logits[0, target[0]] # where s = 1, t = 0 for i in xrange(2, num_time_steps): # for all s >= 2, t = 0 alpha[i, 0] = 0 # recursive case for t in xrange(1, num_time_steps): for s in xrange(2, target_length): a_bar = alpha[s, t-1] + alpha[s-1, t-1] if l[s] == blank_label or l[s-2] == l[s]: alpha[s, t] = normalized_logits[t, l[s]] * a_bar else: alpha[s, t] = normalized_logits[t, l[s]] * (a_bar + alpha[s-2, t-1]) return alpha
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_alpha(self, feats):\n \n init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)\n init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\n\n forward_var = autograd.Variable(init_alphas)\n\n for feat in feats:\n alphas_t = [] # The forward variables at this timestep\n for next_tag in range(self.tagset_size):\n emit_score = feat[next_tag].view(\n 1, -1).expand(1, self.tagset_size)\n trans_score = self.transition[next_tag].view(1, -1)\n next_tag_var = forward_var + trans_score + emit_score\n alphas_t.append(log_sum_exp(next_tag_var))\n forward_var = torch.cat(alphas_t).view(1, -1)\n terminal_var = forward_var + self.transition[self.tag_to_ix[STOP_TAG]]\n alpha = log_sum_exp(terminal_var)\n return alpha", "def _forward_alg(self, feats):\n\n init_alphas = torch.full((self.batch_size, 1, self.tagset_size), -10000.).to(self.device)\n # START_TAG has all of the score.\n init_alphas[:, 0, self.tag2idx[START_TAG]] = 0.\n forward_var = init_alphas\n\n # Iterate through the sentence\n for i in range(feats.shape[1]):\n feat = feats[:,i,:]\n\n emit_score = feat.view(self.batch_size, self.tagset_size, 1)\n next_tag_var = forward_var + self.transitions + emit_score\n forward_var = torch.logsumexp(next_tag_var,dim=-1).view(self.batch_size, 1, self.tagset_size)\n\n terminal_var = forward_var + self.transitions[self.tag2idx[STOP_TAG]]\n alpha = torch.logsumexp(terminal_var,dim=-1)\n return alpha", "def forwardVariableGeneration(self):\n self.alpha = zeros((self.noOfEmmittingStates+2, self.T + 1))\n\n # initialistation\n self.alpha[0,0] = 1.0\n self.alpha[1:,0] = 0.0\n self.alpha[0,1:] = 0.0\n\n # main recursion\n for t in range(1, self.T+1):\n for j in range(1, self.noOfEmmittingStates+1):\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k, t-1] * self.transitionMatrix[k, j-1])\n self.alpha[j, t] = self.b[j-1, t-1] * partialSum\n # since must end in final state, last alpha for states with zero transition\n # prob to last state must be zero?\n for row in range(self.transitionMatrix.shape[0]):\n if self.transitionMatrix[row,-1] == 0.0:\n self.alpha[row,-1] = 0.0\n # fwd prob variable for final state at 'last' timestep gets bumped into the\n # final column to save having a needless column\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k,-1] * self.transitionMatrix[k,-1])\n self.alpha[-1,-1] = partialSum\n\n # likelihood of observed sequence, p(O|lambda)\n self.observationLikelihood = self.alpha[-1,-1]", "def train(self, inputs, targets, eta, niterations):\n ndata = np.shape(inputs)[0] # number of data samples\n # adding the bias\n inputs = np.concatenate((inputs, -np.ones((ndata, 1))), axis=1)\n\n # numpy array to store the update weights\n updatew1 = np.zeros((np.shape(self.weights1)))\n updatew2 = np.zeros((np.shape(self.weights2)))\n updatew3 = np.zeros((np.shape(self.weights3)))\n\n self.Errors = []\n for n in range(niterations):\n\n #############################################################################\n # TODO: implement the training phase of one iteration which consists of two phases:\n # the forward phase and the backward phase. you will implement the forward phase in \n # the self.forwardPass method and return the outputs to self.outputs. Then compute \n # the error (hints: similar to what we did in the lab). Next is to implement the \n # backward phase where you will compute the derivative of the layers and update \n # their weights. 
\n #############################################################################\n\n # forward phase \n self.outputs = self.forwardPass(inputs)\n\n # Error using the sum-of-squares error function\n error = 0.5 * np.sum((self.outputs - targets) ** 2)\n\n if np.mod(n, 100) == 0:\n self.Errors.append(error)\n print(\"Iteration: \", n, \" Error: \", error)\n\n # backward phase \n # Compute the derivative of the output layer. NOTE: you will need to compute the derivative of \n # the softmax function. Hints: equation 4.55 in the book. \n # deltao = (self.outputs - targets) * (self.outputs - self.outputs ** 2)\n deltao = (self.outputs - targets) * self.outputs * (1 - self.outputs)\n\n # compute the derivative of the second hidden layer\n\n deltah2 = self.beta * self.hidden2 * (1.0 - self.hidden2) * (np.dot(deltao, np.transpose(self.weights3)))\n\n\n # compute the derivative of the first hidden layer\n deltah1 = self.beta * self.hidden1 * (1.0 - self.hidden1) * (np.dot(deltah2[:, :-1], np.transpose(self.weights2)))\n\n # update the weights of the three layers: self.weights1, self.weights2 and self.weights3\n # here you can update the weights as we did in the week 4 lab (using gradient descent) \n # but you can also add the momentum\n\n updatew1 = eta * np.dot(np.transpose(inputs), deltah1[:, :-1]) + self.momentum * updatew1\n updatew2 = eta * np.dot(np.transpose(self.hidden1), deltah2[:, :-1]) + self.momentum * updatew2\n updatew3 = eta * np.dot(np.transpose(self.hidden2), deltao) + self.momentum * updatew3\n\n self.weights1 -= updatew1\n self.weights2 -= updatew2\n self.weights3 -= updatew3\n\n #############################################################################\n # END of YOUR CODE \n #############################################################################", "def forward(self, x):\n in_num=self.layers[1].in_features\n out_num=self.layers[-1].out_features\n x_new=x.detach().clone()\n for i in range(in_num):\n x_new[:,i]=(x[:,i] - self.beta[i]) * self.alpha[i] \n \n y=self.layers(x_new)\n for i in range(out_num):\n y[:,i]=(y[:,i] / self.alpha[in_num+i]) + self.beta[in_num+i]\n return y", "def forward(self, x):\n if self.train():\n # N(0,1)\n epsilon = Variable(torch.randn(x.size()))\n if x.is_cuda:\n epsilon = epsilon.cuda()\n \n # Clip alpha\n self.log_alpha.data = torch.clamp(self.log_alpha.data, max=self.max_alpha)\n alpha = self.log_alpha.exp()\n \n # N(1, alpha)\n epsilon = epsilon * alpha\n \n return x * epsilon\n else:\n return x", "def forward(self, x):\r\n y = self.en_fc1(x)\r\n y = F.relu(y)\r\n y = self.en_fc2(y)\r\n y = F.relu(y)\r\n y = self.en_fc3(y)\r\n y = F.relu(y)\r\n\r\n mean = self.en_mu(y)\r\n stddev_p = self.en_log(y)\r\n \r\n n = x.shape[0]\r\n z = torch.randn(n,self.latent_dim)\r\n std = torch.exp(stddev_p/2.0)\r\n z = z.mul(std) + mean\r\n \r\n xhat = self.de_fc1(z)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc2(xhat)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc3(xhat)\r\n xhat = F.sigmoid(xhat)\r\n \r\n return y,mean,stddev_p,z,xhat", "def forward(self, h_prev, x_t):\n from scipy.special import softmax\n # softmax(arr, axis=0)\n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n cat = np.concatenate((h_prev, x_t), axis=1)\n # print('meow', cat.shape)\n h_next = np.tanh(cat @ self.Wh + self.bh)\n y = self.softmax(h_next @ self.Wy + self.by)\n return h_next, y\n\n\n\n\n\n\n\n\n\n\n\n m, i = x_t.shape\n U = self.Wh[:i]\n W = self.Wh[i:]\n x = x_t\n T = len(x_t)\n # During forward propagation we save all hidden states in s because need them later.\n # We 
add one additional element for the initial hidden, which we set to 0\n s = np.zeros((T + 1, len(self.Wh[:self.Wh.shape[1]]) ))\n s[-1] = np.zeros(self.Wh.shape[1])\n # The outputs at each time step. Again, we save them for later.\n o = np.zeros((T, len(self.Wh[:self.Wh.shape[1]])))\n # For each time step...\n for t in np.arange(T):\n # Note that we are indxing U by x[t]. This is the same as multiplying U with a one-hot vector.\n #s[t] = np.tanh(U[:, x_t[]] + W.dot(s[t - 1]))\n o[t] = softmax(self.V.dot(s[t]))\n return s, o\n \n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n print(\"wi\", Wi.shape, \"wh\", Wh.shape)\n print(\"wh\", self.Wh.shape, \"wy\", self.Wy.shape)\n print(\"bh\", self.bh.shape, \"by\", self.by.shape)\n print(\"xtshape\", x_t.shape, \"hprev\", h_prev.shape)\n print(\"one\", self.Wh[:i].shape)\n one = self.Wy.dot(x_t)# np.dot(x_t, Wh) # x_t.dot(self.Wh[:i])\n two = h_prev @ Wh # h_prev.dot(self.Wh[i:])\n sum = one + two\n h_next = np.tanh(sum + self.bh)\n soft = h_next @ self.Wy\n y = self.softmax(soft) # + self.by)\n return h_next, y", "def neural_net_predict(self, inputs):\n for W, b in self.params:\n outputs = np.dot(inputs, W) + b\n inputs = np.tanh(outputs)\n return outputs # - logsumexp(outputs, axis=1, keepdims=True)", "def feedforward(self, inputs):\n # hidden activations\n # a_hidden = self.transfer(np.dot(self.w_input, inputs))\n a_hidden = self.transfer(np.dot(inputs, self.w_input))\n \n #a_output = self.transfer(np.dot(self.w_output, a_hidden))\n dots = (np.dot(a_hidden, self.w_output))\n a_output = self.transfer(np.asarray(dots))\n\n return (a_hidden, a_output)", "def forward(self, inputs):\n max_idx_1 = inputs['max_idx_1']\n max_idx_2 = inputs['max_idx_2']\n max_idx_3 = inputs['max_idx_3']\n max_idx_4 = inputs['max_idx_4']\n max_idx_5 = inputs['max_idx_5']\n x = inputs['out']\n\n out = self.relu(self.deconv6_1(x))\n out = self.max_unpool2d(out, max_idx_5)\n\n out = self.relu(self.deconv5_1(out))\n out = self.max_unpool2d(out, max_idx_4)\n\n out = self.relu(self.deconv4_1(out))\n out = self.max_unpool2d(out, max_idx_3)\n\n out = self.relu(self.deconv3_1(out))\n out = self.max_unpool2d(out, max_idx_2)\n\n out = self.relu(self.deconv2_1(out))\n out = self.max_unpool2d(out, max_idx_1)\n\n out = self.relu(self.deconv1_1(out))\n raw_alpha = self.deconv1(out)\n return raw_alpha", "def mlpfwd(self,inputs):\n\n self.hidden = np.dot(inputs,self.weights1);\n self.hidden = 1.0/(1.0+np.exp(-self.beta*self.hidden))\n self.hidden = np.concatenate((self.hidden,-np.ones((np.shape(inputs)[0],1))),axis=1)\n\n outputs = np.dot(self.hidden,self.weights2);\n\n # Different types of output neurons\n if self.outtype == 'linear':\n \treturn outputs\n elif self.outtype == 'logistic':\n return 1.0/(1.0+np.exp(-self.beta*outputs))\n elif self.outtype == 'softmax':\n normalisers = np.sum(np.exp(outputs),axis=1)*np.ones((1,np.shape(outputs)[0]))\n return np.transpose(np.transpose(np.exp(outputs))/normalisers)\n else:\n print \"error\"", "def __backpropagation(self, X, y, alpha):\n # perform forward pass\n y_pred = self.__feed_forward(X)\n # loop over the layers backward\n for i in reversed(range(len(self.__layers))):\n layer = self.__layers[i]\n\n # output layer\n if layer == self.__layers[-1]:\n layer.error = 2 * (y_pred - y)\n # other layers\n else:\n next_layer = self.__layers[i + 1]\n layer.error = next_layer.error * next_layer.d_act_f(next_layer.p) @ next_layer.weights \n\n # update the weights\n for i in range(len(self.__layers)):\n layer = self.__layers[i]\n input_to_use 
= np.atleast_2d(X if i == 0 else self.__layers[i - 1].z)\n grad = (layer.error * layer.d_act_f(layer.p) * input_to_use.T)\n if layer.weights.shape[1] != grad.shape[1]: grad = grad.T\n layer.weights -= alpha * grad", "def forwardPass(self, inputs):\n #############################################################################\n # TODO: Implement the forward phase of the model. It has two hidden layers \n # and the output layer. The activation function of the two hidden layers is \n # sigmoid function. The output layer activation function is the softmax function\n # because we are working with multi-class classification. \n #############################################################################\n\n # layer 1 \n # compute the forward pass on the first hidden layer with the sigmoid function\n\n self.hidden1 = np.dot(inputs, self.weights1) #(9000, 785) (785, 5)\n # self.hidden1 = self.sigmoid_fun(self.hidden1) #(9000, 5)\n self.hidden1 = 1.0 / (1.0 + np.exp(-self.beta*self.hidden1))\n self.hidden1 = np.concatenate((self.hidden1, -np.ones((np.shape(inputs)[0], 1))), axis=1) # (9000,6)\n\n\n # layer 2\n # compute the forward pass on the second hidden layer with the sigmoid function\n self.hidden2 = np.dot(self.hidden1, self.weights2) # (9000,6) (6, 5)\n self.hidden2 = self.sigmoid_fun(self.hidden2) # (9000,5)\n self.hidden2 = np.concatenate((self.hidden2, -np.ones((np.shape(self.hidden1)[0], 1))), axis=1) # (9000,6)\n\n # output layer\n # compute the forward pass on the output layer with softmax function\n outputs = np.dot(self.hidden2, self.weights3) # (9000,6) (6, 10)\n normalisers = np.sum(np.exp(outputs), axis=1)*np.ones((1, np.shape(outputs)[0]))\n outputs = np.transpose(np.transpose(np.exp(outputs)) / normalisers)\n # outputs = self.softmax_fun(outputs) # (9000,10)\n # print(outputs)\n #############################################################################\n # END of YOUR CODE \n #############################################################################\n return outputs", "def feedforward(self, inputs):\n # hidden activations\n # a_hidden = self.transfer(np.dot(self.w_input, inputs))\n a_hidden1 = self.transfer(np.dot(inputs, self.w_input))\n \n dots1 = (np.dot(a_hidden1, self.w_middle))\n a_hidden2 = self.transfer(np.asarray(dots1))\n \n #a_output = self.transfer(np.dot(self.w_output, a_hidden))\n dots2 = (np.dot(a_hidden2, self.w_output))\n a_output = self.transfer(np.asarray(dots2))\n \n return (a_hidden1, a_hidden2, a_output)", "def forward(self, inputs): \n self.z1 = self.af(np.dot(inputs, self.hidden1_weights)) \n self.z2 = self.af(np.dot(self.z1, self.hidden2_weights))\n self.z3 = sigmoid(np.dot(self.z2, self.output3_weights)) # because the output interval must be [0, 1]\n return self.z3 # so the activation function of last layer must be sigmoid", "def forward(self, z_t_1, h_x):\n h_combined = 0.5*(self.z_to_h(z_t_1) + h_x)# combine the rnn hidden state with a transformed version of z_t_1\n mu = self.h_to_mu(h_combined)\n logvar = self.h_to_logvar(h_combined)\n std = F.softplus(logvar) \n epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n z_t = epsilon * std + mu # [batch_sz x z_sz]\n return z_t, mu, logvar", "def forwardPropagation(self, inputs, label):\n node_hidden = np.dot(inputs, self.input_W)\n node_hidden = np.add(node_hidden, self.input_B)\n node_hidden = np.maximum(0, node_hidden)\n node_output = np.dot(node_hidden, self.hidden_W)\n node_output = np.add(node_output, self.hidden_B)\n #print(node_output)\n 
exp_node_output = np.exp(node_output)\n node_output = exp_node_output / np.sum(exp_node_output, axis=1, keepdims=True)\n #print(node_output)\n #node_output = self.softmax(node_output)\n loss = np.sum(-np.log(node_output[range(inputs.shape[0]),label]))/(inputs.shape[0])+0.5 * self.regularizer*np.sum(self.input_W *self.input_W)+0.5 * self.regularizer*np.sum(self.hidden_W *self.hidden_W)\n \"\"\"Loss= Input data loss + Loss correction by penalizing the loss, here we use 0.2 as an experimental value\"\"\"\n #loss = np.sum(-np.log(node_output[range(inputs.shape[0]), label])) / (inputs.shape[0]) + 0.2 * self.regularizer * np.sum(self.input_W ^ 2) + 0.2 * self.regularizer * np.sum(self.hidden_W ^ 2)\n return loss, node_hidden, node_output", "def forward(self, z_t_1, h_x):\n h_combined = 0.5*(self.z_to_h(z_t_1) + h_x)# combine the rnn hidden state with a transformed version of z_t_1\n \n z_mean = self.h_to_z_mean(h_combined)\n \n z_var = self.h_to_z_var(h_combined)\n \n epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n z_t = z_mean + epsilon * torch.exp(0.5 * z_var) # [batch_sz x z_sz]\n \n \n# z_t = F.gumbel_softmax(z_category)\n# z_t = z_category\n# \n# phi_z = torch.mm(z_t, torch.t(phi_table))\n# mu = self.h_to_mu(h_combined)\n# logvar = self.h_to_logvar(h_combined)\n# std = torch.exp(0.5 * logvar) \n# epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n# z_t = epsilon * std + mu # [batch_sz x z_sz]\n return z_t, z_mean, z_var", "def forward(self, pred, gt, mask=None):\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n if mask is not None:\n pos_inds = pos_inds * mask\n neg_inds = neg_inds * mask\n neg_weights = torch.pow(1 - gt, self.beta)\n loss = 0\n pos_loss = torch.log(pred) * torch.pow(1 - pred, self.alpha) * pos_inds\n neg_loss = torch.log(1 - pred) * torch.pow(pred, self.alpha) * neg_weights * neg_inds\n num_pos = pos_inds.float().sum()\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss", "def compute_targets(rollout, action_space, last_r=0.0, gamma=0.9, lambda_=1.0):\n\n rollout = compute_advantages(rollout, last_r, gamma=gamma, lambda_=lambda_)\n rollout[\"adv_targets\"] = np.zeros((rollout.count, action_space.n))\n rollout[\"adv_targets\"][np.arange(rollout.count), rollout[\"actions\"]] = \\\n rollout[\"advantages\"]\n rollout[\"value_targets\"] = rollout[\"rewards\"].copy()\n rollout[\"value_targets\"][:-1] += gamma * rollout[\"vf_preds\"][1:]\n return rollout", "def forward(self):\n self.iteration_number += 1\n x = self.x\n self.x = self.alpha * self.x + self.betta\n t = x - self.x\n\n return (t * t).sum()", "def forward(self, ps, T):\n exp_values = self.expected_value_calc(ps)\n probs = F.softmax(exp_values/T, dim=-1)\n return probs", "def forward(self, states, actions_previous=None): \n x = states.view(states.shape[0], self.frames_n * self.state_size)\n \n # ACTOR\n x_actor_alphas = F.relu(self.actor_alphas_layer_1(x))\n x_actor_alphas = F.relu(self.actor_alphas_layer_2(x_actor_alphas))\n x_actor_alphas = F.softplus(self.actor_alphas_layer_3(x_actor_alphas)) + 1. # To get to the interval [1; Inf).\n\n x_actor_betas = F.relu(self.actor_betas_layer_1(x))\n x_actor_betas = F.relu(self.actor_betas_layer_2(x_actor_betas))\n x_actor_betas = F.softplus(self.actor_betas_layer_3(x_actor_betas)) + 1. 
# To get to the interval [1; Inf).\n \n distribution = torch.distributions.beta.Beta(concentration1=x_actor_alphas, concentration0=x_actor_betas)\n raw_actions = actions_previous * 0.5 + 0.5 if actions_previous is not None else distribution.sample() # To return to the Beta interval, [0, 1], for now.\n densities = torch.exp(distribution.log_prob(raw_actions))\n actions = (raw_actions - 0.5) * 2 # Finally back to the action interval, [-1, -1].\n entropies = distribution.entropy()\n \n # CRITIC\n x_critic = F.relu(self.critic_layer_1(x))\n x_critic = F.relu(self.critic_layer_2(x_critic))\n values = self.critic_layer_3(x_critic)\n \n return {\n 'actions': actions,\n 'densities': densities,\n 'entropies': entropies, \n 'values': values\n }", "def lossFunc(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {} # input, hidden, output, out_prob states for each time t\n hs[-1] = np.copy(hprev)\n loss = 0\n \n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) \n xs[t][inputs[t]] = 1. # convert input to one-hot\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)\n ys[t] = np.dot(Why, hs[t]) + by\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))\n loss += -np.log(ps[t][targets[t],0])\n \n # backward pass\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(xrange(len(inputs))):\n # backprop into y\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1\n # backprop into Why, hs, and by\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext\n # backprop through tanh activition\n dhraw = (1 - hs[t] * hs[t]) * dh\n # backprop into Wxh, Whh, hs, and bh\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n # clip gradient preventing exploding\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def calc_scale(alpha, targets, preds, gamma):\n return alpha * tf.pow(tf.abs(targets - tf.nn.sigmoid(preds)), gamma)", "def forward(self, x, y, x_mask):\n Wy = self.linear(y) if self.linear is not None else y\n xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)\n xWy.data.masked_fill_(x_mask.data, -float('inf'))\n if self.training:\n # In training we output log-softmax for NLL\n alpha = F.log_softmax(xWy)\n else:\n # ...Otherwise 0-1 probabilities\n alpha = F.softmax(xWy)\n return alpha", "def get_batch_predictions_MFE(rnn, X, target):\n\n out = rnn.forward(X, mean_field_inference=True)\n arr_preds = nn.functional.softmax(out, dim=-1).data.cpu().numpy()\n arr_target = target.detach().cpu().numpy()\n\n return arr_preds, arr_target", "def forward(self, inputs):\n x_wrd = self.lookup(inputs)\n\n # bilinear attention\n x_avg = x_wrd.mean(dim=1)\n x = x_wrd.matmul(self.M)\n x = x.matmul(x_avg.unsqueeze(1).transpose(1, 2))\n if self.b is not None:\n x += self.b\n\n x = F.tanh(x) \n a = F.softmax(x, dim=1)\n\n z = a.transpose(1, 2).matmul(x_wrd)\n z = z.squeeze()\n if z.dim() == 1:\n return z.unsqueeze(0)\n return z", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n 
#######################\n\n return out" ]
[ "0.6556268", "0.6452523", "0.6378413", "0.62199193", "0.6107131", "0.6078645", "0.60783464", "0.6060503", "0.6053484", "0.6044611", "0.6033897", "0.60004437", "0.5989006", "0.59869456", "0.5968317", "0.5951192", "0.5950084", "0.5944599", "0.5943287", "0.5917645", "0.5909624", "0.5908689", "0.5906831", "0.5903671", "0.5894711", "0.5890797", "0.5885738", "0.5876283", "0.58737254", "0.58690363" ]
0.73559594
0