query | document | metadata | negatives | negative_scores | document_score | document_rank |
---|---|---|---|---|---|---|
Get an element from a queue. Test that it is 1. | def test_get_element(self):
data = (1, 2, 3, 4)
queue = Queue_(data)
self.assertEqual(queue.get(), data[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self):\n\t\ttry:\n\t\t\tself.logger.debug('Im trying to get item from queue')\n\t\t\titem = self.queue.get()\n\t\t\tself.logger.debug('Recevie item from queue %s'%(item))\n\t\t\treturn True, item\n\t\texcept Exception, e:\n\t\t\tself.logger.error('Error method get, error: %s'%(e),exc_info=True)\n\t\t\treturn False, None",
"def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except Queue.Empty:\n return None\n return item",
"def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except queue.Empty:\n return None\n return item",
"def peek(self) -> int: \n if not self.empty(): \n return self.queue[0] \n return None",
"def __getitem__(self, uri):\n\t\t# The queue is empty, so return None\n\t\tif self.qsize() == 0:\n\t\t\treturn\n\n\t\t# Iterate through the queue grabbing a worker, comparing its URI with the one provided\n\t\t# and putting it back if they do not match. Note that this implementation assumes that\n\t\t# all workers are equal and therefore their order does not matter\n\t\tfound = None\n\t\tfor i in range(self.qsize()):\n\t\t\tp = self.get_nowait()\n\t\t\tif p.uri == uri:\n\t\t\t\tfound = p\n\t\t\t\tself._uris.remove(uri)\n\t\t\t\tbreak\n\t\t\tself.put_nowait(p)\n\t\treturn found",
"def ztest_get_item(self):\n \n queue = NMSQueue()\n \n result_set = queue.get_items_with_priority(1,1,0,1)\n \n for item in result_set:\n print(\"\\nItem = %s\\n\" % (item) )\n newitem = queue.get_item(item.uuid)\n print(\"\\nRetrieve the same from queue Item = %s\\n\" % (newitem) )",
"def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block = True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.is_running():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None",
"def read(self,getindex):\n if getindex<0:\n #print(\"Indicies are non-negative\")\n return None\n try:\n bufinx = len(self.buffer)+(getindex - self.index.value)\n if bufinx<0:\n #print(\"This item has been deleted, try increasing the queue size\")\n return None\n return self.buffer[bufinx]\n except IndexError:\n #print(\"This item doesn't exist yet\")\n return None",
"def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block=True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.isAlive():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None",
"def test_dequeue_returns_value():\n queue = Queue()\n queue.enqueue('a')\n assert queue.dequeue() is 'a'",
"def front(queue):\n if empty_queue(queue):\n raise IndexError(\"Queue is empty!\")\n else:\n return queue.front.value",
"def peek(self):\r\n return self.queue[0]",
"def peek(self):\r\n return self.queue[0]",
"def peek(self):\r\n if self.size():\r\n return self.queue[0]\r\n else:\r\n return None",
"def peek(self):\n if self.isEmpty(): \n raise Exception(\"Queue underflow\")\n return self._q[self._first]",
"def poll(self):\n if self.isEmpty():\n raise Exception('Queue is empty')\n \n self.qSize -= 1\n self.front = self.front % len(self.data)\n d = self.data[self.front]\n self.front += 1\n return d",
"def pop(self) -> int:\n return self.queue.get()",
"def get_message_from_queue(self):\n message = None, None\n\n try:\n message = self.queue.get(block=True, timeout=3)\n except Empty:\n self.fail(msg='Queue get() failed empty')\n\n return message",
"def get(self):\n if self.result_data.get(self.get_idx + 1) is not None:\n self.get_idx += 1\n res = self.result_data[self.get_idx]\n del self.result_data[self.get_idx]\n return res\n while True:\n res = self.result_queue.get(block=False)\n idx = res.id\n if idx == self.get_idx + 1:\n self.get_idx += 1\n return res\n self.result_data[idx] = res",
"def peek(ind: int = 0, priority: int = 0) -> Any:\n\tglobal queue\n\tif queue:\n\t\ttry:\n\t\t\treturn queue[ind][1]\n\t\texcept IndexError:\n\t\t\treturn None\n\telse:\n\t\treturn None",
"def getitem(self, index):\n #FIXME: A better way to get item without removing it.\n priority,size,trace=self.queues[index].get()\n self.enqueue(index,trace,priority)\n return trace",
"def get(self, q_idx, data_id):\n\n while True:\n # Make sure no other get threads are pulling from the queue\n self.nsyncs[q_idx].done_lock.acquire()\n\n # Check every item currently in the queue\n done_queue = self.nsyncs[q_idx].done_queue\n size = done_queue.qsize() if self.os_supported else 1 # Workaround for non-UNIX systems\n for i in range(size):\n tup = done_queue.get()\n\n # Check if item is our item (ie. if the ids match)\n if tup[0] == data_id:\n self.nsyncs[q_idx].done_lock.release()\n return tup[1]\n\n # Nope, not our item, put it back on the done_queue \n # for someone else to grab\n done_queue.put(tup)\n\n # Didn't find our item, try again..\n self.nsyncs[q_idx].done_lock.release()",
"def pop(self,n):\r\n\t\treturn self.queue.pop(0)[1]",
"def first(self):\n if self.head is None:\n raise Exception(\"nothing in queue\")\n return self.head.value",
"def pop(self) -> int:\n cur = None\n if(not self.empty()):\n cur = self.queue[0] \n self.queue = self.queue[1:] \n return cur",
"def first(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\treturn self._head._element",
"def peek(self):\n return self.queue[0]",
"def get(self):\n with self.__lock:\n while True:\n try:\n job = self.__queue.get(False)\n self.__lock.notify_all()\n return job\n except Queue.Empty:\n self.__lock.wait()",
"def peek(self):\n return self.the_queue[0]",
"def peek(self):\n if not self.empty():\n return self.queue[-1]\n return None"
]
| [
"0.7556628",
"0.74670905",
"0.7456273",
"0.7270813",
"0.71341264",
"0.71085036",
"0.7094008",
"0.7046715",
"0.7011527",
"0.6876995",
"0.6863746",
"0.68071526",
"0.68071526",
"0.67984605",
"0.6744334",
"0.67340684",
"0.6734029",
"0.6704264",
"0.668802",
"0.66751915",
"0.6670262",
"0.66692525",
"0.66516656",
"0.66481966",
"0.66450465",
"0.66384673",
"0.6613847",
"0.66134727",
"0.661217",
"0.6557933"
]
| 0.80578023 | 0 |
Create a Queue from an iterable object. Check that the size of queue_ equals to the size of the given tuple. | def test_new_queue_from_tuple(self):
data = (1, 2, 3, 4)
queue = Queue_(data)
self.assertFalse(queue.empty())
self.assertEqual(queue.size(), len(data))
for value in data:
test_value = queue.get()
self.assertEqual(test_value, value)
self.assertTrue(queue.empty())
self.assertEqual(queue.size(), 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_new_queue_from_list(self):\n data = [1, 3, 5, 7, 2, 4]\n queue = Queue_(data)\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), len(data))\n self.assertEqual(queue.top(), data[0])",
"def test_new_queue_from_generator(self):\n queue = Queue_(range(10))\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), 10)\n self.assertEqual(queue.top(), 0)",
"def test_can_instantiate_empty_queue(empty_queue):\n assert isinstance(empty_queue, Queue)",
"def __init__(self, size):\n self.size = size\n self.queue = []",
"def __init__(self, iterable=()):\n self.head = None\n self._counter = 0\n if isinstance(iterable, (str, tuple, list)):\n for item in iterable:\n self.push(item)",
"def __init__(self, size):\n self.queue = collections.deque(maxlen=size)",
"def create_from_tuple(cls, tube, the_tuple):\n if the_tuple is None:\n return\n\n if not the_tuple.rowcount:\n raise Deque.ZeroTupleException(\"Error creating task\")\n\n row = the_tuple[0]\n\n return cls(\n tube,\n task_id=row[0],\n state=row[1],\n next_event=row[2],\n msg_type=row[3],\n obj_type=row[4],\n obj_id=row[5],\n channel=row[6],\n to_send_at=row[7],\n valid_until=row[8],\n created_at=row[9],\n data=row[10]\n )",
"def __init__(self, size):\n self.queue = collections.deque(maxlen = size)",
"def queued(values, qsize):\n values = [_normalize(v) for v in values]\n if qsize < 1:\n raise ValueError(\"qsize must be 1 or larger\")\n q = []\n it = iter(values)\n try:\n for i in range(qsize):\n q.append(next(it))\n for i in cycle(range(qsize)):\n yield q[i]\n q[i] = next(it)\n except StopIteration:\n pass",
"def __init__(self, *values):\n if (len(values) == 1) and (type(values[0]) in SequenceTypes):\n values = values[0]\n self.values = tuple(values)\n self.fast_validate = (5, self.values)",
"def enqueue(tup):",
"def test_the_queue_size(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n assert the_queue.size() == 3",
"def validate(self, queue):\n self.assertTrue(queue.is_empty())\n queue.enqueue(10)\n self.assertFalse(queue.is_empty())\n queue.enqueue(20)\n self.assertEqual(10, queue.dequeue())\n self.assertFalse(queue.is_empty())\n queue.enqueue(30)\n queue.enqueue(40)\n self.assertEqual(20, queue.dequeue())\n self.assertEqual(30, queue.dequeue())\n self.assertEqual(40, queue.dequeue())",
"def __init__(self, iterable=None):\n self.list = LinkedList()\n\n if iterable:\n for item in iterable:\n self.enqueue(item)",
"def from_tuple(cls, t):\n if (isinstance(t, tuple) and len(t) == 3):\n l, n, r = t\n return cls(n, l, r)\n else:\n raise ValueError(\"Expected a 3-tuple!\")",
"def __init__(self, size):\n self.size = size\n self.queue = []\n self.sum = 0",
"def enqueue_with_tuples(priority_queue, d):\n for ch, count in d.items():\n tree = Tree(None, ch, None)\n priority_queue.enqueue((tree, count))",
"def __init__ (self, size: int):\n self.size = size\n self.queue = []\n self.sum = 0",
"def test_instantiate_of_iterable():\n iterable_stack = Stack([1, 2, 3, 4, 5])\n assert iterable_stack.top.val == 5\n assert iterable_stack._size == 5",
"def __init__(self, iterable=None):\n self.heap = []\n if iterable is not None:\n for item in iterable:\n self.push(item)",
"def small_queue():\n queue = Queue()\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n queue.enqueue(4)\n return queue",
"def __init__(self, iterable=(), maxlen=None): # known case of _collections.deque.__init__\n pass",
"def instantiate_queue(self):\n serialized_queue = self.cache.get('queue')\n queue = ast.literal_eval(serialized_queue.decode('utf-8'))\n return queue",
"def __init__(self, initial=[]):\n self.the_queue = copy.copy(initial)",
"def __init__(self,size=10):\n \n self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.\n self.index = Value('i',0) #index of next item to be added.\n self.manager = Manager()\n \n self.buffer = self.manager.list() #the buffer we will store things in.\n self.size = size #the maximum size of the buffer\n self.newitem = Queue() #a blocking event to control the pop method\n t = threading.Thread(target=self.worker) #the worker that will run when items are added.\n t.start() #start the worker\n self.newitemindex = 0 #index of items to pop",
"def small_queue():\n que = Queue()\n que.enqueue(1)\n que.enqueue(2)\n que.enqueue(3)\n que.enqueue(4)\n que.enqueue(5)\n return que",
"def test_insertion_of_value_increases_length(empty_queue):\n assert len(empty_queue) == 0\n empty_queue.enqueue(100)\n assert len(empty_queue) == 1",
"def _check_tuple(self) -> PossibleResult[T]:\n if isinstance(self.constructor_origin, type) and issubclass(\n self.constructor_origin, tuple\n ):\n if not isinstance(self.obj, Sequence):\n raise DeserializeError(\n tuple, self.obj, self.new_depth, self.key\n )\n if not self.constructor_args:\n return self.constructor_origin(self.obj) # type: ignore\n if (\n len(self.constructor_args) == 2\n and self.constructor_args[1] == ...\n ):\n return self.constructor_origin(\n Deserialize(\n obj=value,\n constructor=self.constructor_args[0],\n depth=self.new_depth,\n convert_primitives=self.convert_primitives,\n ).run()\n for value in self.obj\n ) # type: ignore\n if len(self.constructor_args) != len(self.obj):\n raise DeserializeError(\n tuple,\n self.obj,\n self.new_depth,\n self.key,\n message_prefix=\"Tuple incorrect length. \",\n )\n return self.constructor_origin(\n Deserialize(\n obj=self.obj[i],\n constructor=arg,\n depth=self.new_depth,\n convert_primitives=self.convert_primitives,\n ).run()\n for i, arg in enumerate(self.constructor_args)\n ) # type: ignore\n return NO_RESULT",
"def __init__(self): # 1+1+1+1+1 => O(1)\n self._capacity = 7 #O(1)\n self._queue = [None] * self._capacity #O(1)\n self._size = 0 #O(1)\n self._start = 0 #O(1)\n self._end = 0 #O(1)",
"def __init__(self, values=[]):\n self.priority_queue = {}\n if isinstance(values, list):\n try:\n for value, priority in values:\n self.insert(value, priority)\n except ValueError:\n raise TypeError(\"You need to tuplize your priorities\")\n else:\n raise TypeError(\"Put your items in a list\")"
]
| [
"0.62171847",
"0.58740187",
"0.5855165",
"0.5600438",
"0.5446116",
"0.54402775",
"0.543408",
"0.5431774",
"0.54129475",
"0.54115546",
"0.5404512",
"0.5395198",
"0.53889567",
"0.5335712",
"0.5332964",
"0.53098434",
"0.5306954",
"0.52912533",
"0.5286187",
"0.5239695",
"0.5224592",
"0.52075523",
"0.52071553",
"0.51963186",
"0.51815826",
"0.51774836",
"0.5166636",
"0.51640284",
"0.51566267",
"0.5156438"
]
| 0.75716573 | 0 |
Create a Queue from a list. Check that the size of queue equals to the size of the queue. Check that the top element of queue equals to the latest element of the list. | def test_new_queue_from_list(self):
data = [1, 3, 5, 7, 2, 4]
queue = Queue_(data)
self.assertFalse(queue.empty())
self.assertEqual(queue.size(), len(data))
self.assertEqual(queue.top(), data[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_initialization_with_empty_list_last_node_check():\n queue = Queue([])\n assert queue._queue.last_node is None",
"def queue_to_stack(queue):\n stack = Stack()\n check_list = []\n\n while len(queue) != 0:\n check_list.append(queue.dequeue())\n\n check_list.reverse()\n\n while check_list != []:\n stack.push(check_list[0])\n check_list.remove(check_list[0])",
"def test_new_queue_from_tuple(self):\n data = (1, 2, 3, 4)\n queue = Queue_(data)\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), len(data))\n for value in data:\n test_value = queue.get()\n self.assertEqual(test_value, value)\n self.assertTrue(queue.empty())\n self.assertEqual(queue.size(), 0)",
"def validate(self, queue):\n self.assertTrue(queue.is_empty())\n queue.enqueue(10)\n self.assertFalse(queue.is_empty())\n queue.enqueue(20)\n self.assertEqual(10, queue.dequeue())\n self.assertFalse(queue.is_empty())\n queue.enqueue(30)\n queue.enqueue(40)\n self.assertEqual(20, queue.dequeue())\n self.assertEqual(30, queue.dequeue())\n self.assertEqual(40, queue.dequeue())",
"def f1(lst):\n\n # At each iteration, we remove the values from the \"current\" queue\n # and sort them into the correct queue based on their jth bit. We \n # arbitrarily set the zeros queue as first \"current\".\n \n q_0 = DequeQueue(lst) \n q_1 = DequeQueue()\n cur_queue = q_0 \n \n j = 0 \n output = 0\n \n while 1<<j <= len(lst):\n \n cur_load = len(cur_queue) # Size of queue changes so store it! \n for _ in range(cur_load):\n \n val = cur_queue.remove() \n if get_jth_bit(val,j):\n q_1.add(val)\n else:\n q_0.add(val)\n \n # Now, only vals with least significant j-1 bits identical to \n # output are expected to remain (but one is missing). For \n # example, if len(lst) is 17, the bit 0 has been found to be \n # \"1\", and bit 1 \"0\", then j is 2, and only vals <= 17 and \n # ending in bits \"01\" are expected to be left. These start at 1\n # and increase by 2^j. (1,5,9,13,17).\n \n # This number of vals is either even or odd (compare with when \n # the first two bits are \"11\", yielding 3,7,11,15) and the \n # vals' jth bit alternates between 0 and 1. Thus the expected\n # number of vals with a \"0\" jth bit is either equal to, or one \n # greater than, the expected number of values with a \"1\".\n \n # When the expected number of vals are equal, and the missing \n # number has a \"0\" jth bit, then q_1 should have more vals than\n # q_0. If the missing number has a \"1\", then q_0 has more vals.\n \n # When the expected number of vals in q_0 is expected to be one \n # greater than the number of vals in q_1, then if the missing \n # number has a \"0\" jth bit, then q_0 and q_1 should have the \n # same number of vals. If the missing number has a \"1\", then \n # q_0 has more vals.\n \n # It follows, then, that the missing number has a \"1\" jth bit \n # iff q_0 has more vals than q_1.\n \n if len(q_0) > len(q_1): \n output |= (1<<j)\n cur_queue = q_1\n drain(q_0)\n else:\n cur_queue = q_0\n drain(q_1)\n \n j += 1\n \n return output",
"def test_the_queue_dequeue_multi_values_phase_one(the_queue):\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n the_queue.enqueue(4)\n the_queue.enqueue(5)\n the_queue.dequeue()\n assert the_queue._new_dll.tail.data == 3",
"def test_the_queue_enqueue_multi_values(the_queue):\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n the_queue.enqueue(4)\n the_queue.enqueue(5)\n assert (the_queue._new_dll.head.data,\n the_queue._new_dll.tail.data,\n the_queue._new_dll.head.prior_node.data,\n the_queue._new_dll.tail.next_node.data) == (5, 2, 4, 3)",
"def test_the_queue_size(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n assert the_queue.size() == 3",
"def test_values_dequeue_in_correct_order():\n queue = Queue('ab')\n assert queue.dequeue() == 'a'",
"def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2",
"def test_initialization_with_empty_list_first_node_check():\n queue = Queue([])\n assert queue._queue.first_node is None",
"def small_queue():\n queue = Queue()\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n queue.enqueue(4)\n return queue",
"def test_initialization_with_list_first_node_val():\n queue = Queue('a')\n assert queue._queue.first_node == 'a'",
"def construct_max_heap(self, lst):\n self.heap_list = lst\n #start compare node\n node = (len(self.heap_list)-2)/2\n while node >= 0:\n self.sift_down(node, len(self.heap_list)-1)\n node -= 1",
"def test_enqueue(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.size(), 3)",
"def test_new_queue_from_generator(self):\n queue = Queue_(range(10))\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), 10)\n self.assertEqual(queue.top(), 0)",
"def test_insertion_of_value_increases_length(empty_queue):\n assert len(empty_queue) == 0\n empty_queue.enqueue(100)\n assert len(empty_queue) == 1",
"def test_peek_top():\n\n q = PriorityQueue()\n\n # input list (obj, priority) should be reversed\n # in the priority_queue\n input_list = [((1), 9), ((2), 8), ((3), 7),\n ((4), 6), ((5), 5), ((6), 4),\n ((7), 3), ((8), 2), ((9), 1)]\n\n # insert the items in the queue\n for obj, p in input_list:\n q.push(obj, p)\n\n # save the state of the queue\n state = [i for i in q._queue]\n\n # peek a few times\n [q.top() for i in range(100)]\n\n eq_(state, q._queue)",
"def test_push_pop():\n\n q = PriorityQueue()\n\n # input list (obj, priority) should be reversed\n # in the priority_queue\n input_list = [((1), 9), ((2), 8), ((3), 7),\n ((4), 6), ((5), 5), ((6), 4),\n ((7), 3), ((8), 2), ((9), 1)]\n\n # insert the items in the queue\n for obj, p in input_list:\n q.push(obj, p)\n\n # pop the items into another list\n output = []\n while q._queue:\n output.append(q.pop())\n\n # make sure it lines up with expected result\n eq_(output, range(1, 10)[::-1])",
"def make_queue(V):\n H = []\n for i in V:\n H.append(i)\n return(H)",
"def test_the_queue_dequeue_multi_values_phase_two(the_queue):\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n the_queue.enqueue(4)\n the_queue.enqueue(5)\n the_queue.dequeue()\n assert (the_queue.dequeue(),\n the_queue._new_dll.tail.data) == (3, 4)",
"def test_dequeue(self):\n queue = Queue()\n self.assertEqual(queue.dequeue(), None)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.dequeue(), 1)\n self.assertEqual(queue.size(), 2)",
"def ztest_sql_queue(self):\n \n sql_queue = SQLQueue()\n \n #insertion\n for i in range(10):\n item = NMSQueueItem(5,\"data %s\" % (i))\n item.set_uuid()\n sql_queue.put(item.dictify())\n \n size = sql_queue.size()\n \n while size != 0:\n the_dict = sql_queue.pop()\n item = NMSQueueItem.create_from_dict(the_dict)\n print(\"size = %d, item = %s\\n\" % (size, item))\n size = sql_queue.size()\n \n print(\"size = %s\" % size )",
"def test_can_instantiate_empty_queue(empty_queue):\n assert isinstance(empty_queue, Queue)",
"def test_values_dequeue_two_values_in_correct_order():\n queue = Queue('ab')\n assert queue.dequeue() == 'a'\n assert queue.dequeue() == 'b'",
"def dequeue(self): ##################### <-\n \"\"\"Llst -> lst, dequeue, lst -> Llst\"\"\"\n top = self.top\n def linkedLstToList(Llst):\n \"\"\"A list to a single node linked list\"\"\"\n if not Llst: return []\n return [Llst.value] + linkedLstToList(Llst.next)\n def lstToLinkedList(lst):\n \"\"\"A list to a single node linked list\"\"\"\n if not lst: return\n LinkedList = Node(lst[0])\n LinkedList.next = lstToLinkedList(lst[1:])\n return LinkedList\n self.top = lstToLinkedList(linkedLstToList(top)[:-1])\n return linkedLstToList(top)[-1]",
"def small_queue():\n que = Queue()\n que.enqueue(1)\n que.enqueue(2)\n que.enqueue(3)\n que.enqueue(4)\n que.enqueue(5)\n return que",
"def test_size(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n self.assertEqual(queue.size(), 1)",
"def __init__(self,size=10):\n \n self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.\n self.index = Value('i',0) #index of next item to be added.\n self.manager = Manager()\n \n self.buffer = self.manager.list() #the buffer we will store things in.\n self.size = size #the maximum size of the buffer\n self.newitem = Queue() #a blocking event to control the pop method\n t = threading.Thread(target=self.worker) #the worker that will run when items are added.\n t.start() #start the worker\n self.newitemindex = 0 #index of items to pop",
"def test_initialization_with_two_items_last_node_val():\n queue = Queue('ab')\n assert queue._queue.last_node == 'b'"
]
| [
"0.656142",
"0.6330778",
"0.6320571",
"0.6206751",
"0.61039263",
"0.60570526",
"0.604458",
"0.59754544",
"0.5956983",
"0.59466815",
"0.59238017",
"0.58860093",
"0.58682257",
"0.5859621",
"0.5830505",
"0.57924914",
"0.57899845",
"0.5778571",
"0.5774191",
"0.5771166",
"0.5761098",
"0.57587373",
"0.574458",
"0.5741567",
"0.57377356",
"0.5717084",
"0.57085097",
"0.57046664",
"0.56353456",
"0.5602025"
]
| 0.83507144 | 0 |
Create a Queue_ from a generator. Test that its size equals to the number provided in the generator. | def test_new_queue_from_generator(self):
queue = Queue_(range(10))
self.assertFalse(queue.empty())
self.assertEqual(queue.size(), 10)
self.assertEqual(queue.top(), 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def large_queue():\n que = Queue()\n for num in range(1, 11):\n que.enqueue(num)\n return que",
"def build_generator(self, i):\n if self.rng.random() < self.freeze_probability:\n return SequenceIterator(self.rng.choice(SIZES[:SIZES.index(self.max_size)]))\n else:\n return SequenceIterator(self.max_size)",
"def test_new_queue_from_tuple(self):\n data = (1, 2, 3, 4)\n queue = Queue_(data)\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), len(data))\n for value in data:\n test_value = queue.get()\n self.assertEqual(test_value, value)\n self.assertTrue(queue.empty())\n self.assertEqual(queue.size(), 0)",
"def test_size_increments_with_enqueue():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1",
"def test_new_queue_from_list(self):\n data = [1, 3, 5, 7, 2, 4]\n queue = Queue_(data)\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), len(data))\n self.assertEqual(queue.top(), data[0])",
"def __init__(self, size):\n self.size = size\n self.queue = []",
"def __init__(self, size):\n self.queue = collections.deque(maxlen=size)",
"def __init__(self, size):\n self.queue = collections.deque(maxlen = size)",
"def test_size(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n self.assertEqual(queue.size(), 1)",
"def test_the_queue_size(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n assert the_queue.size() == 3",
"def queued(values, qsize):\n values = [_normalize(v) for v in values]\n if qsize < 1:\n raise ValueError(\"qsize must be 1 or larger\")\n q = []\n it = iter(values)\n try:\n for i in range(qsize):\n q.append(next(it))\n for i in cycle(range(qsize)):\n yield q[i]\n q[i] = next(it)\n except StopIteration:\n pass",
"def test_dequeue_2(self):\r\n from numpy import random\r\n queue = Queue(shape=(100,10), dtype='int16')\r\n for i in range(5): queue.enqueue( random.randint(0,4096,(16,10)) )\r\n for i in range(1000):\r\n self.assertEqual(queue.dequeue(16).shape,(16,10))\r\n queue.enqueue(random.randint(0,4096,(16,10)) )",
"def test_size_of_new_queue():\n queue = Queue()\n assert queue.size() == 0",
"def small_queue():\n queue = Queue()\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n queue.enqueue(4)\n return queue",
"def test_size_decrements_with_dequeue():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1\n queue.dequeue()\n assert queue.size() == 0",
"def test_valueInQueue(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper.pop()\n self.assertEqual(expected, actual)",
"def small_queue():\n que = Queue()\n que.enqueue(1)\n que.enqueue(2)\n que.enqueue(3)\n que.enqueue(4)\n que.enqueue(5)\n return que",
"def __init__(self,size=10):\n \n self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.\n self.index = Value('i',0) #index of next item to be added.\n self.manager = Manager()\n \n self.buffer = self.manager.list() #the buffer we will store things in.\n self.size = size #the maximum size of the buffer\n self.newitem = Queue() #a blocking event to control the pop method\n t = threading.Thread(target=self.worker) #the worker that will run when items are added.\n t.start() #start the worker\n self.newitemindex = 0 #index of items to pop",
"def test_EmptyQueue(self):\n genFn = Mock(return_value=None)\n \n wrapper = KaoGenerator(genFn)\n self.assertEqual(len(wrapper._queue), 0)",
"def buffered_gen_threaded(source_gen, buffer_size=2):\n if buffer_size < 2:\n raise RuntimeError(\"Minimal buffer size is 2!\")\n\n buffer = Queue.Queue(maxsize=buffer_size - 1)\n # the effective buffer size is one less, because the generation process\n # will generate one extra element and block until there is room in the buffer.\n\n def _buffered_generation_thread(source_gen, buffer):\n for data in source_gen:\n buffer.put(data, block=True)\n buffer.put(None) # sentinel: signal the end of the iterator\n\n thread = threading.Thread(target=_buffered_generation_thread, args=(source_gen, buffer))\n thread.daemon = True\n thread.start()\n\n for data in iter(buffer.get, None):\n yield data",
"def __init__(self, size: int):\n self.size = size\n self.queue = [0] * self.size # queue w/ len of size\n self.head = self.window_sum = 0\n self.count = 0",
"def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))",
"def test_enqueue(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.size(), 3)",
"def queue_iter(queue: Queue) -> Generator[T, None, None]:\n while True:\n val = queue.get()\n yield val",
"def __init__(self, size):\n self.q = deque( maxlen=size)",
"def test_queueEmpty(self):\n items = [1,2,3,4,5]\n queueValues = ['a', 'b', 'c']\n \n generator = Mock()\n generator.send = Mock(side_effect=items)\n genFn = Mock(return_value=generator)\n \n wrapper = KaoGenerator(genFn)\n for v in queueValues:\n wrapper.queue(v)\n \n for i, yieldedValue in enumerate(wrapper):\n self.assertEqual(items[i], yieldedValue)\n if i >= len(queueValues):\n generator.send.assert_called_with(None)\n else:\n generator.send.assert_called_with(queueValues[i])",
"def test_insertion_of_value_increases_length(empty_queue):\n assert len(empty_queue) == 0\n empty_queue.enqueue(100)\n assert len(empty_queue) == 1",
"def test_create_chunks():\n items = list(range(0, 100))\n size = 3\n\n chunks = create_chunks(items, size)\n\n current = next(chunks)\n assert len(current) == size\n assert current == [0, 1, 2]\n\n current = next(chunks)\n assert current == [3, 4, 5]",
"def get_queue(queue_limits):\n\n queues, limits = queue_limits.items()\n queues.pop('')\n\n while(True): \n \n queued_jobs = qstat_plain()\n jobs = {queue : [j for j in queued_jobs if j.queue == queue] for queue in queues} \n jobs[''] = [j for j in queued_jobs if j.queue not in queues]\n\n for queue in queues:\n if len(jobs[queue]) < queue_limits[queue]:\n yield queue\n else:\n time.sleep(30)",
"def test_dequeue(self):\r\n from numpy import random\r\n queue = Queue(shape=(11, 2, 3, 4), dtype='int16')\r\n for i in range(100):\r\n arr_in = random.randint(4096,size = (2,2,3,4))\r\n queue.enqueue(arr_in)\r\n arr_out = queue.dequeue(2)\r\n self.assertEqual((arr_in==arr_out).all(), True)\r\n self.assertEqual(queue.length,0)\r\n self.assertEqual(queue.global_rear,(i+1)*2)\r\n self.assertEqual(queue.rear,2*(i+1)-int(2*(i+1)/11)*11)\r\n\r\n from numpy import random\r\n queue = Queue(shape=(32, 2, 3, 4), dtype='int16')\r\n for i in range(100):\r\n arr_in = random.randint(4096,size = (1,2,3,4))\r\n queue.enqueue(arr_in)\r\n self.assertEqual(queue.length,1)\r\n arr_out = queue.dequeue(1)\r\n self.assertEqual((arr_in==arr_out).all(), True)\r\n self.assertEqual(queue.length,0)\r\n self.assertEqual(queue.global_rear,(i+1)*1)\r\n self.assertEqual(queue.rear,1*(i+1)-int(1*(i+1)/queue.shape[0])*queue.shape[0])"
]
| [
"0.6048734",
"0.60149246",
"0.5839498",
"0.5820395",
"0.57929295",
"0.57875896",
"0.5727647",
"0.57267374",
"0.57099265",
"0.5691689",
"0.5681378",
"0.5661334",
"0.5646394",
"0.5631191",
"0.561792",
"0.5603292",
"0.55817884",
"0.5534262",
"0.55297804",
"0.5498225",
"0.5494258",
"0.5458672",
"0.54176545",
"0.5416677",
"0.54142916",
"0.53767836",
"0.537147",
"0.5366912",
"0.5351669",
"0.53501165"
]
| 0.80516404 | 0 |
Put an element in queue. Test that its size is 1. | def test_put_element(self):
queue = Queue_()
queue.put(1)
self.assertFalse(queue.empty())
self.assertEqual(queue.size(), 1)
self.assertEqual(queue.top(), 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_insertion_of_value_increases_length(empty_queue):\n assert len(empty_queue) == 0\n empty_queue.enqueue(100)\n assert len(empty_queue) == 1",
"def push(self, element):\n if not self.full():\n heapq.heappush(self.queue, element)\n self.size += 1\n return True\n else:\n if element >= self.queue[0]:\n heapq.heapreplace(self.queue, element)\n return True\n else:\n return False",
"def _put(self, item, queue):",
"def enqueue(self, element):\n self.the_queue.append(element)",
"def test_size_increments_with_enqueue():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1",
"def test_enqueue(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.size(), 3)",
"def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2",
"def test_the_queue_size(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n assert the_queue.size() == 3",
"def test_size(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n self.assertEqual(queue.size(), 1)",
"def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n self.rear += 1\n return True\n else:\n return False",
"def enQueue(self, value):\r\n if (len(self.queue) >= self.maxlen):\r\n return False\r\n else:\r\n self.queue.append(value)\r\n return True",
"def offer(self, value):\n if self.isFull():\n raise Exception(\"Queue too small!\")\n \n self.data[self.end] = value\n self.end += 1\n self.qSize += 1\n self.end = self.end % len(self.data)",
"def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n return True\n else:\n return False",
"def enqueue(queue, item):\n new_node = Node(item, None)\n if empty_queue(queue):\n queue.front = new_node\n queue.back = new_node\n else:\n queue.back.next = new_node\n queue.back = new_node\n queue.size = queue.size + 1",
"def enqueue(self, value):\n if len(self.data) == self.size:\n \"\"\"Current queue list:\"\"\"\n self.print_queue()\n raise Exception(\"Queue size limit reached maximum\")\n\n self.data.append(value)",
"def enQueue(self, value):\n if self.count == self.capacity:\n return False\n self.queue[(self.headIndex + self.count) % self.capacity] = value\n self.count += 1\n return True",
"def enqueue(self, item):\n self.__queue.insert(0, item)",
"def enqueue(self, element):\n raise NotImplementedError(\"enqueue: You should have implemented this method!\")",
"def put(self, item: Any):\n has_item = True\n with self._lock:\n if item not in self._items:\n self._items.add(item)\n has_item = False\n if not has_item:\n self._queue.put(item)",
"def test_is_empty(self):\n queue = Queue()\n self.assertEqual(queue.is_empty(), True)\n queue.enqueue(1)\n self.assertEqual(queue.is_empty(), False)",
"def push(self, x):\n self.queue.insert(len(self.queue), x)",
"def enqueue(self, val):\r\n self.queue.append(val)",
"def enQueue(self, value):\n if self.rear - self.front < self.size:\n self.queue.append(value)\n self.rear += 1\n return True\n else:\n return False",
"def test_enquque_val():\n queue = Queue()\n anode = queue.enqueue('a')\n assert queue._queue.first_node is anode",
"def test_get_element(self):\n data = (1, 2, 3, 4)\n queue = Queue_(data)\n self.assertEqual(queue.get(), data[0])",
"def test_enqueue():\n from parenthetics import Queue\n q = Queue()\n q.enqueue('(')\n assert q.length == 1",
"def enqueue(Q, x):\n # Q.append(x)\n Q.put_nowait(x)\n if debug: \n print(\"enqueue\", x, \":\", end=\" \")\n show_queue(Q)\n return Q",
"def put_nowait(self, item):\r\n if self.full():\r\n raise QueueFull\r\n self._put(item)\r\n self._unfinished_tasks += 1\r\n self._finished.clear()\r\n self._wakeup_next(self._getters)",
"def push(self, x):\r\n self.queue.append(x)",
"def enqueue(self, item):\n self.queue.append(item)"
]
| [
"0.77519256",
"0.7739508",
"0.7582388",
"0.7481959",
"0.7472519",
"0.74159044",
"0.7394143",
"0.7335067",
"0.73285407",
"0.7098055",
"0.70681745",
"0.70555544",
"0.7049531",
"0.70145833",
"0.6986029",
"0.6973277",
"0.6941566",
"0.6898279",
"0.689296",
"0.6888862",
"0.6868528",
"0.6863752",
"0.6852366",
"0.6846403",
"0.68436307",
"0.6811192",
"0.68058133",
"0.68038076",
"0.6802815",
"0.6771351"
]
| 0.8269443 | 0 |
Create an empty Queue. Test that call of get function raises Assertion error | def test_call_get_of_empty_queue_raised_error(self):
queue = Queue_()
self.assertRaises(IndexError, queue.get) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_new_queue_is_empty(self):\n queue = Queue_()\n self.assertTrue(queue.empty())\n self.assertEqual(queue.size(), 0)",
"def test_is_empty(self):\n queue = Queue()\n self.assertEqual(queue.is_empty(), True)\n queue.enqueue(1)\n self.assertEqual(queue.is_empty(), False)",
"def test_emptyget(clean_queue):\n assert clean_queue.get(timeout=0.1) is None",
"def test_size_of_new_queue():\n queue = Queue()\n assert queue.size() == 0",
"def test_enqueue(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.size(), 3)",
"def test_can_instantiate_empty_queue(empty_queue):\n assert isinstance(empty_queue, Queue)",
"def test_dequeue(self):\n queue = Queue()\n self.assertEqual(queue.dequeue(), None)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.dequeue(), 1)\n self.assertEqual(queue.size(), 2)",
"def test_dequeue_empty(self):\n \n r = self.store.dequeue('/queue/nonexist')\n assert r is None\n \n assert self.store.has_frames('/queue/nonexist') == False\n assert self.store.size('/queue/nonexist') == 0",
"def test_size_empty(the_queue):\n assert the_queue.size() == 0",
"def test_size(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n self.assertEqual(queue.size(), 1)",
"def empty_queue():\n return Queue()",
"def empty_queue():\n return Queue()",
"def test_EmptyQueue(self):\n genFn = Mock(return_value=None)\n \n wrapper = KaoGenerator(genFn)\n self.assertEqual(len(wrapper._queue), 0)",
"def make_empty_queue():\n return Queue(0, None, None)",
"def test_queue_worker_needs_a_queue(self):\n with pytest.raises(ValueError):\n MinimalQueueWorker(None)",
"def test_dequeue():\n from parenthetics import Queue\n q = Queue()\n q.enqueue(0)\n assert q.dequeue() == 0",
"def test_initialization_with_empty_list_last_node_check():\n queue = Queue([])\n assert queue._queue.last_node is None",
"def test_pull_empty_queue(self) -> None:\n\n self.plugin.pull()\n\n self.assertEqual(\n len(self.plugin.queue),\n 0\n )",
"def test_peek(self):\n queue = Queue()\n self.assertEqual(queue.peek(), None)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.peek(), 1)\n self.assertEqual(queue.size(), 3)",
"def test_the_queue_dequeue(the_queue):\n the_queue.enqueue(2)\n assert the_queue.dequeue() == 2",
"def test_initialization_with_empty_list_first_node_check():\n queue = Queue([])\n assert queue._queue.first_node is None",
"def test_put_element(self):\n queue = Queue_()\n queue.put(1)\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), 1)\n self.assertEqual(queue.top(), 1)",
"def test_the_queue_size(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n assert the_queue.size() == 3",
"def test_get_element(self):\n data = (1, 2, 3, 4)\n queue = Queue_(data)\n self.assertEqual(queue.get(), data[0])",
"def testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )",
"def test_tx_queue_emptying(self):\n\n self._serial_handler.tx_queue.put((0,'test'))\n\n # Should write the message and then empty the queue\n self._serial_handler._write()\n\n self.assertEquals(self._serial_handler.tx_queue.qsize(), 0)",
"def test_default_value_of_front(empty_queue):\n assert empty_queue.front is None",
"def test_peak_empty_queue_returns_none():\n queue = Queue()\n assert queue.peak() is None",
"def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2",
"def assert_empty(self):\n if self._queue:\n raise CallQueueNotEmpty(\n \"Queue is not empty; {0} expected calls remaining.\"\n .format(len(self._queue))\n )"
]
| [
"0.8160254",
"0.8153244",
"0.8052863",
"0.7911015",
"0.7905001",
"0.7894963",
"0.76920027",
"0.7689933",
"0.7631737",
"0.7544532",
"0.75173104",
"0.75173104",
"0.7458197",
"0.74333274",
"0.7330861",
"0.73305017",
"0.729531",
"0.72764575",
"0.7261248",
"0.7248773",
"0.72299206",
"0.7227959",
"0.7114776",
"0.7089149",
"0.7049019",
"0.7028886",
"0.7017037",
"0.7014569",
"0.70047283",
"0.70034474"
]
| 0.8159753 | 1 |
Override the save method to save the first and last name to the user field. | def save(self):
# First save the parent form and get the user.
new_user = super(SignupFormExtra, self).save()
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.save()
# Userena expects to get the new user from this form, so return the new
# user.
return new_user | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, commit=True):\n\n email_local_part = self.cleaned_data['email'].split('@')[0]\n username_start = email_local_part[:5] if len(email_local_part) >= 5 else email_local_part\n self.instance.username = username_start + ''.join(\n [choice(ascii_letters) for _ in range(30 - len(username_start))])\n\n return super(RegisterForm, self).save(commit=commit)",
"def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n\n return user",
"def save(self, *args, **kwargs):\n self.username = self.username or self.email\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n\n self._set_first_initial()\n self._set_user()\n super(AbstractHuman, self).save(*args, **kwargs)",
"def user_profile_setname(token, name_first, name_last):\n if (len(name_first) > 50 or name_first == \"\"):\n raise error.InputError(description=\"First name is not within 1-50 characters\")\n\n if (len(name_last) > 50 or name_last == \"\"):\n raise error.InputError(description=\"Last name is not within 1-50 characters\")\n\n u_id = database.get_current_user(token)\n user = database.get_user_data(u_id)\n user['name_first'] = name_first\n user['name_last'] = name_last\n database.set_user_data(user)",
"def save(self, *args, **kwargs):\n self.name = unique_slugify(self.name, instance=self)\n\n if self.is_personal and self.user.username != self.name:\n self.user.username = self.name\n self.user.save()\n\n if self.is_customer:\n self.update_customer()\n\n if not self.image:\n self.set_image_from_name(should_save=False)\n\n return super().save(*args, **kwargs)",
"def save(self, *args):\n self.firstname, self.lastname, self.othername, self.email, self.phonenumber, self.passporturl, self.roles, self.nationalid, self.county, self.password, self.date_created, self.date_modified = args\n format_str = f\"\"\"\n INSERT INTO public.users (firstname,lastname,othername,email,phonenumber,passporturl,roles,nationalid,county,password,date_created,date_modified)\n VALUES ('{args[0]}','{args[1]}','{args[2]}','{args[3]}','{args[4]}','{args[5]}','{args[6]}','{args[\n 7]}','{args[8]}','{args[9]}','{(datetime.now())}','{(datetime.now())}');\n \"\"\"\n cursor.execute(format_str)",
"def update_user_info(user, save=True):\n p = bayou.Person.from_default_services(user.username)\n\n user.email = p.email if p.email else user.email\n user.first_name = p.first_name if p.first_name else user.first_name\n user.last_name = p.surname if p.surname else user.last_name\n\n if save:\n user.save()\n\n return user",
"def save(self, profile_callback=None):\n\n # First, save the parent form\n new_user = super(BodbRegistrationForm, self).save(profile_callback=profile_callback)\n\n # Update user with first, last names\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.save()\n\n # Update profile with affiliation\n profile = new_user.get_profile()\n profile.affiliation = self.cleaned_data['affiliation']\n profile.save()\n\n cache.set('%d.profile' % new_user.id, profile)\n\n return new_user",
"def save(self, **kwargs):\n doc = {\n 'username': self.username,\n 'first_name': self.first_name,\n 'last_name': self.last_name\n }\n if kwargs and self._parameters:\n doc.update(kwargs)\n elif self._parameters:\n doc.update(self._parameters)\n doc.update(kwargs)\n return self.collection().update_one(\n {'username': self.username},\n {'$set': doc},\n upsert=True)",
"def save_user(self, request, user, form, commit=True):\n from allauth.account.utils import user_username, user_email, user_field\n\n data = form.cleaned_data\n first_name = data.get('first_name')\n last_name = data.get('last_name')\n email = data.get('email')\n username = data.get('username')\n birth_date = data.get('birth_date')\n gender = data.get('gender')\n timezone = data.get('timezone')\n user_email(user, email)\n user_username(user, username)\n user.all_fields_completed = True\n if first_name:\n user_field(user, 'first_name', first_name)\n if last_name:\n user_field(user, 'last_name', last_name)\n if birth_date:\n user_field(user, 'birth_date', birth_date)\n if gender:\n user_field(user, 'gender', gender)\n if timezone:\n user_field(user, 'timezone', timezone)\n if 'password1' in data:\n user.set_password(data[\"password1\"])\n else:\n user.set_unusable_password()\n self.populate_username(request, user)\n if commit:\n # Ability not to commit makes it easier to derive from\n # this adapter by adding\n user.save()\n return user",
"def update_firstname(state: UserCreate, firstname: str) -> None:\n state.name.first = firstname\n state.slug = slugify(f\"super-user: {state.name.first} {state.name.last}\")",
"def save_user(self):\n args = parser.parse_args()\n data = {\n 'firstname': request.json.get('firstname').capitalize(),\n 'lastname': request.json.get('lastname').capitalize(),\n 'othernames': request.json.get('othernames', '').capitalize(),\n 'email': request.json.get('email').lower(),\n 'phoneNumber': request.json.get('phoneNumber'),\n 'username': request.json.get('username').lower(),\n 'registered': datetime.datetime.utcnow(),\n 'password': self.set_password(request.json.get('password')),\n 'isAdmin': self.isAdmin, 'public_id': self.public_id\n }\n userByEmail = self.get_user(data['email'])\n userByUsername = self.get_user(data['username'])\n if userByEmail is not None:\n return 'email exists'\n elif userByUsername is not None:\n return 'username exists'\n\n query = \"\"\"INSERT INTO users (firstname,lastname,othernames,email,phoneNumber,username,registered,password,isAdmin,public_id) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\"\n values = data['firstname'], data['lastname'], data['othernames'], data['email'], data['phoneNumber'], data[\n 'username'], data['registered'], data['password'], data['isAdmin'], data['public_id']\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return data",
"def test_save_rewrite(self):\n\n user = CustomUser.objects.get(email=\"[email protected]\")\n user.first_name = \"UpdatedName\"\n user.save()\n actual_user = CustomUser.objects.get(email=\"[email protected]\")\n\n self.assertEqual(actual_user.first_name, \"UpdatedName\")",
"def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name",
"def set_name(self):\n if self.first_name and self.last_name:\n name_string = \"%s\" % self.first_name\n name_string += \" %s\" % self.last_name\n self.name = name_string\n\n if self.name:\n if not self.first_name and not self.last_name:\n n = HumanName(self.name)\n self.first_name = n.first\n if n.middle:\n self.first_name = n.first + \" \" + n.middle\n self.last_name = n.last\n if n.suffix:\n self.last_name = n.last + \" \" + n.suffix",
"def _set_user(self):\n\n if '' in (self.last_name, self.first_name):\n return\n\n self._set_first_initial()\n\n User = get_user_model()\n try:\n self.user = User.objects.get(\n models.Q(last_name__iexact=self.last_name),\n models.Q(first_name__iexact=self.first_name) |\n models.Q(first_name__istartswith=self.first_initial[0])\n )\n except User.DoesNotExist:\n pass\n except User.MultipleObjectsReturned:\n pass",
"def last_name(self, instance):\r\n return instance.user.last_name",
"def save(self):\n # First save the parent form and get the user.\n new_user = super(SignupFormExtra, self).save()\n\n # Get the profile, the `save` method above creates a profile for each\n # user because it calls the manager method `create_user`.\n # See: https://github.com/django-userena-ce/django-userena-ce/blob/master/userena/managers.py#L65\n profile = new_user.my_profile\n profile.gender = self.cleaned_data['gender']\n profile.education = self.cleaned_data['education']\n profile.birthday = self.cleaned_data['birthday']\n profile.annual_income = self.cleaned_data['annual_income']\n profile.save()\n\n # Userena expects to get the new user from this form, so return the new\n # user.\n return new_user",
"def save(self)->None:\n database.cursor.execute(\n \"INSERT INTO users(firstname,lastname,othernames,email,phone,username,password,role) VALUES (%s,%s,%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.first_name,\n self.last_name,\n self.other_name,\n self.email,\n self.phone_number,\n self.user_name,\n self.password,\n self.is_admin\n ))\n super().save()",
"def save(self, commit=True):\n model = super(UserCreationForm, self).save(commit=False)\n model.username = self.cleaned_data['username']\n\n if commit:\n model.save()\n\n return model",
"def full_name(self):\n return \"{} {}\".format(self.user.first_name, self.user.last_name)",
"def test_last_name_is_optional(self):\n self.updated_data['last_name'] = ''\n self.update_user()\n self.assertEqual(self.user.last_name, self.updated_data['last_name'])",
"def full_name(self,first_name):\n full_name = self.first_name + ' ' + self.last_name\n return full_name",
"def save(self, commit=True):\n instance = super(AbstractUserChangeForm, self).save(commit=False)\n \n # Il faut obligatoirement mettre un username pour que le modèle de base\n # de Django fonctionne alors on copie simplement l'adresse courriel.\n instance.username = self.cleaned_data['email']\n if commit:\n instance.save()\n return instance",
"def save(self, commit=True):\n instance = super(AbstractUserCreationForm, self).save(commit=False)\n \n # Il faut obligatoirement mettre un username pour que le modèle de base\n # de Django fonctionne alors on copie simplement l'adresse courriel.\n instance.username = self.cleaned_data['email']\n if commit:\n instance.save()\n return instance",
"def register_user_last_name(self, message):\n try:\n self.db_handler.set_user_last_name(message.chat.id, message.text)\n\n self.logger.write_to_log('user last name added to db', message.chat.id)\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def save(self, request):\n user = get_user_model()()\n cleaned_data = self.get_cleaned_data()\n email = cleaned_data.get('email')\n nickname = cleaned_data.get('nickname')\n\n user.email = email\n user.nickname = nickname\n\n if 'password1' in cleaned_data:\n user.set_password(cleaned_data[\"password1\"])\n else:\n user.set_unusable_password()\n\n user.save()\n\n return user",
"def form_valid(self, form):\n print(self.object)\n User.objects.filter(username=self.object).update(\n first_name = form.cleaned_data['first_name'],\n last_name = form.cleaned_data['last_name'],\n email = form.cleaned_data['email'],\n gender = form.cleaned_data['gender'],\n date_of_birth = form.cleaned_data['date_of_birth'],\n )\n messages.success(self.request, 'Edited successfully')\n return super().form_valid(form)",
"def save(self, *args, **kwargs):\n c_d = self.cleaned_data\n if c_d.get('id') and c_d.get('avatar') and (\n isinstance(c_d.get('avatar'), UploadedFile)):\n person = get_object_or_404(Person, id=c_d.get('id'))\n try:\n old_avatar = person.avatar.file.name\n except ValueError:\n old_avatar = None\n person = super(PersonForm, self).save(*args, **kwargs)\n user = person.user\n user.username = c_d['username']\n user.first_name = c_d['first_name']\n user.last_name = c_d['last_name']\n user.email = c_d['email_address']\n pass1 = c_d.get('new_password')\n if pass1:\n user.set_password(pass1)\n user.save()\n if isinstance(c_d.get('avatar'), UploadedFile):\n os.remove(self.cleaned_data['avatar'].file.name)\n if old_avatar:\n os.remove(old_avatar)\n return person"
]
| [
"0.72708315",
"0.7223799",
"0.71601826",
"0.6896324",
"0.683202",
"0.6698966",
"0.66724044",
"0.6666655",
"0.6666425",
"0.66263896",
"0.66218114",
"0.6618675",
"0.65702695",
"0.6526201",
"0.65225655",
"0.64917177",
"0.6483562",
"0.64494735",
"0.6422482",
"0.64115214",
"0.6404829",
"0.63613987",
"0.6352722",
"0.6344611",
"0.6313272",
"0.6291017",
"0.628426",
"0.62737256",
"0.62659824",
"0.6225609"
]
| 0.73729247 | 0 |
Populate the `hateword` table in MongoDB with data from CSV file. | def populate_hateword_data():
with open("./data/hate-speech-lexicons/refined_ngram_dict.csv") as f:
lst = [row.split(',', 1)[0] for row in f]
lst = lst[1:]
lst = [{
'word': word,
'category': [],
'similar_to': []
} for word in lst]
try:
db = mongo_client.MongoClient(config.MONGO_URI).twitter
db.hateword.delete_many({})
result = db.hateword.insert_many(lst)
print("Completed populating", len(result.inserted_ids), "hate words")
except pymongo.errors.BulkWriteError as e:
print(e.details) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_records():\n\n with open('seed_data/records.csv', 'rb') as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n record_id, user_id, common_name, date_time, latitude, longitude, notes, seen, num_birds = row\n\n record = Record(record_id=record_id, user_id=user_id, common_name=common_name,\n date_time=date_time, latitude=latitude, longitude=longitude, \n notes=notes, seen=seen, num_birds=num_birds)\n\n db.session.add(record)\n\n db.session.commit()",
"def import_csv_to_mongodb(collection, csv_file_path):\n with open(csv_file_path) as csv_file:\n result = collection.insert_many(csv.DictReader(csv_file))\n return len(result.inserted_ids)",
"def populate(self):\n\n self.create_index()\n self.check_type()\n self.create_mapping()\n\n f = open(self.csv_file, 'rU')\n\n # Read the first line for all the headers\n headers = f.readline().split(',')\n\n # Read the rest of the document\n rows = f.readlines()\n added_counter = 0\n\n actions = []\n for row in rows:\n fields = row.split(',')\n obj = {}\n for header in headers:\n # we call lower-case here because we were originally using\n # analyzed strings in elasticsearch (and they were\n # automatically converted). Code was built based on that so it's\n # easiest to convert for now\n try:\n obj[header.replace('\\n', '')] = float(fields[\n headers.index(header)].replace('\\n', '').lower())\n except ValueError:\n obj[header.replace('\\n', '')] = fields[\n headers.index(header)].replace('\\n', '').lower()\n # check afterwards to replace empty strings with None (which json.dumps hopefully writes to null)\n if obj[header.replace('\\n', '')] == '':\n obj[header.replace('\\n', '')] = None\n try:\n item = {\n '_index': self.es_main_index,\n '_type': self.es_main_type,\n '_source': obj\n }\n\n actions.append(item)\n\n added_counter += 1\n print('%s new records added' % added_counter,\n end='\\r')\n sys.stdout.flush()\n\n if added_counter % self.chunk_size == 0:\n helpers.bulk(self.es, actions)\n actions = []\n\n except ConnectionError:\n print('There was a connection error. Check your Elastic' +\n ' Search setting and make sure Elastic Search is ' +\n 'running.')\n return False\n\n # add the remaining items\n if actions:\n helpers.bulk(self.es, actions)\n\n print('The update is completed. %s new records were added.' %\n added_counter)",
"def populate_stops(self):\n stops = self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)",
"def load_venues():\n\n print('load_venues')\n\n Venue.query.delete()\n\n for row in open(\"seed_data/venues.csv\"):\n row = row.rstrip()\n subcategory, \\\n created_by, \\\n title, \\\n addr_1, \\\n addr_2, \\\n city, \\\n postal_code, \\\n state = row.split(',')\n\n cat_sub = Category_Subcategory.query.filter_by(name=subcategory).first()\n\n vnu = Venue(subcategory_id=cat_sub.id,\n created_by=created_by,\n name=title,\n addr_1=addr_1,\n addr_2=addr_2,\n city=city,\n postal_code=postal_code,\n state=state)\n\n db.session.add(vnu)\n\n db.session.commit()",
"def load_users():\n\n print \"User\"\n\n for row in list(open(\"Users.csv\"))[1:]:\n name, zipcode, email = row.strip().split(\",\")\n\n user = User(name=name, \n zipcode=zipcode, email=email)\n\n db.session.add(user)\n\n db.session.commit()",
"def csv_to_db(db, filename):\n csv_table = open(filename, 'r')\n updater = [(int(dstr), int(hstr)) for (dstr, hstr) in csv.reader(csv_table)]\n db.bulk_update(updater)",
"def load_sundaes():\n\n print('load_sundaes')\n\n User.query.delete()\n\n for row in open(\"seed_data/sundaes.csv\"):\n row = row.rstrip()\n email, postal_code = row.split(',')\n\n\n usr = User(email=email,\n postal_code=postal_code)\n\n db.session.add(usr)\n\n db.session.commit()",
"def load_categories():\n\n print('load_categories')\n\n Category.query.delete()\n\n for row in open(\"seed_data/categories.csv\"):\n name = row.rstrip()\n\n cat = Category(name=name)\n\n db.session.add(cat)\n\n db.session.commit()",
"def save_words(csvf, word_set_id, orig_set_id=''):\n words = []\n headings = []\n\n with open(csvf, \"r\", encoding='utf-8-sig') as file:\n reader = csv.reader(file, delimiter=',')\n\n # Create dictionary keys\n for row in reader:\n i = 0\n while (i < len(row)):\n headings.append(row[i])\n i += 1\n break\n\n # Save STR values to each person\n for row in reader:\n i = 0\n word = {}\n\n while (i < len(row)):\n key = str(headings[i])\n value = row[i]\n word[key] = value\n i += 1\n words.append(word)\n\n # Get heading names\n lang1 = headings[0] # Original Language\n lang1p = headings[1] # Original transliteration\n lang2 = headings[2] # Translation Language\n lang2p = headings[3] # Translation transliteration\n wtype = headings[4] # Type of word (noun, verb)\n\n orig_lang_id = (db.execute(\n \"SELECT id FROM languages WHERE name = ?\", (lang1, )).fetchall())[0]['id']\n trans_lang_id = (db.execute(\n \"SELECT id FROM languages WHERE name = ?\", (lang2, )).fetchall())[0]['id']\n\n for w in words:\n word_type_id = (db.execute(\n \"SELECT id FROM word_type WHERE type = ?\", (w[wtype], )).fetchall())[0]['id']\n\n new_orig_word_id = (db.execute(\"INSERT INTO words ('wordstr', 'language_id', 'type', 'pronunciation') VALUES (?, ?, ?, ?)\",\n (w[lang1], orig_lang_id, word_type_id, w[lang1p])\n )).lastrowid\n con.commit()\n new_translated_word_id = (db.execute(\"INSERT INTO words ('wordstr', 'language_id', 'type', 'pronunciation') VALUES (?, ?, ?, ?)\",\n (w[lang2], trans_lang_id, word_type_id, w[lang2p])\n )).lastrowid\n con.commit()\n db.execute(\"INSERT INTO word_set_words (word_set_id, word_id) VALUES (?, ?)\",\n (word_set_id, new_translated_word_id))\n con.commit()\n # if orig_set_id is set\n if (orig_set_id != ''):\n db.execute(\"INSERT INTO word_set_words (word_set_id, word_id) VALUES (?, ?)\",\n (int(orig_set_id), new_orig_word_id))\n con.commit()\n # insert orig and its translation equivalent\n db.execute(\"INSERT INTO word_translation (orig_lang, trans_lang, orig_word, trans_word) VALUES (?, ?, ?, ?)\",\n (orig_lang_id, trans_lang_id, new_orig_word_id, new_translated_word_id))\n con.commit()\n # reverse orig & translation\n db.execute(\"INSERT INTO word_translation (orig_lang, trans_lang, orig_word, trans_word) VALUES (?, ?, ?, ?)\",\n (trans_lang_id, orig_lang_id, new_translated_word_id, new_orig_word_id))\n con.commit()\n file.close()\n return len(words)",
"def prepareSemanticDifferential():\r\n\t\r\n filename = (\"OsgoodOriginal.csv\") \r\n fileIn = open(filename, 'r')\r\n allData = []\r\n line = fileIn.readline()\r\n while line != \"\":\r\n line = fileIn.readline().strip()\r\n if line != \"\":\r\n values = line.split(',')\r\n wordData = {}\r\n wordData['word'] = str(values[0])\r\n wordData['evaluation'] = float(values[1])\r\n wordData['activity'] = float(values[2])\r\n wordData['potency'] = float(values[3])\r\n allData.append(wordData)\r\n fileIn.close()\r\n return allData",
"def load_users():\n\n print('load_users')\n\n for row in open(\"seed_data/users.csv\"):\n row = row.rstrip()\n\n email, \\\n postal_code, \\\n fname, \\\n lname, \\\n username, \\\n password, \\\n phone, \\\n role = row.split(',')\n\n\n usr = User(email=email,\n postal_code=postal_code,\n fname=fname,\n lname=lname,\n username=username,\n password=password,\n phone=phone,\n role=role)\n\n db.session.add(usr)\n\n db.session.commit()",
"def load_products_data(connection, csvfile):\n insert_sql = 'insert into products (id, description, genres) ' \\\n 'values (%s, %s, %s)'\n load_data(connection, insert_sql, get_data_from_file(csvfile))",
"def load_users():\n\n for i, row in enumerate(open('seed_data/users.csv')):\n data = row.rstrip().split(\",\")\n user_id, email, password = data\n\n user = User(user_id=user_id, email=email,\n password=password)\n\n db.session.add(user)\n\n # For testing, just to see it was happening\n # if i % 100 == 0:\n # print i\n\n db.session.commit()",
"def seed_db_from_csv(csv):\n\n # Delete any existing rows\n Event.query.delete()\n db.session.commit()\n\n with open(csv, 'r') as csv_file:\n # Skip the first row of column headers\n rows = [row.strip().split(',')[:11] for row in csv_file.readlines()[1:]]\n\n for _, _, _, _, _, state, date, _, _, kind, title in rows:\n event = Event(kind, date=date[:10], state=state, title=title.strip('\"'))\n db.session.add(event)\n\n try:\n # Persist changes if entire table was imported successfully\n db.session.commit()\n return True\n except Exception as e:\n db.session.rollback()\n return False",
"def load_equipment():\n\n for row in open(\"static/equipment.csv\"):\n row = row.rstrip()\n \n gear_name, category, brand, lender_email, zipcode, gear_photo, gear_photo_url = row.split(\",\")\n\n equipment = Equipment(\n gear_name=gear_name,\n category=category,\n brand=brand,\n lender_email=lender_email,\n zipcode=zipcode,\n gear_photo=gear_photo,\n gear_photo_url=gear_photo_url)\n\n db.session.add(equipment)\n \n\n db.session.commit()",
"def fill_words_table(self, statistics, path, filemoving, conn, logg, parser):\n logg.writing_log(conn, 'Starting filling words table')\n c = conn.cursor()\n val1 = statistics.book_name(path, filemoving, parser).replace(' ', '_')\n sql1 = \"CREATE TABLE \" + val1 + \" (word text, count integer, count_uppercase integer)\"\n c.execute(sql1)\n val2 = statistics.frequency(path, filemoving, parser)\n sql2 = \"INSERT INTO \" + val1 + \" VALUES(?,?,?)\"\n for key, value in val2.items():\n if not key.istitle():\n c.execute(sql2, (key, value, (0 if val2.get(key.capitalize()) == None else val2.get(key.capitalize()))))\n logg.writing_log(conn, 'Words table is filled')\n conn.commit()",
"def load_topics():\n\n print \"Importing topics...\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate retailers\n Topic.query.delete()\n\n # Read CSV file\n with open(\"seed_data/topics.csv\") as source_file:\n example_data = list(csv.reader(source_file))\n\n # skip header row for populating db\n for list_item in example_data[1:]:\n topic = Topic(topic_title=list_item[1])\n\n # Add the current retailer to the session\n db.session.add(topic)\n\n # Commit the db.session changes to the database\n db.session.commit()",
"def get_districts():\n\n for i, row in enumerate(open('data/student_counts.csv')):\n data = row.rstrip().split(\",\")\n district_name = data[0]\n county = data[1]\n\n if District.query.filter_by(district_name=district_name).first():\n continue\n\n county_id = County.query.filter_by(county_name=county).first().county_id\n\n district = District(district_name=district_name, county_id=county_id)\n\n db.session.add(district)\n\n db.session.commit()",
"def _importInDjango(self):\n\n with open(settings.DATA_PATH, 'r', encoding='latin-1') as csv_file:\n reader = csv.DictReader(csv_file, delimiter=';')\n for raw in reader:\n\n # Créer ou mettre à jour la division\n division, created = Division.objects.get_or_create(\n nom=raw['Division']\n )\n if created:\n self.stdout.write(\n 'Divion {} ajoutée'.format(division.nom)\n )\n\n # Créer ou mettre à jour les équipes\n equipeDom, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 1'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeDom.nom)\n )\n\n equipeExt, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 2'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeExt.nom)\n )\n\n # Créer ou mettre à jour la rencontre\n scoreDom = 0 if raw['Score 1'] == '' else int(raw['Score 1'])\n scoreExt = 0 if raw['Score 2'] == '' else int(raw['Score 2'])\n forfaitDom = True if raw['Forfait 1'] == 'true' else False\n forfaitExt = True if raw['Forfait 2'] == 'true' else False\n date = datetime.datetime.strptime(raw['Date de rencontre'], '%d/%m/%Y')\n heure = datetime.datetime.strptime(raw['Heure'], '%H:%M')\n rencontre, created = Rencontre.objects.update_or_create(\n numero=int(raw['N° de match']),\n equipeDom=equipeDom,\n equipeExt=equipeExt,\n defaults={\n 'date': date,\n 'heure': heure,\n 'scoreDom': scoreDom,\n 'scoreExt': scoreExt,\n 'forfaitDom': forfaitDom,\n 'forfaitExt': forfaitExt,\n }\n )\n if created:\n self.stdout.write(\n 'Rencontre {} / {} ajoutée'.format(\n rencontre.equipeDom,\n rencontre.equipeExt\n )\n )",
"def get_counties():\n\n for i, row in enumerate(open('data/counties_data.csv')):\n data = row.rstrip().split(\",\")\n county_name, latitude, longitude, county_name_lower = data\n\n county = County(county_name=county_name, latitude=latitude, longitude=longitude, county_name_lower=county_name_lower)\n\n db.session.add(county)\n\n if i % 100 == 0:\n print(i)\n\n db.session.commit()",
"def load_users():\n filepath = \"./seed_data/u.user\"\n users = open(filepath)\n\n\n for user in users:\n user = user.rstrip().split('|')\n db_user = User(user_id=user[0], age=user[1], zipcode=user[4])\n db.session.add(db_user)\n\n db.session.commit()",
"def insert_into_solr():\n solr = pysolr.Solr('http://localhost:8983/solr/mag_journals', always_commit=True)\n filepath = '/vol1/mag/data/2018-07-19/dumps/Journals.txt'\n\n list_for_solr = []\n with open(filepath, \"r\") as file:\n csv_reader = csv.reader(file, delimiter='\\t')\n for journal_id, rank, normalized_name, display_name, issn, publisher, webpage, paper_count, citation_count, created_date in csv_reader:\n solr_record = {}\n solr_record['journal_id'] = journal_id\n solr_record['rank'] = rank\n solr_record['normalized_name'] = normalized_name\n solr_record['display_name'] = display_name\n solr_record['issn'] = issn\n solr_record['publisher'] = publisher\n solr_record['webpage'] = webpage\n solr_record['paper_count'] = paper_count\n solr_record['citation_count'] = citation_count\n solr_record['created_date'] = created_date\n list_for_solr.append(solr_record)\n # Upload to Solr: 48000-odd rows\n solr.add(list_for_solr)",
"def seed_user_data(filename):\n\n #open file and go through it line by line\n log_file = open(filename)\n\n for line in log_file:\n data = line.strip().split(\"|\") #data is a list\n\n #get data from split line\n id = int(data[0])\n age = int(data[1])\n zip = data[4]\n\n #create a new record and add it to the queue\n new_user = User(user_id=id, age=age, zipcode=zip)\n db.session.add(new_user)\n\n #commit changes\n db.session.commit()",
"def populate_table_from_csv(csv_file, csv_encoding='iso-8859-15'):\n try:\n with open(file=csv_file, mode='r', encoding=csv_encoding) as input_file:\n # Could find a good place to add iterators/generators/comprehensions elsewhere, so made a new function\n # Also, yet another pylint false positive. The below line isn't supposed to be assigned to anything.\n [add_customer(*l.split(',')) for l in input_file if 'Id,Name,Last_name,' not in l] # pylint: disable=W0106\n except Exception as e:\n logger.error(\"Failed to load records from csv file %s into database %s: %s\", csv_file, customer_db.database, e)",
"def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()",
"def test_add_talks_from_csv(self):\r\n\r\n fname = self._csvfile\r\n\r\n presentation = Presentation(\"Building NetBSD\", \"David Maxwell\")\r\n\r\n self.db.add_talks_from_csv(fname)\r\n self.assertTrue(self.db.presentation_exists(presentation))",
"def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)",
"def load_users():\n\n print \"Loading Users\"\n\n for i, row in enumerate(open(\"data/mock_user_data.csv\")):\n row = row.rstrip()\n user_id, email, password = row.split(\",\")\n\n user = User(email=email)\n\n db.session.add(user)\n\n db.session.commit()",
"def ingest_rental_csv(csv_path):\n # Create a CSV import generator (next yields one db row)\n import_generator = import_csv_gen(csv_path)\n # Skip over the title row\n next(import_generator)\n # Iterate over all other rows\n while True:\n try:\n data = next(import_generator)\n if len(data) != 2:\n logger.error(f'Data with incorrect item count: {len(data)}')\n continue\n # extract items from list and add document to database\n with Connection():\n rental = Rental(\n product_id=data[RENTAL_PROD_ID],\n user_id=data[RENTAL_USER_ID]\n )\n rental.save() # This will perform an insert\n except StopIteration:\n break"
]
| [
"0.62989795",
"0.61529267",
"0.60396224",
"0.5890785",
"0.58553517",
"0.58194333",
"0.58194286",
"0.5814144",
"0.5806153",
"0.57831645",
"0.5744489",
"0.5741411",
"0.57202476",
"0.569331",
"0.5675714",
"0.56400776",
"0.56394845",
"0.5633955",
"0.5550253",
"0.5521801",
"0.5515218",
"0.54884243",
"0.5481542",
"0.54772335",
"0.54665834",
"0.54638565",
"0.5448344",
"0.54474825",
"0.5409698",
"0.5406267"
]
| 0.7946996 | 0 |
Prepopulate user data for the app, including an admin account | def populate_user_data():
try:
db = mongo_client.MongoClient(config.MONGO_URI).twitter
db.user.insert_one(
{
'username': 'admin',
'password': 'admin',
}
)
print("Created an admin account")
except Exception as e:
print(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init():\n create_user(app)\n get_all_user()",
"def prefill(self, user):\n print('prefilling')\n self.username.data = user.username\n self.full_name.data = user.full_name\n self.email.data = user.email",
"def init_data_for_admin(db_data):\n administrators = db_data.get('administrator')\n if administrators is not None:\n rows = administrators.get('data')\n for row in rows:\n administrator = Administrator(\n name=row[0], password=generate_password_hash(row[1]))\n db_add_and_commit(db, administrator)",
"def load_initial_data(apps, schema_editor):\n\n\n #\n # get the model by name\n User = apps.get_model('auth', 'User')\n password = User.objects.make_random_password()\n\n\n draftboard = User()\n draftboard.username= settings.USERNAME_DRAFTBOARD\n draftboard.password = make_password(password)\n draftboard.is_superuser = False\n draftboard.is_staff = True\n draftboard.save()\n\n escrow = User()\n escrow.username = settings.USERNAME_ESCROW\n escrow.password= make_password(password)\n escrow.is_superuser = False\n escrow.is_staff = True\n escrow.save()",
"def default_admin_setup(self, *args):\n name = args[0]\n email_address = args[1]\n password = args[2]\n account_type = args[3]\n created_on = args[4]\n last_modified = args[5]\n select_users = \"SELECT * FROM users;\"\n self.cursor.execute(select_users)\n an_admin = self.cursor.fetchall()\n if not an_admin:\n insert_user = \"INSERT INTO users(name, email_address, password, account_type, created_on, last_modified) \" \\\n \"VALUES('{}', '{}', '{}', '{}', '{}', '{}');\"\\\n .format(name, email_address, password, account_type, created_on, last_modified)\n self.cursor.execute(insert_user, (name, email_address, password, account_type, created_on, last_modified))\n self.connection.commit()",
"def init_data_for_users(db_data):\n users = db_data.get('user')\n if users is not None:\n rows = users.get('data')\n for row in rows:\n user = User(name=row[0], password=generate_password_hash(row[1]))\n db_add_and_commit(db, user)",
"def _set_user_info(self):\n sha = sha1(self.email).hexdigest()\n user_info = redis.hgetall(\"sl:account:{}\".format(sha))\n\n if (type(user_info) != dict or\n user_info.get(\"password\") != self.password):\n user_info = {}\n\n try:\n self.plan = Plan.from_id(user_info.get(\"plan\"))\n except SleekException:\n self.plan = None\n self.customer_token = str_to_none(\n user_info.get(\"customer_token\")\n )\n self.subscription_token = str_to_none(\n user_info.get(\"subscription_token\")\n )\n self.subscription_end = str_to_none(\n user_info.get(\"subscription_end\")\n )",
"def setUp(self):\n User.users = {}\n self.app = User('[email protected]', 'admin', 'admin')\n # Set some default user data\n self.user_data = {\n 1: {\n 'email': '[email protected]',\n 'username': 'admin',\n 'password': 'admin' \n }\n \n }",
"def on_start(self):\n admin_user = os.environ['ADMIN_USER']\n admin_password = os.environ['ADMIN_PASSWORD']\n admin_domain_name = os.environ['ADMIN_DOMAIN_NAME']\n admin_project_id = os.environ['ADMIN_PROJECT_ID']\n HEADERS['X-Auth-Token'] = self._get_token(admin_user,\n admin_password,\n admin_domain_name,\n project_id=admin_project_id)\n # Create test user\n self.username = 'test_user'\n self.password = 'Password1'\n self.user_domain_id = 'default'\n self.user_domain_name = 'Default'\n self.project_id = self._create_project()['project']['id']\n self._create_user(self.username, self.password, self.user_domain_id,\n self.project_id)",
"def on_start(self):\n # TODO: do the opposite of pop here, cut?\n new_user = next(users)\n\n self.user_data = new_user\n self.signup()\n # self.login()",
"def after_db_init():\n with app_instance.app_context():\n # Creates any models that have been imported\n db.create_all()\n\n # Init security for the application\n from .security import user_datastore\n\n # Create the Admin user\n if not UserModel.find(1):\n user_datastore.create_role(name='_permissions | admin')\n user_datastore.create_role(name='_permissions | manager')\n user_datastore.create_role(name='_permissions | agent')\n user_datastore.create_user(\n username='admin',\n email='[email protected]',\n password='password',\n first_name='Super',\n last_name='Admin',\n roles=['_permissions | admin']\n )\n db.session.commit()\n\n # Register the admin views to the extension\n admin.add_view(\n UsersView(\n UserModel, db.session, name='Manage Users', category='User Admin'\n )\n )\n admin.add_view(RolesView(RolesModel, db.session, name='Manage Privileges', category='User Admin'))",
"def setup_user():\n if 'auth_user' in flask.session:\n user = models.User.query.get(flask.session['auth_user'])\n if user is None:\n # old bad cookie, no good\n del flask.session['auth_user']\n # save the user in `flask.g`, which is a set of globals for this request\n flask.g.user = user",
"def createAdmin():\n select_user_by_email = \"\"\"\n SELECT id, username, password, email FROM users\n WHERE users.email = '{}'\"\"\".format(\"[email protected]\")\n\n isUserPresent = select_data_from_db(select_user_by_email)\n if not isUserPresent:\n conn, cursor = connect_to_db()\n password = generate_password_hash('BootcampWeek1')\n create_admin_if_not_present = \"\"\"\n INSERT INTO users(username, firstname, lastname, othername ,\n phone, email, password, passportUrl , isPolitician ,isAdmin)\n VALUES(\n '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}'\n )\"\"\".format('OriginalAdmin',\n 'FirstAdminName', 'LastAdminName',\n 'OtherAdminName', '0742546892',\n '[email protected]', password, \"\",\n False, True)\n cursor.execute(create_admin_if_not_present)\n conn.commit()\n conn.close()",
"def setup_user(self):\r\n self.email = '[email protected]'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)",
"def update_user():",
"def create_admin():\n admin = models.User(username= 'gallery_admin', email='[email protected]', address='#0000' , password =bcrypt.generate_password_hash('toledano',\n current_app.config.get('BCRYPT_LOG_ROUNDS')).decode('utf-8'), admin=True)\n admin.save()",
"def example_data():\n\n User.create_user(\"Kate\", \"longpass\", None)\n User.create_user(\"Long\", \"regularpass\", None)\n User.create_user(\"Critter\", \"shortpass\", None)",
"def load_account_data(self):\n if self.acc is None:\n return\n\n self.name_entry.delete(0, tk.END)\n self.name_entry.insert(0, self.acc.name)\n self.name_entry.config(state='disabled')\n\n self.email_entry.delete(0, tk.END)\n self.email_entry.insert(0, self.acc.email)\n\n self.user_entry.delete(0, tk.END)\n self.user_entry.insert(0, self.acc.username)\n\n key = secrets.decrypt_field(self.us.crypt_key)\n self.pass_entry.delete(0, tk.END)\n self.pass_entry.insert(0, secrets.decrypt_data(key, self.acc.password))",
"def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()",
"def user_data(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n '_id' : self._id\n }",
"async def prepare(self):\n\n # Read the secure cookie which exists if we are in an authenticated\n # context (though not if the caimira webservice is running standalone).\n session = json.loads(self.get_secure_cookie('session') or 'null')\n\n if session:\n self.current_user = AuthenticatedUser(\n username=session['username'],\n email=session['email'],\n fullname=session['fullname'],\n )\n else:\n self.current_user = AnonymousUser()",
"def create_user():\n\n username = str(request.parsed_json['username'])\n email = str(request.parsed_json['email'])\n password = str(request.parsed_json['password'])\n\n res = auth.create_user(username, email, password)\n if not res:\n return create_error(400, str(res))\n\n globalopts.appdata[username] = {\n 'user': username,\n 'Appdata': {'Total': globalopts.DEFAULT_WEEKLY_TIMES},\n 'Goals': [globalopts.DEFAULT_GOALS]\n }\n\n print(globalopts.appdata[username])\n\n return \"\", 200",
"def _add_user(data: dict) -> dict:\n user = create_user()\n name = []\n if 'first_name' in data:\n name.append(data['first_name'])\n if 'middle_name' in data:\n name.append(data['middle_name'])\n if 'last_name' in data:\n name.append(data['last_name'])\n user['name'] = ' '.join(name)\n if 'role' in data:\n user['exp']['exp']['title'] = data['role']\n if 'affiliation' in data:\n user['abs'] = data['affiliation']\n user['exp']['exp']['company'] = data['affiliation']\n elif 'organization' in data:\n user['abs'] = data['organization']\n user['exp']['exp']['company'] = data['organization']\n phone = []\n if 'phone' in data:\n phone.append(data['phone'])\n if 'phone_ext' in data:\n phone.append(data['phone_ext'])\n user['contact']['phone'] = '-'.join(phone)\n user['contact']['email'] = data['email'] if 'email' in data else ''\n if 'degrees' in data:\n if not user.title:\n user['edu']['degree'] = data['degrees']\n if len(user['name']) < 0:\n user['name'] = user['contact']['email'] if len(user['contact']['email']) > 0 else 'Anonymous'\n return user",
"def set_user_info(self, usrs):\r\n logger.info('Starting set user profile info')\r\n user = choice(usrs)\r\n self.title = user['title']\r\n self.fname = user['fname']\r\n self.lname = user['lname']\r\n self.email = user['email']\r\n self.password = user['password']\r\n self.dob = user['dob']\r\n self.company = user['company']\r\n self.address = user['address']\r\n self.city = user['city']\r\n self.postalcode = user['postalcode']\r\n self.phone = user['phone']\r\n logger.info('Ending set user profile info')",
"def p_makeAdminUser(self):\n\n # If already in database, return\n if self.dbManager.userExists(C_ADMINISTRATOR_USERNAME):\n return\n # Store admin in database\n self.dbManager.createUser(C_ADMINISTRATOR_USERNAME, C_ADMINISTRATOR_PASSWORD, UserRole.ADMIN, defaultPacemakerParameterData)",
"def handle(self, *args, **options):\r\n username = 'populate_creators_command'\r\n email = '[email protected]'\r\n try:\r\n admin = User.objects.create_user(username, email, 'foo')\r\n admin.is_staff = True\r\n admin.save()\r\n except IntegrityError:\r\n # If the script did not complete the last time it was run,\r\n # the admin user will already exist.\r\n admin = User.objects.get(username=username, email=email)\r\n\r\n for user in get_users_with_role(CourseInstructorRole.ROLE):\r\n add_user_with_status_granted(admin, user)\r\n\r\n # Some users will be both staff and instructors. Those folks have been\r\n # added with status granted above, and add_user_with_status_unrequested\r\n # will not try to add them again if they already exist in the course creator database.\r\n for user in get_users_with_role(CourseStaffRole.ROLE):\r\n add_user_with_status_unrequested(user)\r\n\r\n # There could be users who are not in either staff or instructor (they've\r\n # never actually done anything in Studio). I plan to add those as unrequested\r\n # when they first go to their dashboard.\r\n\r\n admin.delete()",
"def setUpAuth(self):\n self.user, self.user_headers = self.authUser()\n self.admin, self.admin_headers = self.authAdmin()",
"def db_override_user_data(self):\n util.log(\"Clearing old user data\", util.LogLevel.Info)\n self.db.db_clear_data_user()\n util.log(\"Attempt loading user data to database\", util.LogLevel.Info)\n start = time.time()\n # Library\n for card in self.library.values():\n self.db.lib_card_add(card)\n # Tags\n for tag, card_ids in self.tags.items():\n self.db.tag_new(tag)\n for card_id in card_ids:\n self.db.tag_card_add(tag, card_id)\n # Wants\n for list_name, cards in self.wants.items():\n self.db.wants_new(list_name)\n for card in cards:\n self.db.wants_card_add(list_name, card.multiverse_id)\n end = time.time()\n util.log(\"Finished in {}s\".format(str(round(end - start, 3))), util.LogLevel.Info)\n self.push_status(\"User data imported\")",
"def setUpTestUsers(self) -> None:\n self.password = \"thisisasecret\"\n self.other = get_user_model().objects.create_user(\"other\", password=self.password)\n self.user = get_user_model().objects.create_user(\"user\", password=self.password)\n self.admin = get_user_model().objects.create_superuser(\"admin\", password=self.password)\n self.anonymous = AnonymousUser()",
"def do_user_import(request, user, user_object_dict, setting_dict):\n\n insert = not bool(user) # insert or update\n user = user or User() # existing or new user\n override = setting_dict['override'] # update ALL fields\n\n # insert/update user\n for field in user_field_names:\n if field == 'password' or field == 'username' or \\\n (not insert and field in setting_dict['key']):\n continue\n if field in user_object_dict:\n if override:\n setattr(user, field, user_object_dict[field])\n else:\n # fill out the blank field only\n if getattr(user, field) == '':\n setattr(user, field, user_object_dict[field])\n\n if insert:\n if 'username' in user_object_dict: # set username\n user.username = user_object_dict['username']\n\n # generate if not username\n user.username = get_unique_username(user)\n\n if 'password' in user_object_dict and (insert or override):\n user.set_password(user_object_dict['password'])\n\n if not user.password:\n user.set_password(User.objects.make_random_password(length=8))\n\n user.is_active = bool(setting_dict['interactive'])\n\n if not bool(validate_email(user.email)):\n user.email = '' # if not valid; empty it out\n\n # loop through user properties; truncate at max_length\n for key, value in user.__dict__.items():\n max_length = 90\n try:\n max_length = User._meta.get_field_by_name(key)[0].max_length\n except FieldDoesNotExist:\n max_length = None\n if max_length: # truncate per max_length field attribute\n setattr(user, key, value[:max_length])\n\n # username and email required\n if user.username and user.email:\n\n # insert/update record\n if insert:\n user.save(force_insert=True)\n else:\n user.save(force_update=True)\n\n try: # get or create\n profile = user.profile\n except Profile.DoesNotExist:\n profile = Profile.objects.create(user=user,\n creator=request.user,\n creator_username=request.user.username,\n owner=request.user,\n owner_username=request.user.username,\n )\n\n for field in profile_field_names:\n if field in user_object_dict:\n\n if override:\n setattr(profile, field, user_object_dict[field])\n else:\n # fill out the blank field only\n if getattr(profile, field) == '':\n setattr(profile, field, user_object_dict[field])\n\n profile.save()\n\n # add to group\n if setting_dict['group']:\n try:\n gm = GroupMembership.objects.get(group=setting_dict['group'],\n member=user)\n except GroupMembership.DoesNotExist:\n gm = GroupMembership()\n gm.member = user\n gm.group = setting_dict['group']\n gm.creator_id = request.user.id\n gm.creator_username = request.user.username\n gm.owner_id = request.user.id\n gm.owner_username = request.user.username\n gm.status = 1\n gm.status_detail = 'active'\n gm.save()\n\n return user"
]
| [
"0.6845231",
"0.6764182",
"0.6723294",
"0.6634625",
"0.66286814",
"0.66168195",
"0.65961397",
"0.6412519",
"0.6382036",
"0.6321992",
"0.63086635",
"0.6226326",
"0.6178413",
"0.61414754",
"0.6119165",
"0.6108649",
"0.6106677",
"0.60994",
"0.60962856",
"0.6091124",
"0.60897976",
"0.60866326",
"0.6085912",
"0.60804874",
"0.60239446",
"0.5994153",
"0.5984708",
"0.59751004",
"0.59608936",
"0.5947849"
]
| 0.7075983 | 0 |
add a new vertex object to the graph with the given key and return the vertex | def add_vertex(self, key):
#increments the number of vertices
#creates a new vertex
#adds the new vertex to the vertex list
#returns the new vertex
        if key is not None:
self.num_vertices += 1
new_vertex = Vertex(key)
self.vert_list[key] = new_vertex
return new_vertex
raise KeyError("There's no key here") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_vertex(self, key):\n vertex = Vertex(key)\n self.vertices += 1\n self.graph[key] = vertex\n\n return vertex",
"def add_vertex(self, key):\n self.vertCount += 1\n addedVertex = vertex.Vertex(key)\n self.vertList[key] = addedVertex\n return addedVertex",
"def add_vertex(self, key):\n vertex = Vertex(key)\n self.vertices[key] = vertex",
"def add_vertex(self, key):\n # increment the number of vertices\n self.num_vertices += 1\n # create a new vertex\n vertex = Vertex(key)\n # add the new vertex to the vertex dictionary with a list as the value\n # self.vert_dict[vertex] = []\n # add the new vertex to the vertex list\n self.vert_dict[key] = vertex\n # return the new vertex\n return vertex",
"def add_vertex(self, key):\n if key in self.vertices:\n raise ValueError('Key is already in use')\n \n # Create vertex\n self.vertices[key] = GraphVertex(key=key)",
"def addVertex(self, key):\n if key not in self.vertList:\n self.numVertices += 1\n vtx = Vertex(key)\n self.verList[key] = vtx\n return vtx",
"def add_vertex(self, key: str, data=None):\n if key in self._vertex_map:\n self._vertex_map[key].data = data\n else:\n v = Vertex(key, data)\n self._vertex_map[key] = v",
"def add(self, key, data):\n if key not in self.vertices:\n self.numberOfVertices += 1\n self.vertices[key] = Vertex(key, data)\n return True\n\n return False",
"def add_vertex(self, vertex_id):\n pass # TODO",
"def __add__(self, vertex):\n\n if isinstance(vertex, Vertex):\n vName = vertex.name\n self._vertices[vName] = vertex",
"def add_vertex(self, vertex_id): # O(1) time complexity\n self.vertices[vertex_id] = set() \n\n # additional options (class)\n '''\n if vertex_id not in self.vertices:\n self.vertices[vertex_id] = {}\n\n else:\n return \"Vertex is already in Graph\"\n '''",
"def add_vertex(self, vertex):\r\n if vertex not in self.__graph_dict:\r\n self.__graph_dict[vertex] = {}",
"def get_vertex(self, key):\n\n vertex = None\n try: \n vertex = self.graph[key]\n except KeyError:\n raise ValueError(\"Vertex with key {} not in Graph\".format(key))\n\n return vertex",
"def add_vertex(self, vertex):\n if vertex.id not in self.vertices.keys():\n self.vertices[vertex.id] = vertex",
"def add_vertex(self, vertex_id):\n # just add new dict entry\n self.vertices[vertex_id] = set()\n\n pass # TODO",
"def add_vertex(self, vertex):\n if vertex not in self.graph_dict:\n self.graph_dict[vertex] = []\n return vertex",
"def add_vertex(self, vertex):\n self[vertex] = {}",
"def add_vertex(self, v):\n self[v] = {}",
"def add_vertex(self, v):\n self[v] = {}",
"def add_vertex(self, vertex):\n try:\n vertex_idx = self.vertices.index(vertex)\n # print \"{} already in {}\".format(vertex, self.vertices)\n return self.vertices[vertex_idx]\n except Exception:\n self.vertices.append(vertex)\n # print \"adding {} to {}\".format(vertex, self.vertices)\n return vertex",
"def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []",
"def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []",
"def add_vertex(self, v):\n v = {'x': v[0], 'y': v[1]}\n if v not in self:\n self.append(v)\n return len(self)-1\n return self.index(v)",
"def add_vertex(self, vertex):\n raise NotImplementedError",
"def get_vertex(self, key):\n return self.vertices[key]",
"def add_vertex(self, vertex):\n if self.contains(vertex):\n return None\n if self.is_weighted():\n self._graph[vertex] = dict()\n else:\n self._graph[vertex] = set()\n return True",
"def add_vertex(self, vertex):\n if vertex not in self.graph_dict:\n self.graph_dict[vertex] = []",
"def add_vertex(self, vertex_id):\n # add new vertex in vertices\n self.vertices[vertex_id] = set()\n\n # increment len\n self.len += 1",
"def add_vertex(self,vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []\n # logging.debug(\"vertex being initialized ..\", vertex)\n else:\n # logging.debug(\"vertex not added ..\", vertex)\n pass",
"def add_vertex(self, vertex_name: n):\n new_vertex = Vertex(vertex_name)\n self._graph[new_vertex.name] = new_vertex"
]
| [
"0.8762225",
"0.87257594",
"0.87200457",
"0.84958625",
"0.84928226",
"0.84297377",
"0.8041286",
"0.7621432",
"0.7442868",
"0.7416063",
"0.7397369",
"0.73909426",
"0.73780626",
"0.7367396",
"0.73670876",
"0.73386776",
"0.7307447",
"0.7295609",
"0.7295609",
"0.72255313",
"0.716662",
"0.716662",
"0.7162621",
"0.71616876",
"0.71037674",
"0.70966697",
"0.7074905",
"0.70747334",
"0.7037856",
"0.70320064"
]
| 0.87494206 | 1 |
return the vertex if it exists | def get_vertex(self, n):
#returns the vertex if it is in the graph
        if n in self.vert_list:
return self.vert_list[n]
else:
raise KeyError("It would appear the vertex you are searching for does not exist") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vertex(self, key):\n if key in self.vertList:\n return self.vertList[key]\n else:\n return None",
"def get_vertex(self, key):\n\n vertex = None\n try: \n vertex = self.graph[key]\n except KeyError:\n raise ValueError(\"Vertex with key {} not in Graph\".format(key))\n\n return vertex",
"def get_vertex(self, vertex):\n # return the vertex if it is in the graph\n if vertex in self.vert_dict:\n return self.vert_dict[vertex]\n else:\n raise ValueError('Vertex not in graph')",
"def get_vertex(self, vertex_name: n) -> Union[Vertex, None]:\n try:\n return self._graph[vertex_name]\n except KeyError:\n return None",
"def get_vertex(self, name):\n return self.vertices[name]",
"def get_vertex(self, n):\n \n if n in self.vert_dict:\n return self.vert_dict[n]\n else:\n return None",
"def add_vertex(self, vertex):\n try:\n vertex_idx = self.vertices.index(vertex)\n # print \"{} already in {}\".format(vertex, self.vertices)\n return self.vertices[vertex_idx]\n except Exception:\n self.vertices.append(vertex)\n # print \"adding {} to {}\".format(vertex, self.vertices)\n return vertex",
"def get_vertex(self, current_id):\n if current_id is None:\n raise Exception('Vertex ID can not be None')\n return self.vertices.get(current_id)",
"def get_vertex(self, key):\n return self.vertices[key]",
"def is_vertex(self): \n return False",
"def is_vertex(self):\n return True",
"def is_vertex(self):\n return True",
"def is_vertex(self):\n return False",
"def has_vertex(self, vertex) -> bool:\n return self._start is vertex or self._end is vertex",
"def hasVertex(self, vertexNumber):\n try:\n rs = self.findVertex(vertexNumber)\n return 0\n except VertexError, e:\n return 1",
"def contains_vertex(self, v_name: str) -> bool:\n for i in self.adj_list:\n if i == v_name:\n return True\n return False",
"def getVertex(self, key):\n return self.vertList[key]",
"def find_vertex_from_word(self, word):\n for vertex in self.vertices:\n if vertex.word == word:\n return vertex\n\n return None",
"def add_vertex(self, vertex_id): # O(1) time complexity\n self.vertices[vertex_id] = set() \n\n # additional options (class)\n '''\n if vertex_id not in self.vertices:\n self.vertices[vertex_id] = {}\n\n else:\n return \"Vertex is already in Graph\"\n '''",
"def add_vertex(self, vertex):\n if self.contains(vertex):\n return None\n if self.is_weighted():\n self._graph[vertex] = dict()\n else:\n self._graph[vertex] = set()\n return True",
"def _get_available_edge(self, vertex):\n if len(self.edges) == 0:\n return None \n\n filtered = list(filter(lambda e: e.vertex == vertex, self.edges))\n if len(filtered) == 0:\n return None\n return filtered[0] # if exist, return the first one",
"def contains_vertex(self, vertex_name: n):\n return vertex_name in self._graph.keys()",
"def getVertex(self, x, y, z, epsilon=COMPARISON_EPSILON):\n for v in self.vertices:\n if (v.x - x)**2 + (v.y - y)**2 + (v.z - z)**2 <= epsilon**2:\n return v\n raise ValueError('No vertex found')",
"def _add_vertex(self, x, y):\n v = Vertex2(x, y)\n i = bisect(self.vertices, v)\n \n # if vertex at these coordinates exists just return it\n if len(self.vertices) > i and self.vertices[i] == v:\n return self.vertices[i]\n \n # otherwise add new vertex in sorted position and return it\n self.vertices.insert(i, v)\n return v",
"def get_object_vertex(self, obj):\n return self.object_vertices[obj]",
"def add_vertex(self, v):\n v = {'x': v[0], 'y': v[1]}\n if v not in self:\n self.append(v)\n return len(self)-1\n return self.index(v)",
"def add_vertex(self, vertex):\n if vertex not in self.graph_dict:\n self.graph_dict[vertex] = []\n return vertex",
"def get_vertex(self, label):\n\n return self._vertices[label]",
"def has_vertex(t, tri, vertex):\n for i in range(3):\n if t[tri][i] == vertex:\n return True\n return False",
"def findVertex(self, vertexNumber):\n try:\n return self.vertexIndex[vertexNumber]\n except KeyError:\n raise VertexError(vertexNumber, ErrorMessages.vertexNotFound)"
]
| [
"0.7513024",
"0.72817385",
"0.72061867",
"0.7141165",
"0.7068804",
"0.6988597",
"0.69729173",
"0.69611084",
"0.6960711",
"0.6946605",
"0.68712467",
"0.68712467",
"0.68499994",
"0.6722803",
"0.6685908",
"0.6663623",
"0.6652164",
"0.66403013",
"0.659667",
"0.65694433",
"0.6537061",
"0.65291524",
"0.65068334",
"0.6483941",
"0.6469675",
"0.6428583",
"0.6414852",
"0.6410903",
"0.63590187",
"0.63192147"
]
| 0.758258 | 0 |
add an edge from vertex f to vertex t with a cost | def add_edge(self, f, t, cost=0):
#if either vertex is not in the graph, returns an error
#if both vertices in the graph, adds the
# edge by making t a neighbor of f
#using the addNeighbor method of the Vertex class.
        if (self.get_vertex(f) is not None) and (self.get_vertex(t) is not None):
self.vert_list[f].add_neighbor(t, cost)
self.vert_list[t].add_neighbor(f, cost)
else:
raise KeyError("F or T is not found") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_edge(self, f, t, cost=1):\n if f not in self.vertList:\n self.add_vertex(f)\n if t not in self.vertList:\n self.add_vertex(t)\n self.vertList[f].add_neighbor(self.vertList[t], cost)",
"def add_edge(self, from_vert, to_vert, cost=0):\n # if either vertex is not in the graph,\n # add it - or return an error (choice is up to you).\n if from_vert not in self.vert_dict or to_vert not in self.vert_dict:\n raise ValueError('vertexes not in graph')\n # if both vertices in the graph, add the\n # edge by making t a neighbor of f\n else:\n self.vert_dict[from_vert].add_neighbor(self.vert_dict[to_vert], cost)",
"def add_edge(self, src, dst, cost):\n self.E = self.E + 1\n self.adjacency_list[src].append((dst, cost))",
"def add_edge(self, frm, to, cost = {}):\n \n if frm not in self.vert_dict:\n raise ValueError('Node not found. Please add it using add_vertex method')\n if to not in self.vert_dict:\n raise ValueError('Node not found. Please add it using add_vertex method')\n \n src_node = self.vert_dict[frm]\n dst_node = self.vert_dict[to]\n \n if src_node.channel != dst_node.channel:\n raise ValueError('Edge should be from nodes using the same channel')\n \n src_node.add_neighbor(to, cost)\n dst_node.add_neighbor(frm, cost)",
"def add_edge(self, src, dest, cost=0):\n if src not in self.vertList:\n self.numVertices += 1\n self.vertList[src] = Vertex(src)\n if dest not in self.vertList:\n self.numVertices += 1\n self.vertList[dest] = Vertex(dest)\n self.vertList[src].add_neighbor(self.vertList[dest], cost)",
"def add_edge(self, v1, v2):\n pass # TODO",
"def addEdge(self, e):\n v = e.either()\n w = e.other(v)\n self._validateVertex(v)\n self._validateVertex(w)\n self._adj[v].add(e)\n self._adj[w].add(e)\n self._E += 1",
"def e(src, dst):\n edge = pydot.Edge(src, dst)\n graph.add_edge(edge)",
"def add_edge(self, u, v, val):\n raise NotImplementedError()",
"def add_edge(self, n1, n2, weight):\n self.edges[n1.identifier][n2.identifier] = weight",
"def add_edge(self, n1, n2, weight):\n self.edges[n1.identifier][n2.identifier] = weight\n self.edges[n2.identifier][n1.identifier] = weight",
"def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)",
"def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)",
"def add_edge(self, e):\n v, w = e\n self[v][w] = e\n self[w][v] = e",
"def energy_cost(edge):\n return edge_weight(edge) * 1.2",
"def add(self, idx):\n # add the cost\n self.g += self.graph[self.visited[-1], idx]\n # add the to the visited place and remove from the unvisited places\n self.visited.append(idx)\n self.not_visited.remove(idx)",
"def add_edge(self, e):\n a, b = e\n self[a][b] = e\n self[b][a] = e",
"def _add_edge(src, dst):\n module_nodes[src].weights[dst] += 1\n module_nodes[dst].weights[src] += 1",
"def change_edge_cost(self, _from, _to, new_cost):\r\n if not self.is_edge_in_graph(_from, _to):\r\n raise GraphException(\"The edge does not exist in the graph.\")\r\n self.__cost[(_from, _to)] = new_cost\r\n self.__cost[(_to, _from)] = new_cost",
"def add_edge(self,source,target,weight):\n \n # Check if source exists already\n if source not in self.nodes.keys():\n self.add_node(source)\n if target not in self.nodes.keys():\n self.add_node(target)\n \n # Add target to adjacent nodes of source\n # If graph is not directed, add source to adjacent nodes of target\n self.nodes[source].add_neighbor(self.nodes[target])\n if not self.directed:\n self.nodes[target].add_neighbor(self.nodes[source])",
"def addEdge(self,u,v):\r\n self.graph[u].append(v)",
"def add_edge(self, edge):\n src = edge.get_source()\n dest = edge.get_destination()\n #weightEdge = WeightedEdge(src, dest, edge.get_total_distance(), edge.get_outdoor_distance())\n if not (src in self.edges and dest in self.edges):\n raise ValueError('Node not in graph')\n self.edges[src].append(dest)\n #self.edges[src].append(weightEdge)",
"def add_edge(self, src_key, dest_key, weight=1):\n self.vertices[src_key].add_neighbour(self.vertices[dest_key], weight)",
"def add_edge(self, u, v):\n self.graph[u].append(v)",
"def add_edge(self, s, e):\n self.graph[s].append(e)",
"def graph_increment_edge(graph, u, v, amount=1):\n\n if graph.has_edge(u, v):\n graph[u][v]['weight'] += amount\n else:\n graph.add_edge(u, v, weight=amount)",
"def add_edge(self, _from, _to, cost):\r\n if self.is_edge_in_graph(_from, _to) or self.is_edge_in_graph(_to, _from):\r\n raise GraphException(\"The edge already exists.\")\r\n if not self.is_vertex_in_graph(_from):\r\n raise GraphException(f\"The vertex {_from} does not exist in the graph.\")\r\n if not self.is_vertex_in_graph(_to):\r\n raise GraphException(f\"The vertex {_to} does not exist in the graph.\")\r\n self.__neighbours[_to].append(_from)\r\n if _to != _from: self.__neighbours[_from].append(_to)\r\n self.__cost[(_from, _to)] = cost",
"def add_edge_directed(u, v):\n adj[u].append(v)",
"def add_edge_directed(u, v):\n adj[u].append(v)",
"def _add_edge(self, actor, target):\n nodes = (actor, target)\n for i in (0, 1):\n self._vertices.setdefault(nodes[i], _Vertex()).add_neighbor(\n nodes[(i + 1) % 2])"
]
| [
"0.8412572",
"0.72365963",
"0.7025727",
"0.678826",
"0.66294044",
"0.6265532",
"0.6264762",
"0.6260604",
"0.623027",
"0.621927",
"0.61729574",
"0.6169615",
"0.6169615",
"0.61498934",
"0.6149495",
"0.611712",
"0.6066989",
"0.6052448",
"0.60479313",
"0.60100734",
"0.59923965",
"0.59766036",
"0.59688985",
"0.5964379",
"0.59390694",
"0.59290045",
"0.5913908",
"0.58998024",
"0.58998024",
"0.5895648"
]
| 0.80194545 | 1 |
The object pk value as a string. | def object_pk(self):
if self._wrapped not in (None, empty):
return str(self._wrapped.pk)
if '_object_pk' in self.__dict__:
return self.__dict__['_object_pk']
identifier = self._get_identifier()
if identifier:
# noinspection PyBroadException
try:
object_pk = identifier.split('.', 2)[-1]
if object_pk == 'None':
object_pk = None
self.__dict__['_object_pk'] = object_pk
return object_pk
except Exception:
pass
raise AttributeError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def object_key(self) -> str:\n return self._values.get('object_key')",
"def _get_obj_pk(self, obj):\n if self.use_natural_keys and hasattr(obj, 'natural_key'):\n raw_nat_key = obj.natural_key()\n obj_pk = smart_text(NATURAL_KEY_JOINER.join(raw_nat_key))\n keytype = 'natural'\n else:\n obj_pk = obj._get_pk_val()\n keytype = 'pk'\n\n return obj_pk, keytype",
"def id_str(self):\n if hasattr(self, 'id'):\n return str(self.id)\n else:\n return 'obj%s' % id(self)",
"def primary_key(self) -> str:\n return self.model._meta.pk.name # type: ignore",
"def key(self):\n return str(self._id)",
"def __str__(self):\n return '<{}>'.format(self.key.id())",
"def __str__(self):\n return '<{}>'.format(self.key.id())",
"def get_pk_name(cls):\n return cls._meta.pk_name",
"def pk(self):\n return getattr(self, self.schema.pk.name, None)",
"def __str__(self):\n return str(self._id())",
"def object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"object_id\")",
"def object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"object_id\")",
"def get_display_name(self):\n from django.utils.encoding import force_str\n return force_str(self.pk)",
"def get_id(self):\n return str(self._id)",
"def get_id(self):\n return str(self._id)",
"def stringKey(obj):\n unproxied = proxy.removeSecurityProxy(obj)\n mapper = orm.object_mapper(unproxied)\n #primary_key = mapper.primary_key_from_instance(unproxied)\n identity_values = [ getattr(unproxied, c.name) for c in mapper.primary_key ]\n identity_key = \"-\".join(map(str, identity_values))\n return \"obj-%s\" % (identity_key)",
"def pk(entity) -> str:\n for field in entity.modelfields.values():\n if field.primary: return field.name",
"def get_pk(self):\n return getattr(self, self.get_pk_name(), None)",
"def _cache_key(cls, pk, db):\r\n key_parts = ('o', cls._meta, pk, db)\r\n return ':'.join(map(encoding.smart_unicode, key_parts))",
"def get(self):\n return str(self.physical_key)",
"def __str__(self):\n return str(self.id)",
"def __str__(self):\n return str(self.id)",
"def __str__(self):\n return str(self.id)",
"def __str__(self):\n return str(self.id)",
"def keyify(content_type_pk, pk):\n return '%s:%s' % (content_type_pk, pk)",
"def get_id(self):\n return unicode(self.id)",
"def __str__(self) -> str:\n return self.id",
"def __str__(self):\n\n return str(self.id)",
"def __str__(self):\n\n return str(self.id)",
"def __str__(self):\n\n return str(self.id)"
]
| [
"0.73430043",
"0.7338839",
"0.73322386",
"0.72485405",
"0.71463984",
"0.70808476",
"0.70808476",
"0.7046806",
"0.7027561",
"0.701783",
"0.70041835",
"0.70041835",
"0.69952285",
"0.6962158",
"0.6962158",
"0.6951326",
"0.6927094",
"0.69219065",
"0.68189216",
"0.6818153",
"0.6807262",
"0.6807262",
"0.6807262",
"0.6807262",
"0.6788264",
"0.67655015",
"0.67394406",
"0.6726198",
"0.6726198",
"0.6726198"
]
| 0.75594234 | 0 |
Get or add a LazyModelObject instance to this dictionary. Accepts the same arguments as the LazyModelObject class. Returns a LazyModelObject instance. | def get_or_add(self, *args, **kwargs):
key = LazyModelObject.get_identifier(*args, **kwargs)
try:
return self[key]
except KeyError:
item = LazyModelObject(*args, **kwargs)
if not item:
item = None
self[key] = item
return item | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_cached_instance(self):\n\n try:\n identifier = self._get_identifier()\n except (ValueError, ObjectDoesNotExist) as error:\n if self._fail_silently:\n return None\n raise LazyModelObjectError(exc=error) from error\n\n # Get the cache key, basically just namespacing the identifier\n cache_key = model_cache_key(identifier)\n\n cache, timeout = self._cache\n cace: BaseCache\n if cache_key in cache:\n instance = cache.get(cache_key)\n else:\n instance = self._get_instance(identifier)\n cache.set(cache_key, instance, timeout=timeout)\n\n if instance is None and not self._fail_silently:\n raise LazyModelObjectError(f'{identifier} not found.')\n return instance",
"def _add_object(self, name, model, *args, **kwargs):\n logger.debug('Adding object with name \"{}\" to model.'.format(name))\n obj = model(weakref.proxy(self), name, *args, **kwargs) # Add hidden hard reference\n self._objects.append(obj)\n return self.get_object(obj.name)",
"def __get__(self, model_instance, model_class):\n instance = None\n\n if model_instance is None:\n return self\n\n if hasattr(model_instance, self.__id_attr_name()):\n reference_id = getattr(model_instance, self.__id_attr_name())\n else:\n reference_id = None\n\n if reference_id is not None:\n\n resolved = getattr(model_instance, self.__resolved_attr_name())\n\n if resolved is not None:\n return resolved\n\n else:\n instance = memcache.get(str(reference_id) or '')\n instance = memcache.get(instance or '')\n\n if instance:\n # Convert to model from protobuf\n instance = db.model_from_protobuf(entity_pb.EntityProto(instance))\n\n # Check in DB after checking in memcache\n if not instance:\n instance = db.get(reference_id)\n\n '''\n # removed to make sure things keep running even after we screw with the db.\n if instance is None:\n raise ReferencePropertyResolveError(\n 'ReferenceProperty failed to be resolved: %s' %\n reference_id.to_path())\n '''\n\n if not instance:\n logging.error(\"The referenced object no longer exists; \"\n \"returning None.\")\n\n setattr(model_instance, self.__resolved_attr_name(), instance)\n return instance\n else:\n return None",
"def make_instance(self, data, **kwargs):\n instance = self.instance or self.get_instance(data)\n if instance is not None:\n for key, value in iteritems(data):\n setattr(instance, key, value)\n return instance\n kwargs, association_attrs = self._split_model_kwargs_association(data)\n instance = self.opts.model(**kwargs)\n for attr, value in iteritems(association_attrs):\n setattr(instance, attr, value)\n return instance",
"def get_cache(self, obj: Any) -> LazyPropertyCache:\n try:\n return getattr(obj, self.cache_name)\n except AttributeError: # need creation\n cache = self.LazyPropertyCache(self.requirements, {\"self\": obj})\n setattr(obj, self.cache_name, cache)\n return cache",
"def get_from_cache(self, **kwargs):\n if not self.cache_fields or len(kwargs) > 1:\n return self.get(**kwargs)\n\n pk_name = self.model._meta.pk.name\n key, value = kwargs.items()[0]\n\n # Kill __exact since it's the default behavior\n if key.endswith('__exact'):\n key = key.split('__exact', 1)[0]\n\n if key in self.cache_fields or key in ('pk', pk_name):\n cache_key = self._get_from_cache_key(**{key: value})\n\n retval = cache.get(cache_key)\n if retval is None:\n result = self.get(**kwargs)\n # Ensure we're pushing it into the cache\n self._post_save(instance=result)\n return result\n\n # If we didn't look up by pk we need to hit the reffed\n # key\n if key not in (pk_name, 'pk'):\n return self.get(pk=retval)\n\n return retval",
"def get(cls, obj: Model):\n return cache.get(cls._construct_key(obj))",
"def get_or_create(self, **kwargs):\n kwargs = self._preprocess(**kwargs)\n found = self.first(**kwargs)\n if found is not None:\n return found\n\n new = self.create(**kwargs)\n return new",
"def obj(self):\n if not self._obj:\n self._get()\n return self._obj",
"def _get_latest_model(cls, model, spec):\n if hasattr(model, \"KEY\") and model.KEY is not None:\n spec[\"content\"] = model\n model = cls\n elif hasattr(model, \"STRUCT\"):\n spec[\"content\"] = model.STRUCT\n else:\n # Is a dict\n spec[\"content\"] = model\n spec[\"object\"] = model\n return model",
"def initialize_model(self):\n model = self.model_class()\n return model",
"def load_obj(\n self, data: Dict[str, Any], *, many: bool, partial: bool\n ) -> Union[ObjType, dict]:\n if self.load_dataclass is True:\n return dataclass_from_dict(\n self.__model__, data, use_defaults=self.use_defaults\n )\n else:\n return data",
"def evaluate_lazy_object(obj):\n wrapped_obj = getattr(obj, LAZY_OBJECT_NAME, None)\n if wrapped_obj is None:\n # if it isn't a lazy object then just return the original object...\n return obj\n if wrapped_obj is uninitialized_lazy_object:\n # if it is a lazy object but, hasn't been initialized yet\n # then initialize it & return it\n obj._setup()\n return getattr(obj, LAZY_OBJECT_NAME)\n # return the lazy object...\n return wrapped_obj",
"def get_or_create(self, _cache=False, **kwargs):\n defaults = kwargs.pop('defaults', {})\n\n # before locking attempt to fetch the instance\n try:\n if _cache:\n return self.get_from_cache(**kwargs), False\n return self.get(**kwargs), False\n except self.model.DoesNotExist:\n pass\n lock_key = self._make_key('lock', kwargs)\n\n # instance not found, lets grab a lock and attempt to create it\n with Lock(lock_key):\n # its important we get() before create() to ensure that if\n # someone beat us to creating it from the time we did our very\n # first .get(), that we get the result back as we cannot\n # rely on unique constraints existing\n instance, created = super(BaseManager, self).get_or_create(defaults=defaults, **kwargs)\n\n return instance, created",
"def __call__(cls, *args, **kwargs):\n\n def new_instance():\n return super(SharedMemoryModelBase, cls).__call__(*args, **kwargs)\n\n instance_key = cls._get_cache_key(args, kwargs)\n # depending on the arguments, we might not be able to infer the PK, so in that case we\n # create a new instance\n if instance_key is None:\n return new_instance()\n cached_instance = cls.get_cached_instance(instance_key)\n if cached_instance is None:\n cached_instance = new_instance()\n cls.cache_instance(cached_instance, new=True)\n return cached_instance",
"def get_model(self) -> BaseLanguageModel:\n model = available_models[self.model_name.value]\n kwargs = model._lc_kwargs\n secrets = {secret: getattr(model, secret) for secret in model.lc_secrets.keys()}\n kwargs.update(secrets)\n\n model_kwargs = kwargs.get(\"model_kwargs\", {})\n for attr, value in self.dict().items():\n if attr == \"model_name\":\n # Skip model_name\n continue\n if hasattr(model, attr):\n # If the model has the attribute, add it to kwargs\n kwargs[attr] = value\n else:\n # Otherwise, add it to model_kwargs (necessary for chat models)\n model_kwargs[attr] = value\n kwargs[\"model_kwargs\"] = model_kwargs\n\n # Initialize a copy of the model using the config\n model = model.__class__(**kwargs)\n return model",
"def __getattribute__ (self, attr):\n attrib = object.__getattribute__(self, attr)\n if not isinstance (attrib, RField):\n return attrib\n if attr not in self.locals:\n self.locals.append (attr)\n if self.newobj:\n if self.keyvals.has_key (attr):\n return self.keyvals[attr]\n else:\n fieldobj = object.__getattribute__(self, attr)\n return fieldobj.default\n\n answer = r.get (self.prepare_key (attr, self.seq))\n fieldobj = object.__getattribute__(self, attr)\n if answer == None:\n answer = fieldobj.default\n else:\n if isinstance (fieldobj, ForeignKey):\n fkey = r.get (self.prepare_key ('__relationfor__', self.seq))\n cls = globals ()[fkey]\n return cls.objects.get (id = answer)\n\n return answer",
"def _get_model(\n self,\n model: t.Type[api.ModelMixins],\n start: bool = True,\n auth: t.Optional[AuthModel] = None,\n ) -> t.Any:\n if start:\n self.start()\n\n if model in self.API_CACHE:\n return self.API_CACHE[model]\n\n if not isinstance(auth, AuthModel):\n auth = self.AUTH\n\n self.API_CACHE[model] = model(auth=auth, log_level=self.API_LOG_LEVEL)\n return self.API_CACHE[model]",
"def get_object(self, id=None):\n if id is None and self.kwargs.get('field') == 'id':\n id = self.kwargs.get('constraint')\n self.object = self.get_model_obj().objects.get(pk=id)\n return self.object",
"def get_model(self, key: str = None, **kwargs) -> Dict:\n raise NotImplementedError",
"def get_initial(self):\n obj = self.get_object()\n print(obj)\n if obj is not None:\n initial_data = model_to_dict(obj)\n print(initial_data)\n initial_data.update(model_to_dict(obj))\n print(initial_data)\n return initial_data\n else:\n return super().get_initial()",
"def get_initial(self):\n obj = self.get_object()\n print(obj)\n if obj is not None:\n initial_data = model_to_dict(obj)\n print(initial_data)\n initial_data.update(model_to_dict(obj))\n print(initial_data)\n return initial_data\n else:\n return super().get_initial()",
"def add(self, model):\n assert isinstance(model, self.model_class) # it's a homogeneous collection\n m_id = str(model.get_id())\n assert m_id != None # needs a real id or cid\n # If the models have already been loaded, verify the model being added is\n # not already in the set. This allows for create_child to be used before a potential\n # lazy load has happened, which might load the newly created child from the DB again.\n if self._loaded:\n assert m_id not in self._models # collision\n model._set_parent(self)\n self._models[m_id] = model\n return model",
"def get_or_create(cls, **kwargs):\n item = cls.query.filter_by(**kwargs).first()\n if not item:\n item = cls(**kwargs)\n db.session.add(item)\n db.session.commit()\n return item",
"def instance_for_model(self, model: AbstractPriorModel):\n try:\n if self.is_path_kwargs:\n return model.instance_from_path_arguments(self.kwargs)\n else:\n return model.instance_from_prior_name_arguments(self.kwargs)\n\n except KeyError:\n # TODO: Does this get used? If so, why?\n return model.instance_from_vector(self.parameter_lists_for_model(model))",
"def get_model(*args):\n return Model()",
"def get_instance(self, data):\n if self.transient:\n return None\n props = get_primary_keys(self.opts.model)\n filters = {prop.key: data.get(prop.key) for prop in props}\n if None not in filters.values():\n return self.session.query(self.opts.model).filter_by(**filters).first()\n return None",
"def add(self, *args, **kwargs):\n return self.load(*args, **kwargs)",
"def get_or_create_for_object(self, content_object, **kwargs):\r\n return self.get_or_create(**self._generate_object_kwarg_dict(content_object, **kwargs))",
"def __call__(self, *args, **kwargs):\n if not self.instance:\n self.instance = super().__call__(*args, **kwargs)\n return self.instance"
]
| [
"0.6156461",
"0.59348273",
"0.58452696",
"0.5716304",
"0.57067645",
"0.56040096",
"0.5588421",
"0.554282",
"0.5540866",
"0.54643005",
"0.5458968",
"0.54366755",
"0.5380811",
"0.5373615",
"0.5371358",
"0.5371091",
"0.5365681",
"0.5358472",
"0.5342425",
"0.5339498",
"0.53308314",
"0.53308314",
"0.53240883",
"0.53142166",
"0.53106844",
"0.53029394",
"0.5286668",
"0.5274037",
"0.52676135",
"0.52670413"
]
| 0.795623 | 0 |
Remove any matches (and their entries from {matches}, {read_count} and {phreds}) that have 1 or more positions with a superPhred score < {min_phred_score}. Returns: count, the total number of reads removed, and count_unique, the total number of unique reads removed. | def remove_low_quality_for_matched(matches, read_count, phreds, min_phred_score, ditched_f=None):
count = count_unique = 0
kk = matches.keys()
for k in kk:
m = matches[k]
if any( x < min_phred_score for x in phreds[m.read.tostring()] ):
count += read_count[m.read.tostring()]
count_unique += 1
if ditched_f is not None:
ditched_f.write("@{id}\n{seq}\n+{id}\n{qual}\n".format( id=k, seq=m.read, \
qual=m.quality ))
del matches[k]
del read_count[m.read.tostring()]
del phreds[m.read.tostring()]
return count, count_unique | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _clean_hits(reads):\n new_reads = defaultdict(realign)\n for r in reads:\n world = {}\n sc = 0\n for p in reads[r].precursors:\n world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))\n if sc < world[p]:\n sc = world[p]\n new_reads[r] = reads[r]\n for p in world:\n logger.debug(\"score %s %s %s\" % (r, p, world[p]))\n if sc != world[p]:\n logger.debug(\"remove %s %s %s\" % (r, p, world[p]))\n new_reads[r].remove_precursor(p)\n\n return new_reads",
"def remove_relative_frequent_words_below_score(score):\n print 'removing words with a relative frequency below: ', score\n global global_reduced_freqs\n\n for w, value in global_reduced_freqs.items():\n if value < score:\n del global_reduced_freqs[w]",
"def remove_mutants_below_readcount(self, min_readcount, perfect_reads=False):\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert dataset to a list to make a separate copy, \n # otherwise we'd be modifying the dataset while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(mutant) < min_readcount:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? MAYBE.",
"def purge_redundancy(scaff_list):\n for scaff in list(scaff_list):\n if len(scaff) < 4:\n scaff_list.remove(scaff)\n\n to_delete = [\"deleted\"] #place-marker for deleted scaffolds\n \n for n in range(0,(len(scaff_list)-1)):\n\n if scaff_list[n] != to_delete: \n n_core = scaff_list[n][1:-1]\n for m in range((n+1),len(scaff_list)):\n if scaff_list[m] != to_delete:\n m_core = scaff_list[m][1:-1]\n if list_in_list(m_core, scaff_list[n]):\n scaff_list[m] = to_delete\n elif list_in_list(n_core, scaff_list[m]):\n scaff_list[n] = to_delete\n \n if \"dummy\" in m_core[0]:\n if list_in_list([m_core[1]], scaff_list[n]) or list_in_list([m_core[2]], scaff_list[n]):\n scaff_list[m] = to_delete\n elif \"dummy\" in n_core[0]:\n if list_in_list([n_core[1]], scaff_list[m]) or list_in_list([n_core[2]], scaff_list[m]):\n scaff_list[n] = to_delete\n \n while to_delete in scaff_list:\n scaff_list.remove(to_delete)\n \n return scaff_list",
"def trim_quality(self, reads):\n cut = self.quality_cutoff * 3\n start = 0\n qscores = reads[0][3]\n qual = ord(qscores[0]) + ord(qscores[1]) + ord(qscores[2]) - 99\n while qual < cut:\n start += 1\n try:\n qual += ord(qscores[start + 2]) - ord(qscores[start - 1])\n except IndexError:\n break\n stop = len(qscores)\n qual = ord(qscores[-1]) + ord(qscores[-2]) + ord(qscores[-3]) - 99\n while qual < cut:\n stop -= 1\n try:\n qual += ord(qscores[stop - 3]) - ord(qscores[stop])\n except IndexError:\n break\n reads[0][1] = reads[0][1][start:stop]\n reads[0][3] = reads[0][3][start:stop]",
"def trim(self, ratio=10000):\n trimmed, total = 0, 0\n for sources in self.sources():\n for s in (self.tp_by_source_and_text[sources],\n self.fp_by_source_and_text[sources],\n self.fn_by_source_and_text[sources],\n self.overlap_by_source_and_text[sources]):\n try:\n max_count = s.most_common(1)[0][1]\n except IndexError:\n continue\n for k, v in list(s.items()):\n if v * ratio < max_count:\n trimmed += 1\n del s[k]\n total += 1\n print(f'trimmed {trimmed}/{total} ({trimmed/total:.1%})',\n file=sys.stderr, flush=True)",
"def remove_matching_reads(filename, cont_file):\n if not os.path.exists(cont_file + '.bwt'):\n cml = shlex.split('bwa index %s' % cont_file)\n subprocess.call(cml)\n cml = 'bwa mem -t 2 %s %s 2> /dev/null | samtools view -f 4 -h - | samtools bam2fq - ' % (cont_file, filename)\n cml += '| seqtk seq -A - > clean_reads.fasta'\n\n subprocess.call(cml, shell=True)\n return 'clean_reads.fasta'",
"def trim(args):\n\n from jcvi.algorithms.maxsum import max_sum\n\n p = OptionParser(trim.__doc__)\n p.add_option(\n \"-c\",\n dest=\"min_length\",\n type=\"int\",\n default=64,\n help=\"minimum sequence length after trimming\",\n )\n p.add_option(\"-s\", dest=\"score\", default=QUAL, help=\"quality trimming cutoff\")\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(p.print_help())\n\n fastafile, newfastafile = args\n qualfile = get_qual(fastafile)\n newqualfile = get_qual(newfastafile, check=False)\n\n logging.debug(\n \"Trim bad sequence from fasta file `%s` to `%s`\" % (fastafile, newfastafile)\n )\n\n fw = must_open(newfastafile, \"w\")\n fw_qual = open(newqualfile, \"w\")\n\n dropped = trimmed = 0\n\n for rec in iter_fasta_qual(fastafile, qualfile, modify=True):\n qv = [x - opts.score for x in rec.letter_annotations[\"phred_quality\"]]\n msum, trim_start, trim_end = max_sum(qv)\n score = trim_end - trim_start + 1\n\n if score < opts.min_length:\n dropped += 1\n continue\n\n if score < len(rec):\n trimmed += 1\n rec = rec[trim_start : trim_end + 1]\n\n write_fasta_qual(rec, fw, fw_qual)\n\n print(\"A total of %d sequences modified.\" % trimmed, file=sys.stderr)\n print(\n \"A total of %d sequences dropped (length < %d).\" % (dropped, opts.min_length),\n file=sys.stderr,\n )\n\n fw.close()\n fw_qual.close()",
"def prune(mapq=30):\n\n mkdir(PRUNE_DIR)\n\n #\n # samtools filters:\n # -f 3: keep properly paired and mapped reads\n # -F 4: filter out unmapped reads\n # -F 8: filter out unmapped mates\n # -F 256: filter out secondary reads\n # -F 1024: filter out duplicates marked by Picard above\n # -F 2048: filter out supplementary reads\n #\n\n template = \"\"\"samtools view -b -h -F 4 -F 256 -F 1024 -F 2048 -q {mapq} {input_bam} {autosomes} > {output_bam}; samtools index {output_bam}\"\"\"\n\n printp(\"\"\"\\n# drmr:label prune\\n\"\"\")\n printp(\"\"\"# drmr:job nodes=1 processors=1 memory=4g time_limit=4h working_directory={}\"\"\".format(PRUNE_DIR))\n printp(\"\"\"\\n#\\n# prune the BAM files with marked duplicates down to properly paired\"\"\")\n printp(\"\"\"# and mapped primary autosomal alignments of good quality, for peak calling\\n#\\n\"\"\")\n\n for sample, info in DATA.items():\n for x in ['treatment', 'control']:\n input_bam = get_md_bam(sample, control = False) if x == 'treatment' else get_md_bam(sample, control = True)\n output_bam = get_pruned_bam(sample, control = False) if x == 'treatment' else get_pruned_bam(sample, control = True)\n autosomes = ' '.join(AUTOSOMAL_REFERENCES[get_genome(sample)])\n printp(template.format(**locals()), timed=True)\n\n printp(\"\"\"\\n# drmr:wait\"\"\")",
"def prune(self, upper, lower):\n # max_count = sorted([self.counts[key] for key in self.counts.keys()])[::-1][upper]\n max_count = upper\n\n print('Removed all words that occur less than {} times and more than {} times'.format(lower, upper))\n for i, doc in enumerate(self.docs):\n new_doc = []\n for word in doc:\n if self.counts[word] <= max_count and self.counts[word] > lower:\n new_doc.append(word)\n self.docs[i] = new_doc",
"def remove_mutants_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) >= readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def count_unique_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)",
"def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]",
"def remove_mutants_not_in_other_dataset(self, other_dataset, readcount_min=1, perfect_reads=False):\n # TODO do I want this to be based on non-exact position equality instead?\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert the iterator to a list to make a separate copy, \n # otherwise we'd be modifying the iterator while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(other_dataset.get_mutant(mutant.IB)) < readcount_min:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? PROBABLY.\n # LATER-TODO unit-test - it does have run-tests though.",
"def prune(pybel_list, min_RMSD):\n #Set up OBAling object\n align = openbabel.OBAlign()\n #Loop\n i = 0\n total_removed = 0\n while i < len(pybel_list):\n referens = pybel_list[i].OBMol #reference\n align.SetRefMol(referens)\n j = i + 1\n while j < len(pybel_list):\n target = pybel_list[j].OBMol #target\n align.SetTargetMol(target)\n #Align and ret rmsd\n if align.Align():\n rmsd = align.GetRMSD()\n if rmsd < min_RMSD:\n pybel_list.pop(j) #remove from both lists\n total_removed += 1\n else:\n j = j + 1\n else:\n print \"Couldn't align\"\n raise Exception()\n #end of inner loop\n i = i + 1\n #end of outer loop\n print \"finished deleting, total number of \\\n removed conformers is\", total_removed\n return pybel_list",
"def preprocess_matches(matches):\n good_matches = []\n for m, n in matches:\n if m.distance < 0.7 * n.distance:\n good_matches.append(m)\n\n return good_matches",
"def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)",
"def _remove_tokens(tokenized_docs, counts, min_counts, max_counts):\n total_tokens_count = sum(\n count for token, count in counts.most_common()\n )\n print('total number of tokens:', total_tokens_count)\n\n unknown_tokens_count = sum(\n count for token, count in counts.most_common()\n if count < min_counts or count > max_counts\n )\n print('number of tokens to be removed:', unknown_tokens_count)\n\n keep = {}\n for token, count in counts.most_common():\n keep[token] = count >= min_counts and count <= max_counts\n\n return [(i, [t for t in doc if keep[t]]) for i, doc in tokenized_docs]",
"def remove_many(self, hashes):\n if isinstance(hashes, MinHash):\n self._methodcall(lib.kmerminhash_remove_from, hashes._objptr)\n else:\n self._methodcall(lib.kmerminhash_remove_many, list(hashes), len(hashes))",
"def prune_terms(docs, min_df=3):\n ###TODO\n final_list = []\n items_dict = defaultdict(lambda:0.0)\n for i in docs:\n for j in i:\n items_dict[j] = items_dict[j] + 1\n \n for i in docs:\n for j in list(i):\n if items_dict[j] < min_df:\n del i[j]\n if len(i) != 0:\n final_list.append(Counter(i))\n return final_list",
"def Clean(pmf):\n vals = [val for val in pmf.Values() if val < thresh]\n [pmf.Remove(val) for val in vals]",
"def check_seqs(fasta_out, fasta_files, starting_ix, valid_map, qual_mappings,\r\n filters, barcode_len, keep_primer, keep_barcode, barcode_type,\r\n max_bc_errors, retain_unassigned_reads, attempt_bc_correction,\r\n primer_seqs_lens, all_primers, max_primer_mm, disable_primer_check,\r\n reverse_primers, rev_primers, qual_out, qual_score_window=0,\r\n discard_bad_windows=False, min_qual_score=25, min_seq_len=200,\r\n median_length_filtering=None, added_demultiplex_field=None,\r\n reverse_primer_mismatches=0, truncate_ambi_bases=False):\r\n\r\n seq_lengths = {}\r\n\r\n # Record complete barcode + primer + sequence lengths\r\n raw_seq_lengths = {}\r\n # Record sequence lengths after all optional removal of components\r\n final_seq_lengths = {}\r\n\r\n bc_counts = defaultdict(list)\r\n curr_ix = starting_ix\r\n corr_ct = 0 # count of corrected barcodes\r\n\r\n # get the list of barcode lengths in reverse order\r\n barcode_length_order =\\\r\n sorted(set([len(bc.split(',')[0]) for bc in valid_map]))\r\n barcode_length_order = barcode_length_order[::-1]\r\n\r\n primer_mismatch_count = 0\r\n all_primers_lens = sorted(set(all_primers.values()))\r\n\r\n reverse_primer_not_found = 0\r\n\r\n sliding_window_failed = 0\r\n trunc_ambi_base_counts = 0\r\n\r\n below_seq_min_after_trunc = 0\r\n below_seq_min_after_ambi_trunc = 0\r\n\r\n for fasta_in in fasta_files:\r\n for curr_id, curr_seq in parse_fasta(fasta_in):\r\n curr_rid = curr_id.split()[0]\r\n curr_seq = upper(curr_seq)\r\n\r\n curr_len = len(curr_seq)\r\n curr_qual = qual_mappings.get(curr_rid, None)\r\n\r\n # if qual_out:\r\n # curr_qual_out_score = \\\r\n # \"%2.2f\" % float(float(sum(curr_qual))/float(len(curr_qual)))\r\n seq_lengths[curr_rid] = curr_len\r\n failed = False\r\n\r\n for f in filters:\r\n failed = failed or f(curr_rid, curr_seq, curr_qual)\r\n if failed: # if we failed any of the checks, bail out here\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n if barcode_type == 'variable_length':\r\n # Reset the raw_barcode, raw_seq, and barcode_len -- if\r\n # we don't match a barcode from the mapping file, we want\r\n # these values to be None\r\n raw_barcode, raw_seq, barcode_len = (None, None, None)\r\n\r\n curr_valid_map =\\\r\n [curr_bc.split(',')[0] for curr_bc in valid_map]\r\n # Iterate through the barcode length from longest to shortest\r\n for l in barcode_length_order:\r\n # extract the current length barcode from the sequence\r\n bc, seq = get_barcode(curr_seq, l)\r\n # check if the sliced sequence corresponds to a valid\r\n # barcode, and if so set raw_barcode, raw_seq, and\r\n # barcode_len for use in the next steps\r\n if bc in curr_valid_map:\r\n raw_barcode, raw_seq = bc, seq\r\n barcode_len = len(raw_barcode)\r\n break\r\n # if we haven't found a valid barcode, log this sequence as\r\n # failing to match a barcode, and move on to the next sequence\r\n if not raw_barcode:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n else:\r\n # Get the current barcode to look up the associated primer(s)\r\n raw_barcode, raw_seq = get_barcode(curr_seq, barcode_len)\r\n\r\n if not disable_primer_check:\r\n try:\r\n current_primers = primer_seqs_lens[raw_barcode]\r\n # In this case, all values will be the same, i.e. 
the length\r\n # of the given primer, or degenerate variations thereof.\r\n primer_len = current_primers.values()[0]\r\n\r\n if primer_exceeds_mismatches(raw_seq[:primer_len],\r\n current_primers, max_primer_mm):\r\n bc_counts['#FAILED'].append(curr_rid)\r\n primer_mismatch_count += 1\r\n continue\r\n except KeyError:\r\n # If the barcode read does not match any of those in the\r\n # mapping file, the situation becomes more complicated. We do\r\n # not know the length the sequence to slice out to compare to\r\n # our primer sets, so, in ascending order of all the given\r\n # primer lengths, a sequence will the sliced out and compared\r\n # to the primer set.\r\n current_primers = all_primers\r\n found_match = False\r\n for seq_slice_len in all_primers_lens:\r\n if not(\r\n primer_exceeds_mismatches(raw_seq[:seq_slice_len],\r\n current_primers, max_primer_mm)):\r\n primer_len = seq_slice_len\r\n found_match = True\r\n break\r\n if not found_match:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n primer_mismatch_count += 1\r\n continue\r\n except IndexError:\r\n # Try to raise meaningful error if problem reading primers\r\n raise IndexError('Error reading primer sequences. If ' +\r\n 'primers were purposefully not included in the mapping ' +\r\n 'file, disable usage with the -p option.')\r\n else:\r\n # Set primer length to zero if primers are disabled.\r\n primer_len = 0\r\n\r\n # split seqs\r\n cbc, cpr, cres = split_seq(curr_seq, barcode_len,\r\n primer_len)\r\n\r\n total_bc_primer_len = len(cbc) + len(cpr)\r\n\r\n # get current barcode\r\n try:\r\n bc_diffs, curr_bc, corrected_bc = \\\r\n check_barcode(cbc, barcode_type, valid_map.keys(),\r\n attempt_bc_correction, added_demultiplex_field, curr_id)\r\n if bc_diffs > max_bc_errors:\r\n raise ValueError(\"Too many errors in barcode\")\r\n corr_ct += bool(corrected_bc)\r\n except Exception as e:\r\n bc_counts[None].append(curr_rid)\r\n continue\r\n\r\n curr_samp_id = valid_map.get(curr_bc, 'Unassigned')\r\n\r\n new_id = \"%s_%d\" % (curr_samp_id, curr_ix)\r\n # check if writing out primer\r\n write_seq = cres\r\n\r\n if reverse_primers == \"truncate_only\":\r\n try:\r\n rev_primer = rev_primers[curr_bc]\r\n mm_tested = {}\r\n for curr_rev_primer in rev_primer:\r\n # Try to find lowest count of mismatches for all\r\n # reverse primers\r\n rev_primer_mm, rev_primer_index = \\\r\n local_align_primer_seq(curr_rev_primer, cres)\r\n mm_tested[rev_primer_mm] = rev_primer_index\r\n\r\n rev_primer_mm = min(mm_tested.keys())\r\n rev_primer_index = mm_tested[rev_primer_mm]\r\n if rev_primer_mm <= reverse_primer_mismatches:\r\n write_seq = write_seq[0:rev_primer_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + rev_primer_index]\r\n else:\r\n reverse_primer_not_found += 1\r\n except KeyError:\r\n pass\r\n elif reverse_primers == \"truncate_remove\":\r\n try:\r\n rev_primer = rev_primers[curr_bc]\r\n mm_tested = {}\r\n for curr_rev_primer in rev_primer:\r\n # Try to find lowest count of mismatches for all\r\n # reverse primers\r\n rev_primer_mm, rev_primer_index = \\\r\n local_align_primer_seq(curr_rev_primer, cres)\r\n mm_tested[rev_primer_mm] = rev_primer_index\r\n\r\n rev_primer_mm = min(mm_tested.keys())\r\n rev_primer_index = mm_tested[rev_primer_mm]\r\n if rev_primer_mm <= reverse_primer_mismatches:\r\n write_seq = write_seq[0:rev_primer_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + rev_primer_index]\r\n else:\r\n reverse_primer_not_found += 1\r\n write_seq = False\r\n except 
KeyError:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n # Check for quality score windows, truncate or remove sequence\r\n # if poor window found. Previously tested whole sequence-now\r\n # testing the post barcode/primer removed sequence only.\r\n if qual_score_window:\r\n passed_window_check, window_index =\\\r\n check_window_qual_scores(curr_qual, qual_score_window,\r\n min_qual_score)\r\n # Throw out entire sequence if discard option True\r\n if discard_bad_windows and not passed_window_check:\r\n sliding_window_failed += 1\r\n write_seq = False\r\n # Otherwise truncate to index of bad window\r\n elif not discard_bad_windows and not passed_window_check:\r\n sliding_window_failed += 1\r\n write_seq = write_seq[0:window_index]\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + window_index]\r\n # Check for sequences that are too short after truncation\r\n if len(write_seq) + total_bc_primer_len < min_seq_len:\r\n write_seq = False\r\n below_seq_min_after_trunc += 1\r\n\r\n if truncate_ambi_bases and write_seq:\r\n write_seq_ambi_ix = True\r\n # Skip if no \"N\" characters detected.\r\n try:\r\n ambi_ix = write_seq.index(\"N\")\r\n write_seq = write_seq[0:ambi_ix]\r\n except ValueError:\r\n write_seq_ambi_ix = False\r\n pass\r\n if write_seq_ambi_ix:\r\n # Discard if too short after truncation\r\n if len(write_seq) + total_bc_primer_len < min_seq_len:\r\n write_seq = False\r\n below_seq_min_after_ambi_trunc += 1\r\n else:\r\n trunc_ambi_base_counts += 1\r\n if qual_out:\r\n curr_qual = curr_qual[0:barcode_len +\r\n primer_len + ambi_ix]\r\n\r\n # Slice out regions of quality scores that correspond to the\r\n # written sequence, i.e., remove the barcodes/primers and reverse\r\n # primers if option is enabled.\r\n if qual_out:\r\n qual_barcode, qual_primer, qual_scores_out = \\\r\n split_seq(curr_qual, barcode_len, primer_len)\r\n # Convert to strings instead of numpy arrays, strip off\r\n # brackets\r\n qual_barcode = format_qual_output(qual_barcode)\r\n qual_primer = format_qual_output(qual_primer)\r\n qual_scores_out = format_qual_output(qual_scores_out)\r\n\r\n if not write_seq:\r\n bc_counts['#FAILED'].append(curr_rid)\r\n continue\r\n\r\n if keep_primer:\r\n write_seq = cpr + write_seq\r\n if qual_out:\r\n qual_scores_out = qual_primer + qual_scores_out\r\n if keep_barcode:\r\n write_seq = cbc + write_seq\r\n if qual_out:\r\n qual_scores_out = qual_barcode + qual_scores_out\r\n\r\n # Record number of seqs associated with particular barcode.\r\n bc_counts[curr_bc].append(curr_rid)\r\n\r\n if retain_unassigned_reads and curr_samp_id == \"Unassigned\":\r\n fasta_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\\n\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))\r\n if qual_out:\r\n qual_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs),\r\n qual_scores_out))\r\n elif not retain_unassigned_reads and curr_samp_id == \"Unassigned\":\r\n bc_counts['#FAILED'].append(curr_rid)\r\n else:\r\n fasta_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\\n\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))\r\n if qual_out:\r\n qual_out.write(\r\n \">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\\n%s\" %\r\n (new_id, curr_rid, cbc, curr_bc, int(bc_diffs),\r\n qual_scores_out))\r\n\r\n curr_len = len(write_seq)\r\n\r\n #seq_lengths[curr_rid] = curr_len\r\n\r\n curr_ix += 1\r\n\r\n # Record the raw and written seq length of everything 
passing\r\n # filters\r\n raw_seq_lengths[curr_rid] = len(curr_seq)\r\n final_seq_lengths[curr_id] = curr_len\r\n\r\n if median_length_filtering:\r\n # Read original fasta file output to get sequence lengths\r\n fasta_out.close()\r\n fasta_out = open(fasta_out.name, \"U\")\r\n\r\n # Record sequence lengths for median/mad calculation\r\n sequence_lens = []\r\n for label, seq in parse_fasta(fasta_out):\r\n sequence_lens.append(len(seq))\r\n\r\n '''# Create a temporary file to copy the contents of the fasta file, will\r\n # need to delete once operations complete.\r\n fasta_temp = open(fasta_out.name + \"_tmp.fasta\", \"w\")\r\n\r\n sequence_lens = []\r\n for label, seq in parse_fasta(fasta_lens):\r\n sequence_lens.append(len(seq))\r\n fasta_temp.write(\">%s\\n%s\\n\" % (label, seq))\r\n\r\n fasta_temp.close()\r\n fasta_temp = open(fasta_out.name + \"_tmp.fasta\", \"U\")\r\n\r\n fasta_lens.close()\r\n # Overwrite seqs.fna with length filtered data\r\n fasta_out = open(fasta_out.name, \"w\")'''\r\n\r\n med_abs_dev, med_length = median_absolute_deviation(sequence_lens)\r\n\r\n min_corrected_len = med_length - med_abs_dev *\\\r\n float(median_length_filtering)\r\n max_corrected_len = med_length + med_abs_dev *\\\r\n float(median_length_filtering)\r\n seqs_discarded_median = 0\r\n\r\n fasta_out.seek(0)\r\n\r\n final_written_lens = []\r\n\r\n # Create final seqs.fna\r\n final_fasta_out = open(fasta_out.name.replace('.tmp', ''), \"w\")\r\n\r\n for label, seq in parse_fasta(fasta_out):\r\n curr_len = len(seq)\r\n if curr_len < min_corrected_len or curr_len > max_corrected_len:\r\n seqs_discarded_median += 1\r\n else:\r\n final_fasta_out.write(\">%s\\n%s\\n\" % (label, seq))\r\n final_written_lens.append(len(seq))\r\n\r\n final_fasta_out.close()\r\n fasta_out.close()\r\n remove_files([fasta_out.name])\r\n\r\n else:\r\n min_corrected_len = 0\r\n max_corrected_len = 0\r\n seqs_discarded_median = 0\r\n final_written_lens = 0\r\n\r\n # Copy tmp seqs file to final seqs.fna file\r\n fasta_out.close()\r\n fasta_out = open(fasta_out.name, \"U\")\r\n\r\n # Create final seqs.fna\r\n final_fasta_out = open(fasta_out.name.replace('.tmp', ''), \"w\")\r\n\r\n for label, seq in parse_fasta(fasta_out):\r\n final_fasta_out.write(\">%s\\n%s\\n\" % (label, seq))\r\n\r\n final_fasta_out.close()\r\n fasta_out.close()\r\n remove_files([fasta_out.name])\r\n\r\n median_results = (median_length_filtering, min_corrected_len,\r\n max_corrected_len, seqs_discarded_median, final_written_lens)\r\n\r\n raw_seq_lengths = raw_seq_lengths.values()\r\n final_seq_lengths = final_seq_lengths.values()\r\n\r\n log_out = format_log(bc_counts, corr_ct, valid_map, seq_lengths, filters,\r\n retain_unassigned_reads, attempt_bc_correction, primer_mismatch_count,\r\n max_primer_mm, reverse_primers, reverse_primer_not_found,\r\n sliding_window_failed, below_seq_min_after_trunc, qual_score_window,\r\n discard_bad_windows, min_seq_len, raw_seq_lengths,\r\n final_seq_lengths, median_results, truncate_ambi_bases,\r\n below_seq_min_after_ambi_trunc, )\r\n\r\n #all_seq_lengths, good_seq_lengths = get_seq_lengths(seq_lengths, bc_counts)\r\n\r\n return log_out, seq_lengths.values(), raw_seq_lengths, final_seq_lengths",
"def prune(candidate_aspect_list, min_sup):\n l_k = deepcopy(candidate_aspect_list)\n for key, value in list(l_k.items()):\n if value < min_sup:\n del l_k[key]\n return l_k",
"def remove_xmatch_multiples(index_primary, index_secondary, d2d, d3d, retain_best_match=True,\n verbose=False):\n n_entries = len(index_primary)\n n_unique_entries = len(np.unique(index_primary))\n if verbose:\n print('xmatch: cleanMultipleCrossMatches: there are {0:d} unique entries in the primary '\n 'catalog'.format(n_unique_entries))\n\n if n_entries != n_unique_entries:\n # get array of unique indices (here like identifier) and the corresponding occurrence count\n unique_array, return_counts = np.unique(index_primary, return_counts=True)\n\n # indices in unique array of stars with multiple crossmatches\n index_in_unique_array = np.where(return_counts > 1)[0]\n number_of_stars_with_multiplematches = len(index_in_unique_array)\n if verbose:\n if retain_best_match:\n print('xmatch: result contains {} multiple matches affecting {} entries, '\n 'keeping closest match'.format(number_of_stars_with_multiplematches,\n n_entries - n_unique_entries))\n else:\n print('xmatch: result contains {} multiple matches affecting {} entries, removing '\n 'them all'.format(number_of_stars_with_multiplematches,\n n_entries - n_unique_entries))\n\n # identifiers (i.e. numbers in index_primary) of stars with multiple crossmatches\n multi_matches = unique_array[index_in_unique_array]\n\n # index in index_primary where multiple matches occur\n index_multiple_match = np.where(np.in1d(index_primary, multi_matches))[0]\n if verbose:\n print(d2d[index_multiple_match], index_primary[index_multiple_match], index_secondary[index_multiple_match])\n\n good_index_in_index_primary = np.zeros(number_of_stars_with_multiplematches)\n for ii, jj in enumerate(multi_matches):\n tmp_idx0 = np.where(index_primary[index_multiple_match] == jj)[0]\n tmp_idx1 = np.argmin(d2d[index_multiple_match][tmp_idx0])\n good_index_in_index_primary[ii] = index_primary[index_multiple_match][tmp_idx0[tmp_idx1]]\n\n if retain_best_match:\n index_to_remove = np.setdiff1d(index_multiple_match, good_index_in_index_primary)\n else:\n index_to_remove = index_multiple_match\n\n mask = np.ones(len(index_primary), dtype=bool)\n mask[index_to_remove] = False\n index_primary = index_primary[mask]\n index_secondary = index_secondary[mask]\n d2d = d2d[mask]\n d3d = d3d[mask]\n if len(index_primary) != len(np.unique(index_primary)):\n print('xmatch: Multiple match cleanup procedure failed')\n # get array of unique indices (here like identifier) and the corresponding occurrence count\n unique_array, return_counts = np.unique(index_primary, return_counts=True)\n\n # indices in unique array of stars with multiple crossmatches\n index_in_unique_array = np.where(return_counts > 1)[0]\n number_of_stars_with_multiplematches = len(index_in_unique_array)\n print('xmatch: result still contains {} multiple matches'.format(\n number_of_stars_with_multiplematches))\n\n return index_primary, index_secondary, d2d, d3d",
"def clean_duplicate_documents(self):\n title_k = lambda x: x.title\n for k, g in groupby(sorted(self.annotation_documents, key=title_k), title_k):\n g = list(g)\n if len(g) > 1:\n # check first if one is in test set\n to_remove = [x for x in g if x not in self.test]\n if (\n len(to_remove) > 1\n ): # if test is not matched, make subselection based on annotation unit count\n select_k = lambda x: (\n len(x.events) + len(x.sentiment_expressions),\n x.annotator_id != \"gilles\",\n )\n to_remove.sort(key=select_k, reverse=True)\n to_remove = to_remove[1:]\n for docrm in to_remove:\n self.annotation_documents.remove(docrm)\n if docrm in self.dev:\n self.dev.remove(docrm)\n elif docrm in self.test:\n self.test.remove(docrm)\n print(f\"Duplicate doc removed: {docrm}\")",
"def CalculateRoc2(dataArray,prefix,readsize,uniquehits,mappedreads,filename):\r\n starttime= time.time()\r\n uniquehits = float(uniquehits)\r\n readsize = float(readsize)\r\n \r\n \r\n entries = len(dataArray)\r\n \r\n\r\n resultmatrix = np.arange(entries*2)\r\n resultmatrix = resultmatrix.reshape(2,entries)\r\n \r\n maxrq = max(x.rq for x in dataArray)\r\n maxnm = max(x.nm[0] for x in dataArray)\r\n maxGaps= max(x.gaps[0] for x in dataArray)\r\n maxMism= max(x.mism[0] for x in dataArray)\r\n \r\n \r\n minrq = min(x.rq for x in dataArray)\r\n minnm = min(x.nm[0] for x in dataArray)\r\n minmq= min(x.mq[0] for x in dataArray)\r\n minGaps= min(x.gaps[0] for x in dataArray) \r\n minMism= min(x.mism[0] for x in dataArray) \r\n \r\n \r\n # adjust stepsize for rq since the score behaves the other way\r\n quants = [1,2,3,4,5]\r\n tempa = maxrq-minrq\r\n stepsize = tempa/5\r\n \r\n rqQuants = [round(minrq+(i-1)*stepsize,3) for i in quants]\r\n rqQuants.reverse()\r\n rqQuants[-1] =0 # last entry is rounded bigger than the smallest in the dataset\r\n \r\n nmQuants = [i*maxnm/5 for i in quants]\r\n GapsQuants = [i*maxGaps/5 for i in quants]\r\n MismQuants = [i*maxMism/5 for i in quants]\r\n\r\n rocvector = []\r\n \r\n # i = NM,l = RQ, k = MQ\r\n for l in quants: # RQ\r\n for k in quants: # GAPS\r\n for j in quants: # MISMATCH\r\n temparray = [m for m in dataArray if m.gaps[0] <= GapsQuants[k-1] and m.mism[0] <= MismQuants[j-1] and m.rq >=rqQuants[l-1]]\r\n \r\n\r\n tempids = [m.id for m in temparray]\r\n uniquereads = {}\r\n for i in xrange(0,len(tempids)):\r\n uniquereads[tempids[i]] = \"\"\r\n\r\n mappedreads = len(uniquereads)\r\n \r\n \r\n \r\n templength = len(temparray)\r\n \r\n if templength == 0:\r\n continue\r\n else:\r\n tempTP = sum(x.mr[0] for x in temparray)\r\n tempFP =templength-tempTP\r\n F = round((float(mappedreads)/ readsize) ,3)\r\n sens = round((tempTP/ uniquehits) * F,3)\r\n if tempFP == 0:\r\n spec = 0\r\n else:\r\n spec = round((tempFP / uniquehits) * F,3) \r\n \r\n rocvector.append([rqQuants[l-1],GapsQuants[k-1],MismQuants[j-1],tempTP,tempFP,templength,sens,spec,F])\r\n \r\n #print (\"%d\\t%d\\t%d\\t\" % (templength,tempTP,tempFP))\r\n\r\n #0 = NM 4 = TP 7 = sens\r\n #1 = RQ 5 = FP 8 = 1-spec\r\n #2 = GAPS 6 = P 9 = F\r\n #append needed for last entry in AUC calculation\r\n rocvector.append([0,0,0,0,0,0,0,0,0]) \r\n nproc = np.array(rocvector)\r\n \r\n #write the sens and specificity values from nproc according to the enumeration in line 149. \r\n #specificity is in cell -2\r\n # sensitivity is in cell -3\r\n sens = [i[-3] for i in nproc]\r\n spez = [i[-2] for i in nproc]\r\n \r\n # adjust ROC curve. 
It is necessary that it the 1-specificity ends in 1.\r\n # for the last record copy the predecessor in sens to it\r\n # and write 1 to specificity \r\n spez[-1] = 1\r\n sens[-1] = sens[-2]\r\n \r\n\r\n rocarray1 = np.array([sens,spez])\r\n rocarray1 = rocarray1.flatten('F')\r\n rocarray1= rocarray1.reshape((len(spez),2))\r\n \r\n rocarray = np.array([sens,spez])\r\n rocarray = rocarray.flatten('F')\r\n rocarray = rocarray.reshape((len(spez),2))\r\n rocarray = np.sort(rocarray.view('float,float'), order=['f0','f1'], axis=0).view(np.float)\r\n \r\n rocarrayCorrected = rocarray\r\n \r\n #print rocarrayCorrected\r\n # project points where...\r\n for m in range(len(rocarrayCorrected)-2,-1,-1):\r\n if (rocarrayCorrected[m,1] >= rocarrayCorrected[m+1,1]):\r\n rocarrayCorrected[m,1] = rocarrayCorrected[m+1,1]\r\n\r\n \r\n #print rocarrayCorrected \r\n plt.hold(True)\r\n plt.figure()\r\n plt.subplot(111)\r\n #plt.scatter(spez, sens, c='b', marker='o', facecolor='red')\r\n #plt.plot(rocarray[:,1], rocarray[:,0]\r\n plt.plot(rocarrayCorrected[:,1],rocarrayCorrected[:,0], marker='o', markersize=7,linestyle='--', color='r', label='projected')\r\n plt.plot(rocarray1[:,1], rocarray1[:,0], linestyle=\"None\",label='real',marker='.',color='g')\r\n plt.xlabel('1-specificity')\r\n plt.ylabel('sensitivity')\r\n plt.title(r'ROC:'+filename)\r\n plt.axis([-0.1,1.1,-0.1,1.1])\r\n plt.grid(True)\r\n plt.legend(loc='lower right')\r\n plt.tight_layout()\r\n plt.savefig(prefix + \"_ROC.pdf\",format='pdf')\r\n plt.clf \r\n \r\n \r\n AUC = trapezoidal_rule(rocarrayCorrected[:,1], rocarrayCorrected[:,0])\r\n \r\n fobj = open(prefix+\"_roctable.txt\",\"w\")\r\n fobj.write(\"RQ\\tGAPS\\tMM\\tPTP\\tFP\\tP\\tSn\\t1-Sp\\tF\\r\\n\")\r\n for i in xrange(0,len(rocvector),1):\r\n temp = [str(k) for k in rocvector[i]]\r\n tempstr = \"\\t\".join(temp)\r\n fobj.write(tempstr+\"\\r\\n\")\r\n\r\n endtime= time.time()\r\n return(round(AUC,3))",
"def prune(self, chrom, alleles):\n\n # Eliminate extinct alleles, which do not appear in the list\n # of ancestral alleles for the extant alleles.\n if len(alleles) == 1:\n self.clear()\n return\n new_hist = dict((allele, self._hist[chrom][allele])\n for allele in self._hist[chrom].keys()\n if allele in alleles)\n required_alleles = list(set([allele\n for anc_alleles in new_hist.values()\n for allele in anc_alleles]))\n required_alleles.sort()\n\n extant = new_hist.keys()\n sextant = set(extant)\n unneeded = set(new_hist[extant[0]])\n for hist in [new_hist[key] for key in extant[1:]]:\n unneeded = unneeded.intersection(hist)\n unneeded = list(unneeded)\n unneeded.sort()\n unneeded.pop()\n required_alleles = [allele\n for allele in required_alleles\n if allele not in unneeded and\n allele not in sextant]\n\n required_alleles.extend(\n [extant_allele for extant_allele in extant])\n required_alleles = list(set(required_alleles))\n required_alleles.sort()\n\n new_hist = dict((key,\n [allele for allele in new_hist[key]\n if allele in required_alleles])\n for key in extant)\n\n # Construct a mapping of old ids to new ids.\n name_map = dict((orig_name, new_name)\n for new_name, orig_name\n in enumerate(required_alleles))\n\n # Replace names in history\n self._hist[chrom] = dict((name_map[name],\n [None] +\n [name_map[anc] for anc in new_hist[name]])\n for name in extant)\n self._max[chrom] = max(self._hist[chrom]) + 1\n\n return name_map",
"def test_filter_remove(self):\n words = ['cart', 'fate', 'date', 'daft']\n filtered = filter_scores(score_words(words), 'fate', -1)\n self.assertEqual([(8, 'daft'), (7, 'date'), (7, 'cart')], filtered)",
"def _filter_subreads(self):\n logging.info(\"Start to filter subreads in fofn.\")\n if op.exists(self.ori_all_reads_fasta) and self.force_redo is not True:\n msg = \"{fa} already exists, skip pls2fasta\".format(fa=self.ori_all_reads_fasta)\n logging.warn(msg)\n else:\n logging.debug(\"{f} does not exist, call pls2fasta\".\n format(f=self.ori_all_reads_fasta))\n filter_summary = op.join(self.filtered_region_dir,\n \"filtered_summary.csv\")\n cmd = \"filter_plsh5.py --debug \" + \\\n \"--filter='MinReadScore=0.80,MinSRL=500,MinRL=100' \" + \\\n \"--trim='True' --outputDir={fr} \".format(\n fr=self.filtered_region_dir) + \\\n \"--outputSummary={sm} \".format(sm=filter_summary) + \\\n \"--outputFofn={rgn} \".format(rgn=self.region_fofn) + \\\n \"{in_fofn}\".format(in_fofn=self.input_fofn)\n logging.info(\"CMD: {cmd}\".format(cmd=cmd))\n _o, _c, _m = backticks(cmd)\n if _c != 0:\n raise RuntimeError(\"CMD failed. \" + str(_o) + ' ' + str(_m))\n\n cmd = \"pls2fasta -trimByRegion \" + \\\n \"-regionTable {rgn} \".format(rgn=self.region_fofn) + \\\n \"{fofn} {fa} \".format(fofn=self.input_fofn,\n fa=self.ori_all_reads_fasta)\n logging.info(\"CMD: {cmd}\".format(cmd=cmd))\n _o, _c, _m = backticks(cmd)\n if _c != 0:\n raise RuntimeError(\"CMD failed. \" + str(_o) + ' ' + str(_m))\n logging.info(\"{f} created.\".format(f=self.ori_all_reads_fasta))\n\n logging.debug(\"Copying {ori_f} to {f}.\".format(\n ori_f=self.ori_all_reads_fasta, f=self.all_reads_fasta))\n shutil.copyfile(self.ori_all_reads_fasta, self.all_reads_fasta)",
"def rm_duplicates(self):\n # get uniq representation of existing detection documents\n existing = set(ed.uniq_data for ed in self.existing_detections)\n # remove duplicates\n for idx in xrange(len(self.new_detections)-1, -1, -1):\n nd = self.new_detections[idx]\n if nd.uniq_data in existing:\n self.new_detections.pop(idx)"
]
| [
"0.65910244",
"0.5650101",
"0.5642639",
"0.5534765",
"0.5399435",
"0.5347214",
"0.53100437",
"0.5299977",
"0.5246292",
"0.52276933",
"0.51281446",
"0.51219064",
"0.5101029",
"0.5089264",
"0.50758624",
"0.5075226",
"0.50660384",
"0.50597763",
"0.50569576",
"0.49090847",
"0.49028185",
"0.48975137",
"0.4878839",
"0.4878135",
"0.48767442",
"0.48742893",
"0.48483056",
"0.4834143",
"0.4820491",
"0.48193756"
]
| 0.7690688 | 0 |
Counts the total number of reads in M | def count(self):
return sum(read.copy for read in self.__iter__()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_count_reads_in_region_total(self):\n self.c.skipZeros = False\n self.c.stepSize = 200\n self.c.binLength = 200\n resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200)\n nt.assert_equal(resp, np.array([[2, 4.]]))",
"def count_single_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)",
"def count_unique_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)",
"def map_count(filename):\n f = open(filename, \"r+\")\n buf = mmap.mmap(f.fileno(), 0)\n lines = 0\n readline = buf.readline\n while readline():\n lines += 1\n return lines",
"def test_count_total_reads(self):\n \n bam = pybedtools.BedTool(clipper.test_file(\"allup_test.bam\"))\n gene_dfn = pybedtools.BedTool(clipper.test_file(\"hg19_genes.bed\"))\n \n result = count_total_reads(bam, gene_dfn)\n \n self.assertEqual(result, 2086)",
"def count():",
"def gatherReadCounts(samplesList, scriptsDir, threads, alignmentPath, outRoot, stype, mode):\n reads = 0\n ext = \".pruned.bam\"\n if mode == \"all_reads\":\n ext = \".bam\"\n for i in range(len(samplesList)):\n bam = os.path.join(alignmentPath, outRoot) + \".\" + stype + \".\" + str(i) + ext\n reads += int(subprocess.run([os.path.join(scriptsDir, \"get_readcount.sh\"), bam, str(threads)], capture_output=True, text=True).stdout.strip(\"\\n\"))\n return reads",
"def count_mapped_bases(bam):\n\n for read in open_bam(bam):\n if not read.is_secondary:\n count = Counter(read.query_alignment_sequence)\n yield(count)",
"def count_kmers_observed(read, k):\n counts = {}\n num_kmers = len(read) - k + 1\n for i in range (num_kmers):\n kmer= read[i:i+k]\n if kmer not in counts:\n counts[kmer] = 0\n counts[kmer] +=1\n return len(counts)",
"def count_allbest_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)",
"def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number",
"def count(self):\n return len(self.read_ints())",
"def reads_in_chromosome(self, chromosome):\n return sum(m.read_info(self.dataset_name).total_read_count \n for m in self.dataset if m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)",
"def count_number_of_reads(filename: Path) -> int:\n\tif filename.suffix == '.gz':\n\t\tcommand = f\"zcat {filename}\"\n\telse:\n\t\tcommand = f\"cat {filename}\"\n\tprocess = subprocess.Popen(command.split(), stdout = subprocess.PIPE)\n\toutput = subprocess.check_output([\"wc\", \"-l\"], stdin = process.stdout)\n\n\treads = int(output.strip()) / 4\n\treturn int(reads)",
"def get_count(bam, max_workers):\n print (\"Count total number of paired reads in %s ...\"%bam)\n cmd = ['samtools','view','-c','-f', '3','-@',str(max_workers),bam]\n out, err = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout=subprocess.PIPE).communicate()\n return int(out.split()[0])",
"def getReadCounts( counts, htList ):\n for ht in htList:\n htseqName = ht\n \n # calculate the total number of aligned reads \n totalReads = 0\n alignedReads = 0\n \n # sum read counts\n with open(htseqName,'r') as htseq:\n for x in htseq:\n x = x.strip() # get rid of that pesky newline\n row = x.split('\\t')\n totalReads += float(row[1])\n if x.startswith('__'):\n continue\n else:\n alignedReads += float(row[1])\n \n percentAligned = (alignedReads/totalReads) * 100\n counts[ht].append(totalReads)\n counts[ht].append(alignedReads)\n counts[ht].append(percentAligned)",
"def written_reads(self) -> int:\n return sum(self._written_lengths1.values())",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def getNumMaps(self):\n try:\n return self._numCMAP\n except AttributeError:\n pass\n flag = 'CMAP_COUNT'\n if flag not in self._raw_data and self.chamber:\n flag = 'CHARMM_CMAP_COUNT'\n if flag in self._raw_data:\n self._numCMAP = int(self._raw_data[flag][1])\n return self._numCMAP\n return 0",
"def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count",
"def count(r, c, k):\n\t\tif 0 <= r < M and 0 <= c < N:\n\t\t\tif A[r][c] == target[k]:\n\t\t\t\t\"*** YOUR CODE HERE ***\"\n\t\t\t\treturn memoized_count(r, c, k)\n\t\t\telse:\n\t\t\t\treturn 0\n\t\telse:\n\t\t\treturn 0",
"def read_count(self):\n return self._read_count",
"def count_reads(titer):\n c = Counter()\n for template in titer:\n for mate in template:\n for cat in (mate['cat_list'] or ['nocat']):\n c[cat] += 1\n return c",
"def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)",
"def test_count_reads_in_region_extension_1(self):\n self.c = cr.CountReadsPerBin([self.bamFile1, self.bamFile2],\n binLength=1,\n stepSize=50,\n extendReads=25)\n\n resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200)\n\n nt.assert_equal(resp, np.array([[0, 0.],\n [0, 1.],\n [1, 1.],\n [1, 2.]]))",
"def cellranger_counts(fname, genome=\"matrix\"):\n with tables.open_file(fname, \"r\") as f:\n try:\n group = f.get_node(f.root, genome)\n except tables.NoSuchNodeError:\n print(\"That genome does not exist in this file.\")\n return None\n gene_ids = getattr(group, \"features/id\").read()\n barcodes = getattr(group, \"barcodes\").read()\n data = getattr(group, \"data\").read()\n indices = getattr(group, \"indices\").read()\n indptr = getattr(group, \"indptr\").read()\n shape = getattr(group, \"shape\").read()\n\n matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)\n gene_ids = np.array([x.decode() for x in gene_ids])\n barcodes = np.array([x.decode().replace(\"-1\", \"\") for x in barcodes])\n\n return CellRangerCounts(matrix, gene_ids, barcodes)",
"def get_marble_count(self):"
]
| [
"0.662604",
"0.66033",
"0.64993536",
"0.64116496",
"0.63783777",
"0.6340389",
"0.6309031",
"0.6276263",
"0.6247988",
"0.61803967",
"0.6126196",
"0.61252856",
"0.6114868",
"0.6101939",
"0.6043354",
"0.601563",
"0.60091496",
"0.5887007",
"0.5887007",
"0.5887007",
"0.5887007",
"0.5865512",
"0.58649534",
"0.5846263",
"0.5803514",
"0.578201",
"0.5753723",
"0.57534784",
"0.57510954",
"0.572877"
]
| 0.6607064 | 1 |
Format warnings for printing. Returns a list of warning strings with indentation. | def toStringList( self, indent='', dIndent=' ' ):
s = ['%s%s' % (indent, self.message)]
for warning in self.warningList:
s += warning.toStringList(indent + dIndent)
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pretty_print(self, warnings=False):\n msg = []\n if (warnings) and (len(self.warnings) > 0):\n msg.append(u\"Warnings:\")\n for warning in self.warnings:\n msg.append(u\" %s\" % warning)\n if len(self.errors) > 0:\n msg.append(u\"Errors:\")\n for error in self.errors:\n msg.append(u\" %s\" % error)\n return u\"\\n\".join(msg)",
"def get_warning(self) -> List[str]:\n return []",
"def get_warning(self) -> List[str]:\n return []",
"def format_warn(self, *args):\n if self._pretty:\n return self.format_multiline_message(*args, color='magenta', start='[WARN] ', multiline=' ~~ ')\n return self.format_multiline_message(*args)",
"def warnings(self) -> List[Error]:",
"def get_warning_msgs():\n return sorted(TypeWarning.warnings, key=lambda warning: warning.localization.line)",
"def create_warning_notes(warnings: List[Text]) -> Text:\n warning_title = template.SUB_SUB_SECTION_TITLE.format(\n content='Warnings'\n )\n return warning_title + create_content_list(warnings)",
"def warning(indent, message):\n print \"%sWarning: %s\" % (indent, message)",
"def get_warning_text(self):\n \n to_print = []\n if self['skipped_subchannel'] > 0:\n to_print.append(\"Some event with large weight have been discarded.\"+\\\n \" This happens %s times.\" % self['skipped_subchannel'])\n if self['n_madloop_calls'] > 0:\n fraction = self['exceptional_points']/float(self['n_madloop_calls'])\n if fraction > 1.0e-4:\n to_print.append(\"Some PS with numerical instability have been set \"+\\\n \"to a zero matrix-element (%.3g%%)\" % (100.0*fraction))\n \n return ('\\n'.join(to_print)).replace(\"'\",\" \")",
"def print_warning_msgs():\n for err in TypeWarning.warnings:\n print err",
"def get_warning(miscobj):\n\n finalwarning = []\n typewarning = misctype_byname(miscobj.filetype).warning\n if typewarning:\n finalwarning.append(typewarning.replace('\\n', '<br>'))\n\n langwarnings = {'Python': ('This is known to work with Python 2 and 3.'),\n 'Python 2': ('This has been tested with Python 2, but it '\n 'won\\'t work with Python 3.'),\n 'Python 3': ('This has been tested with Python 3, but it '\n 'won\\'t work with Python 2.'),\n }\n if miscobj.language in langwarnings.keys():\n finalwarning.append(langwarnings[miscobj.language])\n\n if len(finalwarning) > 1:\n return mark_safe('<br><br>'.join(finalwarning))\n else:\n return finalwarning[0] if finalwarning else ''",
"def warnings():\n return THE_LOGGER.warnings",
"def print_warning(msg):\n print\n print \"*\" * 80\n print \"\\n\".join(textwrap.wrap(msg, width=80))\n print \"*\" * 80",
"def my_formatwarning(message, category, filename, lineno):\n return \"%s(%i) : warning: %s\" % (filename, lineno, message)",
"def warnings_to_stdout():\r\n showwarning_orig = warnings.showwarning\r\n\r\n def showwarning(msg, cat, fname, lno, file=None, line=0):\r\n showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout)\r\n\r\n warnings.showwarning = showwarning\r\n #warnings.simplefilter('always')\r",
"def warns(*warnings, **opts):\r\n import warnings as warnings_\r\n\r\n captured = []\r\n old_filters, old_showwarning = warnings_.filters, warnings_.showwarning\r\n warnings_.filters = old_filters[:]\r\n\r\n def showwarning(message, category, *args, **kwargs):\r\n if category not in warnings:\r\n old_showwarning(message, category, *args, **kwargs)\r\n return\r\n captured.append(message)\r\n warnings_.showwarning = showwarning\r\n\r\n for warning in warnings:\r\n warnings_.simplefilter(\"always\", warning)\r\n\r\n try:\r\n yield captured\r\n if opts.get(\"any\", False):\r\n assert captured\r\n else:\r\n assert set(warnings) == set(map(type, captured))\r\n finally:\r\n warnings_.filters = old_filters\r\n warnings_.showwarning = old_showwarning",
"def format(self, warning):\n if hasattr(warning, 'start') and hasattr(warning, 'end'):\n if warning.start.line == warning.end.line:\n location = u'line {}'.format(warning.start.line)\n else:\n location = u'lines {}-{}'.format(\n warning.start.line,\n warning.end.line\n )\n else:\n location = u'line {}'.format(warning.lineno)\n if hasattr(warning, 'lines'):\n lines = warning.lines\n template = textwrap.dedent(u\"\"\"\\\n File \"{file}\", {location}\n {lines}\n {message}\n\n \"\"\")\n else:\n lines = []\n template = textwrap.dedent(u\"\"\"\\\n File \"{file}\", {location}\n {message}\n\n \"\"\")\n if len(lines) > 1:\n lineno_length = count_digits(warning.end.line)\n lines_with_lineno = []\n for lineno, line in enumerate(lines, warning.start.line):\n lines_with_lineno.append(\n u' {} {}'.format(\n text_type(lineno).rjust(lineno_length),\n line\n )\n )\n lines = lines_with_lineno\n else:\n lines = [u' ' * 2 + line for line in lines]\n self.output.write(\n template.format(\n file=warning.file,\n location=location,\n lines=u'\\n'.join(lines),\n message=textwrap.fill(warning.message)\n )\n )",
"def list_warnings(self):\n lwarn = []\n r = (220,0,0) # Red\n w = (244,234,244) # White\n g = (144,238,144) # Green\n w = (255,255,255) # White\n c = cf.gs.game.character\n ci = c.inventory\n f = ci.sorted_items['food'].amount\n if f > 0 and f < 10:\n lwarn.append(\n {'item':None,'value':'Low food!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif f <= 0:\n lwarn.append(\n {'item':None,'value':'0 food: HP -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n lwarn.append(\n {'item':None,'value':'0 food: Sanity -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n h = cf.gs.game.character.selected_house\n if h == 'Staying with Friends':\n lwarn.append(\n {'item':None,'value':'No house: Sanity -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if c.health == 1:\n lwarn.append(\n {'item':None,'value':'Low health!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif c.health <= 0:\n lwarn.append(\n {'item':None,'value':'0 health!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if len(cf.gs.game.events.inactive_events) == 5:\n lwarn.append(\n {'item':None,'value':'5 events: Activating!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if c.sanity > 0 and c.sanity < 10:\n lwarn.append(\n {'item':None,'value':'Low sanity!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif c.sanity <= 0:\n lwarn.append(\n {'item':None,'value':'0 sanity!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n cash = ci.sorted_items['cash'].amount\n if cash > 0 and cash < 4000:\n lwarn.append(\n {'item':None,'value':'Low cash!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif cash <= 0:\n lwarn.append(\n {'item':None,'value':'0 cash: Sanity-=1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if len(lwarn) == 0:\n lwarn.append(\n {'item':None,'value':'Green means go!',\n 'selected_bgcolor':g,'bgcolor':g,'font_size':20})\n return lwarn",
"def get_warnings(env=None):\n diagtool_bin = get_diagtool_bin()\n\n if not diagtool_bin:\n return []\n\n try:\n result = subprocess.check_output(\n [diagtool_bin, 'tree'],\n env=env,\n universal_newlines=True,\n encoding=\"utf-8\",\n errors=\"ignore\")\n return [w[2:] for w in result.split() if w.startswith(\"-W\")]\n except (subprocess.CalledProcessError, OSError):\n return []",
"def warnings_active(self) -> List[Error]:",
"def itkFormatWarning(msg, *a, **kwa):\n\n return str(msg) + '\\n'",
"def warnings(self) -> List[Error]:\n return self._get_warnings()",
"def warn(self) -> list:\n return self.__wrn",
"def formatErrors(self):\n errorlist = []\n xepsWithErrors = sorted(\n set(self.getParseErrors() + self.getBuildErrors()),\n key=lambda x: str(x))\n if self.getErrors() or xepsWithErrors:\n if self.getErrors():\n errorlist.append(\"********** Read errors **********\")\n for error in self.getErrors():\n errorlist.append(error)\n for xep in xepsWithErrors:\n errorlist.append(\n \"********** Error report for {} **********\".format(str(xep)))\n if xep.parseErrors:\n errorlist.append(\"********** Parsing Errors **********\")\n errors = list(set(xep.parseErrors))\n for error in errors:\n errorlist.append(error)\n if xep.buildErrors:\n errorlist.append(\"********** Build Errors **********\")\n for error in xep.buildErrors:\n if len(error.splitlines()) > 4:\n error = ''.join(error.splitlines()[:4])\n errorlist.append(error)\n return '\\n'.join(errorlist)\n else:\n return None",
"def test_sanitize_warnings(self):\n input = {\n \"warnings\": [\n \"! Change will take effect only after switch reboot at line 11\\\\n\\\\n\",\n \"! \\\\nWARNING!\\\\nChanging TCAM profile will cause forwarding agent(s) to exit and restart.\\\\nAll traffic through the forwarding chip managed by the restarting\\\\nforwarding agent will be dropped.\\\\n at line 392\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs\",\n \"concentrators\",\n \"switches\",\n \"bridges\",\n \"etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 2\\\\n\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs\",\n \"concentrators\",\n \"switches\",\n \"bridges\",\n \"etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 4\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 6\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\",\n \"! Interface does not exist. The configuration will not take effect until the module is inserted. at line 2799\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 1247\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\
\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\"\n\n ],\n \"warningCount\": 14,\n \"errors\": [\n {\n \"lineNo\": \" 6\",\n \"error\": \"> ruter bgp 1512% Invalid input (at token 0: 'ruter') at line 6\",\n }\n ],\n \"errorCount\": 1,\n }\n expected = {\n \"warnings\": [\n \"! Change will take effect only after switch reboot at line 11\\\\n\\\\n\",\n \"! \\\\nWARNING!\\\\nChanging TCAM profile will cause forwarding agent(s) to exit and restart.\\\\nAll traffic through the forwarding chip managed by the restarting\\\\nforwarding agent will be dropped.\\\\n at line 392\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 2\\\\n\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 4\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 6\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\",\n \"! Interface does not exist. The configuration will not take effect until the module is inserted. at line 2799\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. 
at line 1247\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\"\n ],\n \"warningCount\": 7,\n \"errors\": [\n {\n \"lineNo\": \" 6\",\n \"error\": \"> ruter bgp 1512% Invalid input (at token 0: 'ruter') at line 6\",\n }\n ],\n \"errorCount\": 1,\n }\n assert self.api.sanitize_warnings(input) == expected",
"def get_warnings(self):\n pass",
"def logger_styles(self) -> str:\n\t\treturn ('info=blue;'\n\t\t\t\t'warning=green;'\n\t\t\t\t'error=red;'\n\t\t\t\t'critical=red,bold;'\n\t\t\t\t'debug=white')",
"def prettify_frags(frags):\n try:\n prettier_frags = []\n for frag in frags:\n\n # If the player did not kill itself\n if len(frag) > 2:\n frag_line = \"[{time}] {killer} {weapon} {victim}\".format(\n time=frag[0].isoformat(),\n killer=SMILEYFACE + \" \" + frag[1],\n weapon=WEAPON_DICT.get(frag[3]),\n victim=SADFACE + \" \" + frag[2])\n\n else:\n frag_line = \"[{}] \".format(frag[0].isoformat()) + \\\n SADFACE + \" \" + frag[1] + \" \" + SKULL\n\n prettier_frags.append(frag_line)\n\n return prettier_frags\n except Exception:\n print(\"Something is wrong with the log file!\")",
"def show_warnings(self):\n for w in self.warnings:\n w()",
"def warnings(self):\n return self.__warnings"
]
| [
"0.722904",
"0.64456606",
"0.64456606",
"0.6390865",
"0.6286236",
"0.62299055",
"0.6129529",
"0.6120169",
"0.6116671",
"0.6057338",
"0.60385865",
"0.5984471",
"0.5891208",
"0.58653736",
"0.58580256",
"0.58468646",
"0.58211756",
"0.57925606",
"0.5790896",
"0.575746",
"0.5723054",
"0.56591547",
"0.5642397",
"0.5626575",
"0.5532723",
"0.54898524",
"0.546866",
"0.5438004",
"0.5435777",
"0.5433326"
]
| 0.6823347 | 1 |
Sets the plot title to the value present in the title input box when the button is clicked | def setPlotTitle(self):
plot_title = self.input_plot_title.text()
if plot_title:
self.plot_title = self.input_plot_title.text()
# Redraw the plot with given title
if not self.plot_inverted:
self.drawPlot(self.data_x_axis, self.data_y_axis, self.label_x_axis, self.label_y_axis)
else:
self.drawPlot(self.data_y_axis, self.data_x_axis, self.label_y_axis, self.label_x_axis)
else:
QMessageBox.about(self, "Error!", "Please enter a title to set in the plot") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_plot_title(self):\n plot_title = self.input_plot_title.text()\n if plot_title:\n self.plot_title = self.input_plot_title.text()\n # Redraw the plot with given title\n if not self.plot_inverted:\n self.draw_plot(self.data_x_axis, self.data_y_axis, self.label_x_axis, self.label_y_axis)\n else:\n self.draw_plot(self.data_y_axis, self.data_x_axis, self.label_y_axis, self.label_x_axis)\n else:\n QMessageBox.about(self, \"Error!\", \"Please enter a title to set in the plot\")",
"def set_title(self, title):\n self.axplot.set_title(title)",
"def set_title(self, title):\n self.dlg.txt_layer.setText(title)",
"def setTitle(self, title):\n self.__title = title\n self.drawBorder()",
"def set_title(self, title = \"FORM\"):\n\n c = self.canvas.setTitle(title)",
"def set_title(self, title):\n self.title = title\n self.opf.title = title\n self.ncx.title = title",
"def set_title(self, title):\r\n self.title = title",
"def title(self, title: str):\n\n #self.master.title(title)\n self.ax.set_title(title)\n self.canvas.draw()",
"def set_title(self, title):\n\t\tpass",
"def title(self, value: str):\n self.tk_ref.title(value)",
"def set_title(self, title):\n self.widget.SetTitle(title)",
"def set_title(self, val):\n self._title = val",
"def set_title(self, title):\n self.data['title'] = title",
"def set_title(self):\n plt.title(label=self.title, fontsize=self.titlesize)",
"def set_title(self, title):\n self.l1.setText(title)",
"def settitle(self, title):\n self.__title = title\n self.__nonzero = True",
"def set_title (self, title):\n self.title = title",
"def SetTitle(self, title):\n if self._title != title:\n self._title = title\n def closure(pane):\n pane.Caption(title)\n self._PaneInfoOperation(closure)",
"def set_title(self, setto):\n command = 'title ' + str(setto)\n self.run_command(command)",
"def set_title(self, title):\n \n self.name = title or \"\"",
"def set_title(self, title, color):\n # check input\n assert isinstance(title, str), 'Title must be a string but not a {0}.'.format(type(title))\n assert isinstance(color, str), 'Color must be a string but not a {0}.'.format(type(color))\n \n print '[DB...BAT] Set {0} in color {1} as the figure\\'s title.'.format(title, color)\n self.axes.set_title(title)\n \n self.draw()\n \n return",
"def updateTitle(self, bool=True, path=None):\n if path is None:\n child = self.sender()\n if child.parentWidget().title() == \"Set Window Title\":\n if child.text() == \"Filename\" and self.dataObj is not None:\n self.setWindowTitle(os.path.basename(self.dataObj.fname))\n elif child.text() == \"Other...\":\n text, ok = QtWidgets.QInputDialog.getText(self,\n \"Enter Window Name\",\n \"Title:\",\n QtWidgets.QLineEdit.Normal,\n self.windowTitle())\n if ok: self.setWindowTitle(text)\n self.path=None\n return\n path = [str(child.parentWidget().title()), str(child.text())]\n\n self.titlePath = path\n params = self.specParams\n try:\n val = params.child(*path).value()\n except Exception as e:\n print(\"ERROR updating window title\")\n print(\"\\tbool=\",bool)\n print(\"path=\",path)\n name = path[1]\n # cut out units if they're present\n pref = name[:name.find(\"(\")-1] if \"(\" in name else name\n\n self.setWindowTitle(\"{}: {}\".format(pref, val))",
"def title(self, title):\n\n self.container['title'] = title",
"def set_title(self, title):\n self.groupBox.setTitle(title)",
"def add_title(self, title, x_title = \"\", y_title = \"\"):\n self._fig.update_layout(title_text = title,\n xaxis_title = x_title,\n yaxis_title = y_title)",
"def setTitle(self,value):\n self.PDFreactorConfiguration.in1[\"title\"] = value",
"def title(self, value):\n self.definition.title = value",
"def setFigureTitle(self, title: str):\n self.fig.text(0.5, 0.99, title,\n horizontalalignment='center',\n verticalalignment='top',\n fontsize='small')\n self.draw()",
"def setTitle(self, title):\n self._title = title",
"def updateTitle(rubricterm, event):\n rubricterm.updateTitle()"
]
| [
"0.7686431",
"0.7370893",
"0.7333007",
"0.72645044",
"0.7250716",
"0.71819836",
"0.71057916",
"0.7100658",
"0.7095059",
"0.7093847",
"0.7082512",
"0.7081393",
"0.7071999",
"0.6987861",
"0.69744796",
"0.6965746",
"0.6932339",
"0.68831265",
"0.6823803",
"0.67966646",
"0.67911947",
"0.6787073",
"0.67843676",
"0.6781857",
"0.6759102",
"0.67107016",
"0.66986555",
"0.669467",
"0.66874456",
"0.66811883"
]
| 0.755215 | 1 |
Return the title of a widget | def get_widget_title(widget):
if widget['title'] != '':
return widget['title']
else:
return widget['metadata']['panels'][0]['items'][0]['jaql']['title'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def title(self) -> str:\n return self.tk_ref.title()",
"def title(self):\n return self.run_command('title')[0]",
"def title(self):\n return self.container['title']",
"def get_title():",
"def get_window_title(self): # real signature unknown; restored from __doc__\n return \"\"",
"def get_title(self):\n return self.run_command('get_title')[0]",
"def title(self):\n return win32gui.GetWindowText(self.hwnd)",
"def getTitle(self): #$NON-NLS-1$\r",
"def getTitle(self): #$NON-NLS-1$\r",
"def title(self):\n\n return self._title",
"def title(self):\n return self._frame._title",
"def get_title(self):\n return self.title",
"def get_title(self):\n return self.title",
"def get_title(self):\n return self.title",
"def get_title(self):\n return self._get_title_()",
"def title(self, value: str):\n self.tk_ref.title(value)",
"def title(self):\n return self.browser.get_attribute(\"title\", self)",
"def configured_title(self):\n return self.get('title', self.DEFAULT_SPACE_TITLE)",
"def widget_terminal_title(\n widget: 'pygame_menu.widgets.Widget',\n widget_index: int = -1,\n current_index: int = -1\n) -> str:\n w_class_id = TerminalColors.BOLD + widget.get_class_id() + TerminalColors.ENDC\n if isinstance(widget, pygame_menu.widgets.Frame):\n w_title = TerminalColors.BRIGHT_WHITE + '┌━' + TerminalColors.ENDC\n w_title += f'{0} - {3}[{1},{2},'.format(w_class_id, *widget.get_indices(), TerminalColors.LGREEN)\n if widget.horizontal:\n w_title += 'H] '\n else:\n w_title += 'V] '\n if widget.is_scrollable:\n wsz = widget.get_inner_size()\n wsm = widget.get_max_size()\n wsh = wsm[0] if wsm[0] == wsz[0] else f'{wsm[0]}→{wsz[0]}'\n wsv = wsm[1] if wsm[1] == wsz[1] else f'{wsm[1]}→{wsz[1]}'\n w_title += f'∑ [{wsh},{wsv}] '\n w_title += TerminalColors.ENDC\n else:\n if widget.get_title() != '':\n title_f = TerminalColors.UNDERLINE + widget.get_title() + TerminalColors.ENDC\n w_title = f'{w_class_id} - {title_f} - '\n else:\n w_title = w_class_id + ' - '\n\n # Column/Row position\n w_title += TerminalColors.INDIGO\n cr = widget.get_col_row_index()\n w_title += '{' + str(cr[0]) + ',' + str(cr[1]) + '}'\n w_title += TerminalColors.ENDC\n\n # Add position\n w_title += TerminalColors.MAGENTA\n w_title += ' ({0},{1})'.format(*widget.get_position())\n w_title += TerminalColors.ENDC\n\n # Add size\n w_title += TerminalColors.BLUE\n w_title += ' ({0},{1})'.format(*widget.get_size())\n w_title += TerminalColors.ENDC\n\n # Add mods\n w_title += TerminalColors.CYAN\n if widget.is_floating():\n w_title += ' Φ'\n if not widget.is_visible():\n w_title += ' ╳'\n if not widget.is_selectable:\n w_title += ' β'\n if widget.is_selected():\n w_title += TerminalColors.BOLD + ' ⟵'\n if current_index != -1 and current_index != widget_index:\n w_title += f'! [{widget_index}->{current_index}]'\n if widget.get_menu() is None:\n w_title += ' !▲'\n w_title += TerminalColors.ENDC\n\n return w_title",
"def title(self):\n return self.__title",
"def title(self):\n return self.__title",
"def title(self):\n return self.__title",
"def get_title(self) -> str:\n pass",
"def title(self):\n return self._title",
"def title(self):\n return self._title",
"def title(self):\n return self._title",
"def title(self):\n return self._title",
"def title(self):\n return self._title",
"def title(self):\n return self._title",
"def get_title(self):\n title = self.driver.title\n return title"
]
| [
"0.7554609",
"0.7312672",
"0.7257128",
"0.7138261",
"0.7099524",
"0.7089029",
"0.7006153",
"0.6958793",
"0.6958793",
"0.69035614",
"0.68345183",
"0.6812255",
"0.6812255",
"0.6812255",
"0.6798095",
"0.67852056",
"0.6750128",
"0.67444265",
"0.6724642",
"0.6695906",
"0.6695906",
"0.6695906",
"0.6684917",
"0.6666277",
"0.6666277",
"0.6666277",
"0.6666277",
"0.6666277",
"0.6666277",
"0.6659562"
]
| 0.83423537 | 0 |
Return the item on which the widget must be sorted, if any | def get_widget_sorted_item(widget):
for panel in widget['metadata']['panels']:
for item in panel['items']:
if 'sort' in item['jaql']:
return item['jaql']['title']
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def item_comparer(self):\n return self.item_comparer_value",
"def OnCompareItems(self, item1, item2):\r\n\r\n return cmp(self.GetItemText(item1), self.GetItemText(item2))",
"def GetFirstVisibleItem(self):\r\n\r\n return self.GetNextVisible(self.GetRootItem())",
"def __getitem__(self,item):\n if self._dosort is not None:\n i = self._sorted_indices[item-1]\n else:\n i = item\n return super(AHFCatalogue,self).__getitem__(i)",
"def execute(self, item):\n items = item[\"items\"]\n reverse = not item.get(\"descending\", True)\n items.sort(key=operator.itemgetter(\"distance\"), reverse=reverse)\n\n return item",
"def OnCompareItems(self, item1, item2):\n # Get the item data\n data_1 = self.GetItemText(item1)\n data_2 = self.GetItemText(item2)\n # Compare the item data\n if data_1 < data_2:\n return -1\n elif data_1 > data_2:\n return 1\n else:\n return 0",
"def OnCompareItems(self, item1, item2):\r\n\r\n # do the comparison here, and not delegate to self._main_win, in order\r\n # to let the user override it\r\n\r\n return self.GetItemText(item1) == self.GetItemText(item2)",
"def FindToolForPosition(self, x, y):\r\n\r\n for i, item in enumerate(self._items):\r\n if not item.sizer_item:\r\n continue\r\n\r\n rect = item.sizer_item.GetRect()\r\n if rect.Contains((x,y)):\r\n \r\n # if the item doesn't fit on the toolbar, return None\r\n if not self.GetToolFitsByIndex(i):\r\n return None\r\n\r\n return item\r\n \r\n return None",
"def _updateSelectedItem(self):\n plot = self.plot\n if plot is not None:\n selected = plot.selection().getSelectedItems()\n # Give priority to image over scatter\n for klass in (items.ImageBase, items.Scatter):\n for item in selected:\n if isinstance(item, klass):\n # Found a matching item, use it\n self.getHistogramWidget().setItem(item)\n return\n self.getHistogramWidget().setItem(None)",
"def sorted_index(self, item):\n return _(_sorted_index(self._, item))",
"def GetFirstVisibleItem(self):\r\n\r\n id = self.GetRootItem()\r\n if not id:\r\n return id\r\n\r\n while id:\r\n if self.IsVisible(id):\r\n return id\r\n id = self.GetNext(id)\r\n\r\n return None",
"def getDependOnItem(self):\n currentRow = self.getCurrentRow()\n if currentRow == 0:\n return None\n return self.jobRow.child(currentRow - 1, 0)",
"def __getitem__(self, index):\r\n if isinstance(index, six.string_types):\r\n for order_by in self:\r\n if order_by == index or order_by.bare == index:\r\n return order_by\r\n raise KeyError\r\n return super(OrderByTuple, self).__getitem__(index)",
"def FindToolForPositionWithPacking(self, x, y):\r\n \r\n count = len(self._items)\r\n \r\n for i, item in enumerate(self._items):\r\n if not item.sizer_item:\r\n continue\r\n\r\n rect = item.sizer_item.GetRect()\r\n\r\n # apply tool packing\r\n if i+1 < count:\r\n rect.width += self._tool_packing\r\n\r\n if rect.Contains((x,y)):\r\n \r\n # if the item doesn't fit on the toolbar, return None\r\n if not self.GetToolFitsByIndex(i):\r\n return None\r\n\r\n return item\r\n\r\n return None",
"def findItemWidgetFromDate(self, data):\n for row in range(6):\n for col in range(7):\n itemWidget = self.table.cellWidget(row, col)\n _data = itemWidget.data\n if data == _data:\n return itemWidget",
"def sortby(self):\n ...",
"def selected_item(self) -> MenuItem | None:\n if self.selected_option == -1:\n return None\n else:\n return self.all_items[self.selected_option]",
"def GetPrevVisible(self, item):\r\n \r\n # find a previous sibling or parent which is visible\r\n lastGoodItem = self.GetPrevSibling(item)\r\n if not lastGoodItem or not self.IsVisible(lastGoodItem):\r\n parent = self.GetItemParent(item)\r\n rootHidden = self.HasAGWFlag(TR_HIDE_ROOT)\r\n rootItem = self.GetRootItem()\r\n \r\n while parent and not (rootHidden and parent == rootItem):\r\n if self.IsVisible(parent):\r\n lastGoodItem = parent\r\n break\r\n parent = self.GetItemParent(parent)\r\n\r\n if not lastGoodItem:\r\n return None\r\n \r\n # test if found item has visible children, if so and if the found item is not the \r\n # parent of the current item traverse the found item to the last visible child\r\n if not self.HasChildren(lastGoodItem) or not self.IsExpanded(lastGoodItem) or \\\r\n (self.GetItemParent(item) == lastGoodItem):\r\n return lastGoodItem\r\n \r\n lastChild = self.GetLastChild(lastGoodItem)\r\n while lastChild and self.IsVisible(lastChild):\r\n lastGoodItem = lastChild\r\n lastChild = self.GetLastChild(lastGoodItem)\r\n \r\n return lastGoodItem",
"def item_comparer(self, value):\n self.item_comparer_value = value",
"def getItemRect(self, item):\n r = Rect(1, 0, 1, 1)\n for p in self.menu.items:\n r.topLeft.x = r.bottomRight.x\n if p.name:\n r.bottomRight.x += (nameLength(p.name) + 2)\n if p is item:\n return r\n return r",
"def GetPrevVisible(self, item): \r\n\r\n i = self.GetNext(item, False)\r\n while i:\r\n if self.IsItemVisible(i):\r\n return i\r\n i = self.GetPrev(i, False)\r\n \r\n return None",
"def sort_key(self):\n ...",
"def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items",
"def _is_sorted(self, item):\n return isinstance(item, sortedsets.SortedNode) or self._is_weighted(item)",
"def __lt__(self, value):\n if not isinstance(value, Item):\n raise ValueError(\"Can't compare Item to non-Item type\")\n return self.views < value.views",
"def SortChildren(self, item):\r\n\r\n if not self._attr_set:\r\n setattr(self._main_win, \"OnCompareItems\", self.OnCompareItems)\r\n self._attr_set = True\r\n \r\n self._main_win.SortChildren(item)",
"def _grokker_sort_key(args):\n grokker, name, obj = args\n return priority.bind().get(grokker)",
"def get_active_desktop_sort(self):\n\t\t# non-active options have opacity of 0.2\n\t\tfor i in xrange(5):\n\t\t\t# raw_input(str(i))\n\t\t\tarrow_el = (\n\t\t\t\tself.sort_opts[i].find_element_by_tag_name(\"svg\")\n\t\t\t)\n\t\t\topacity = arrow_el.value_of_css_property(\"opacity\")\n\t\t\tif opacity != \"0.2\":\n\t\t\t\treturn i\n\t\treturn None",
"def by_order(item):\n if \"Order\" in item:\n return item[\"Order\"]\n return 0",
"def select(self):\n cond = lambda c: c.value\n sorted_children = sorted(self.children, key=cond)\n return sorted_children[-1]"
]
| [
"0.62678564",
"0.6071453",
"0.5718568",
"0.56606954",
"0.55855614",
"0.5572795",
"0.553234",
"0.5485188",
"0.5474386",
"0.54547495",
"0.54483056",
"0.5421757",
"0.54117554",
"0.54097724",
"0.53953743",
"0.538942",
"0.5367559",
"0.5348384",
"0.53402",
"0.53376174",
"0.53265804",
"0.5319673",
"0.531351",
"0.5283272",
"0.52766675",
"0.52684337",
"0.5268034",
"0.5267177",
"0.5258791",
"0.52536386"
]
| 0.73745424 | 0 |
Return the list of widget IDs in the order of appearance from the layout object | def get_dashboard_ordered_widget_ids(dashfile_data):
ordered_widget_ids = []
for column in dashfile_data['layout']['columns']:
for cell in column['cells']:
for subcell in cell['subcells']:
for element in subcell['elements']:
ordered_widget_ids.append(element['widgetid'])
return ordered_widget_ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_list(self):\n return list(self.widget_dict.values())",
"def gui_layout_view(self) -> List[List[sg.Element]]:\n return []",
"def gui_layout_edit(self) -> List[List[sg.Element]]:\n\n return []",
"def widget_map(self):\n return self._widget_map",
"def getAllWidgets(self):\n \n visualisations = Visualisation.objects.filter(dataSource=self)\n widgets = []\n for vis in visualisations:\n widgets.append(vis.getWidget())\n return widgets",
"def widgets(self):\n raise NotImplementedError(\"This method is not ready to be used yet\")",
"def get_layout_code(self, obj):\n return []",
"def widgets(self):\r\n return resources.Widgets(self)",
"def get_widgets(layout, placeholder, user=None, workspace=None,\n position=None, occupied_cells=[], sort_items=True):\n # We should get the layout, see loop through its' plugins and see which of\n # those do have renderers. Then we get all the plugins (based on whether\n # they are restricted or not - get the list) and then filter out those\n # that do not have renderers.\n\n ensure_autodiscover()\n\n registered_widgets = {}\n\n plugin_widget_uids = plugin_widget_registry._registry.keys()\n\n if not RESTRICT_PLUGIN_ACCESS or getattr(user, 'is_superuser', False):\n for uid, plugin in plugin_registry._registry.items():\n # We should make sure that there are widgets available for the\n # placeholder.\n plugin_widget_uid = PluginWidgetRegistry.namify(\n layout.uid,\n placeholder.uid,\n uid\n )\n\n # Get cells occupied by plugin widget.\n widget_occupied_cells = get_occupied_cells(\n layout,\n placeholder,\n uid,\n position,\n check_boundaries=True\n )\n\n if plugin_widget_uid in plugin_widget_uids \\\n and widget_occupied_cells is not False \\\n and not lists_overlap(widget_occupied_cells, occupied_cells):\n\n plugin_widget = plugin_widget_registry.get(plugin_widget_uid)\n kwargs = {\n 'placeholder_uid': placeholder.uid,\n 'plugin_uid': uid\n }\n if workspace:\n kwargs.update({'workspace': workspace})\n if position:\n kwargs.update({'position': position})\n\n plugin_group = safe_text(plugin.group)\n if plugin_group not in registered_widgets:\n registered_widgets[plugin_group] = []\n\n widget_name = safe_text(plugin.name)\n\n registered_widgets[plugin_group].append(\n (\n uid,\n '{0} ({1}x{2})'.format(widget_name,\n plugin_widget.cols,\n plugin_widget.rows),\n reverse('dash.add_dashboard_entry', kwargs=kwargs)\n )\n )\n else:\n allowed_plugin_uids = get_allowed_plugin_uids(user)\n\n for uid, plugin in plugin_registry._registry.items():\n # We should make sure that there are widgets available for the\n # placeholder and user has access to the widget desired.\n plugin_widget_uid = PluginWidgetRegistry.namify(\n layout.uid,\n placeholder.uid,\n uid\n )\n\n # Get cells occupied by plugin widget.\n widget_occupied_cells = get_occupied_cells(\n layout,\n placeholder,\n uid,\n position,\n check_boundaries=True\n )\n\n if uid in allowed_plugin_uids \\\n and plugin_widget_uid in plugin_widget_uids \\\n and widget_occupied_cells is not False \\\n and not lists_overlap(widget_occupied_cells, occupied_cells):\n\n plugin_widget = plugin_widget_registry.get(plugin_widget_uid)\n kwargs = {\n 'placeholder_uid': placeholder.uid,\n 'plugin_uid': uid\n }\n if workspace:\n kwargs.update({'workspace': workspace})\n if position:\n kwargs.update({'position': position})\n\n plugin_group = safe_text(plugin.group)\n if plugin_group not in registered_widgets:\n registered_widgets[plugin_group] = []\n\n registered_widgets[plugin_group].append(\n (\n uid,\n '{0} ({1}x{2})'.format(safe_text(plugin.name),\n plugin_widget.cols,\n plugin_widget.rows),\n reverse('dash.add_dashboard_entry', kwargs=kwargs)\n )\n )\n\n if sort_items:\n for key, prop in registered_widgets.items():\n prop.sort()\n\n return registered_widgets",
"def _init_widgets(self):\n comps = self.ui.component_list\n comps.addItems(sorted(self._labels.keys()))\n data = self.ui.data_list\n data.addItems(sorted(self._data.keys()))",
"def widgets(self) -> Munch:\n return self._widgets",
"def signature(self):\n inp = [a.component_id for a in self._argument_widgets]\n out = self._output_widget.component_id\n return inp, out",
"def getIDs():",
"def widgets(self) -> WidgetRegistry:\n return self._widgets",
"def delete_widgets_from(layout):\n for i in reversed(range(layout.count())):\n widgetToRemove = layout.itemAt(i).widget()\n # remove it from the layout list\n layout.removeWidget(widgetToRemove)\n # remove it from the gui\n widgetToRemove.setParent(None)",
"def get_layout_names(base_url=DEFAULT_BASE_URL):\n res = commands.cyrest_get('apply/layouts', base_url=base_url)\n return res",
"def controls(self):\n controls = []\n for i in range(len(self.__listReq)):\n if self.__controlsChecks[i].isChecked():\n controls.append(self.__listReq[i].get(\"id\"))\n return controls",
"def widgetId(form, component):\n return '.'.join(\n (iditem for iditem in\n (str(form.prefix), component.prefix, component.identifier,)\n if iditem))",
"def window_handles(self):\n pass",
"def widgets(std_prm: Parameter) -> List[QWidget]:\n widgets = []\n\n # The name widget\n textEdit = QLineEdit()\n textEdit.setText(std_prm[\"name\"])\n widgets.append(textEdit)\n\n # The input widget\n inputWidget = std_prm[\"build method\"](std_prm[\"build method prms\"], std_prm[\"slot\"])\n widgets.append(inputWidget)\n\n # Add the input widget to the parameter\n # this field will be used to identify the parameter\n # in the slot\n std_prm[\"name widget\"] = textEdit\n std_prm[\"widget\"] = inputWidget\n return widgets",
"def create_widgets(self):\n for name in self.phonebook:\n temp_button = Label(text=name, id=name)\n self.root.ids.entries_box.add_widget(temp_button)",
"def controlOrder():\n return ['OverlayListPanel',\n 'LocationPanel',\n 'OverlayInfoPanel',\n 'OverlayDisplayPanel',\n 'CanvasSettingsPanel',\n 'AtlasPanel',\n 'OverlayDisplayToolBar',\n 'OrthoToolBar',\n 'FileTreePanel']",
"def get_widgets_array(self, g_parent, r_parent, gamut_list=gamut_list):\n widget_list = {\"gamut_button\": [], \"radio_button\": [], \"primary\": []}\n widget_list[\"gamut_button\"] = [0] * len(gamut_list)\n widget_list[\"radio_button\"] = [0] * 2\n widget_list[\"primary\"] = [0] * 3\n for idx, gamut in enumerate(gamut_list):\n widget_list[\"gamut_button\"][idx]\\\n = ttk.Radiobutton(g_parent, text=gamut,\n variable=self.gamut_button_value,\n value=gamut)\n widget_list[\"radio_button\"][0]\\\n = ttk.Radiobutton(r_parent, text='clip off',\n variable=self.rb_value, value=\"off\")\n widget_list[\"radio_button\"][1]\\\n = ttk.Radiobutton(r_parent, text='clip on',\n variable=self.rb_value, value=\"on\")\n\n return widget_list",
"def create_widgets(self):",
"def getId(self):\n return _libsbml.Layout_getId(self)",
"def parse_layout(layout):\n global index \n for lt_obj in layout:\n print(lt_obj.__class__.__name__)\n print(lt_obj.bbox)\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n print(lt_obj.get_text())\n d[lt_obj.get_text().strip()]=(index,lt_obj.bbox)\n index+=1\n elif isinstance(lt_obj, LTFigure):\n parse_layout(lt_obj) # Recursive",
"def get_ids(self):\n return self._graphs.keys()",
"def getAllWindowHandles(self):\n cmdId = self.executeCommand(Command.GET_WINDOW_HANDLES)\n return cmdId",
"def get_control_ids(self) -> List[str]:\n return self._control_dict.keys()",
"def get_declared_items(self):\n for k, v in super(AndroidListView, self).get_declared_items():\n if k == 'layout':\n yield k, v\n break"
]
| [
"0.6166304",
"0.61398214",
"0.61017835",
"0.60591733",
"0.5988189",
"0.581369",
"0.57681173",
"0.56985873",
"0.5695312",
"0.5651034",
"0.5631582",
"0.5631433",
"0.5525349",
"0.55079883",
"0.5484116",
"0.5450929",
"0.54188025",
"0.54106236",
"0.5409411",
"0.5408571",
"0.54049325",
"0.5400762",
"0.53816575",
"0.5352634",
"0.5331665",
"0.5305967",
"0.5296956",
"0.529066",
"0.5288697",
"0.5260298"
]
| 0.728899 | 0 |
Return the list of widget titles present on the given dashboard, following the order defined in the dashboard layout | def get_ordered_titles(dashfile_data):
ordered_widget_ids = get_dashboard_ordered_widget_ids(dashfile_data)
kpis_id_title_mapping = {}
charts_id_title_mapping = {}
tables_id_title_mapping = {}
kpi_titles = []
chart_titles = []
table_titles = []
    # Create a local mapping between the widget ID and its title; the mapping is specific to the widget type
for widget in dashfile_data['widgets']:
widget_id = widget['oid']
widget_title = get_widget_title(widget)
widget_sorted_item = get_widget_sorted_item(widget)
if widget['type'] in KPI_TYPES:
kpis_id_title_mapping[widget_id] = widget_title
elif widget['type'] in CHART_TYPES:
charts_id_title_mapping[widget_id] = {
'title': widget_title,
'sortedItem': widget_sorted_item
}
elif widget['type'] in TABLE_TYPES:
tables_id_title_mapping[widget_id] = {
'title': widget_title,
'sortedItem': widget_sorted_item
}
elif widget['type'] not in IGNORED_TYPES:
logging.warning('The widget type {0} is unknown and will not be processed.'.format(widget['type']))
continue
# Sort the titles following the order of the widgets in the layout
for ordered_widget_id in ordered_widget_ids:
if ordered_widget_id in kpis_id_title_mapping:
kpi_titles.append(kpis_id_title_mapping[ordered_widget_id])
elif ordered_widget_id in charts_id_title_mapping:
chart_titles.append(charts_id_title_mapping[ordered_widget_id])
elif ordered_widget_id in tables_id_title_mapping:
table_titles.append(tables_id_title_mapping[ordered_widget_id])
return [kpi_titles, chart_titles, table_titles] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dashboard_ordered_widget_ids(dashfile_data):\n ordered_widget_ids = []\n for column in dashfile_data['layout']['columns']:\n for cell in column['cells']:\n for subcell in cell['subcells']:\n for element in subcell['elements']:\n ordered_widget_ids.append(element['widgetid'])\n return ordered_widget_ids",
"def get_dashboard_elements(dashboard):\n items = {\"visualization\": [], \"chart\": [], \"reportTable\": [], \"eventVisualization\": [], \"eventReport\": [], \"eventChart\": [], \"map\": []}\n for dashboardItem in dashboard['dashboardItems']:\n if any(version in api_source.version for version in ['2.36', '2.37']):\n items_list = ['visualization', 'eventVisualization', 'eventReport', 'eventChart', 'map']\n else:\n items_list = ['visualization', 'eventVisualization', 'map']\n for dashboard_item in items_list:\n if dashboard_item in dashboardItem:\n items[dashboard_item].append(dashboardItem[dashboard_item]['id'])\n return items",
"def _replace_dashboard_config_variables(self):\n data = self._load_config_file(CloudwatchConfigType.DASHBOARD.value)\n widgets = []\n for item in data:\n self._replace_all_config_variables(\n item,\n None,\n self.cluster_name,\n self.provider_config[\"region\"],\n )\n for node_id in self.node_ids:\n item_out = copy.deepcopy(item)\n (item_out, modified_str_count) = \\\n self._replace_all_config_variables(\n item_out,\n str(node_id),\n None,\n None,\n )\n widgets.append(item_out)\n if not modified_str_count:\n break # no per-node dashboard widgets specified\n return widgets",
"def get_widget_sorted_item(widget):\n for panel in widget['metadata']['panels']:\n for item in panel['items']:\n if 'sort' in item['jaql']:\n return item['jaql']['title']\n return None",
"def get_dashboard_names(cls):\n dashboards = cls._get_all_dashboards()\n return [str(dboard[\"filename\"]) for dboard in dashboards]",
"def get_widget_title(widget):\n if widget['title'] != '':\n return widget['title']\n else:\n return widget['metadata']['panels'][0]['items'][0]['jaql']['title']",
"def get_widgets(layout, placeholder, user=None, workspace=None,\n position=None, occupied_cells=[], sort_items=True):\n # We should get the layout, see loop through its' plugins and see which of\n # those do have renderers. Then we get all the plugins (based on whether\n # they are restricted or not - get the list) and then filter out those\n # that do not have renderers.\n\n ensure_autodiscover()\n\n registered_widgets = {}\n\n plugin_widget_uids = plugin_widget_registry._registry.keys()\n\n if not RESTRICT_PLUGIN_ACCESS or getattr(user, 'is_superuser', False):\n for uid, plugin in plugin_registry._registry.items():\n # We should make sure that there are widgets available for the\n # placeholder.\n plugin_widget_uid = PluginWidgetRegistry.namify(\n layout.uid,\n placeholder.uid,\n uid\n )\n\n # Get cells occupied by plugin widget.\n widget_occupied_cells = get_occupied_cells(\n layout,\n placeholder,\n uid,\n position,\n check_boundaries=True\n )\n\n if plugin_widget_uid in plugin_widget_uids \\\n and widget_occupied_cells is not False \\\n and not lists_overlap(widget_occupied_cells, occupied_cells):\n\n plugin_widget = plugin_widget_registry.get(plugin_widget_uid)\n kwargs = {\n 'placeholder_uid': placeholder.uid,\n 'plugin_uid': uid\n }\n if workspace:\n kwargs.update({'workspace': workspace})\n if position:\n kwargs.update({'position': position})\n\n plugin_group = safe_text(plugin.group)\n if plugin_group not in registered_widgets:\n registered_widgets[plugin_group] = []\n\n widget_name = safe_text(plugin.name)\n\n registered_widgets[plugin_group].append(\n (\n uid,\n '{0} ({1}x{2})'.format(widget_name,\n plugin_widget.cols,\n plugin_widget.rows),\n reverse('dash.add_dashboard_entry', kwargs=kwargs)\n )\n )\n else:\n allowed_plugin_uids = get_allowed_plugin_uids(user)\n\n for uid, plugin in plugin_registry._registry.items():\n # We should make sure that there are widgets available for the\n # placeholder and user has access to the widget desired.\n plugin_widget_uid = PluginWidgetRegistry.namify(\n layout.uid,\n placeholder.uid,\n uid\n )\n\n # Get cells occupied by plugin widget.\n widget_occupied_cells = get_occupied_cells(\n layout,\n placeholder,\n uid,\n position,\n check_boundaries=True\n )\n\n if uid in allowed_plugin_uids \\\n and plugin_widget_uid in plugin_widget_uids \\\n and widget_occupied_cells is not False \\\n and not lists_overlap(widget_occupied_cells, occupied_cells):\n\n plugin_widget = plugin_widget_registry.get(plugin_widget_uid)\n kwargs = {\n 'placeholder_uid': placeholder.uid,\n 'plugin_uid': uid\n }\n if workspace:\n kwargs.update({'workspace': workspace})\n if position:\n kwargs.update({'position': position})\n\n plugin_group = safe_text(plugin.group)\n if plugin_group not in registered_widgets:\n registered_widgets[plugin_group] = []\n\n registered_widgets[plugin_group].append(\n (\n uid,\n '{0} ({1}x{2})'.format(safe_text(plugin.name),\n plugin_widget.cols,\n plugin_widget.rows),\n reverse('dash.add_dashboard_entry', kwargs=kwargs)\n )\n )\n\n if sort_items:\n for key, prop in registered_widgets.items():\n prop.sort()\n\n return registered_widgets",
"def get_dashboard_ui(self, user, packet):\n dashboard_ui = []\n plugins_order = mmconf.OPT['plugins_order']\n for plugin in self.dashboard_handlers:\n puipos = 999999\n plugins_order = mmconf.OPT['plugins_order']\n if plugin in plugins_order:\n puipos = plugins_order[plugin]\n if puipos != 0:\n html = self.dashboard_handlers[plugin]()\n try:\n html = html.decode('utf-8')\n except AttributeError:\n pass\n dashboard_elem = {\"uipos\":puipos, \"name\":plugin, \"html\":html}\n dashboard_ui.append(dashboard_elem)\n dashboard_ui.sort(key=lambda o: o['uipos'])\n return {\"dashboard_ui\":dashboard_ui}",
"def test_collect_dashboard_dependencies(self, module_repo):\n expected_result = {(\"CommonScripts\", True)}\n\n test_input = [\n {\n \"Dummy_dashboard\": {\n \"name\": \"Dummy Widget\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"scripts\": [\"AssignAnalystToIncident\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_widget_dependencies(\n pack_widgets=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n header=\"Dashboards\",\n )\n\n assert set(found_result) == set(expected_result)",
"def dashboards(self):\r\n return resources.Dashboards(self)",
"def test_collect_dashboard_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"CommonScripts\", True)},\n {\n (\"dashboard\", \"Dummy_dashboard\"): {\n \"CommonScripts\": [(\"script\", \"AssignAnalystToIncident\")]\n }\n },\n )\n\n test_input = [\n {\n \"Dummy_dashboard\": {\n \"name\": \"Dummy Widget\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"scripts\": [\"AssignAnalystToIncident\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_widget_dependencies(\n pack_widgets=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n header=\"Dashboards\",\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def getAllWidgets(self):\n \n visualisations = Visualisation.objects.filter(dataSource=self)\n widgets = []\n for vis in visualisations:\n widgets.append(vis.getWidget())\n return widgets",
"def get_layout_names(base_url=DEFAULT_BASE_URL):\n res = commands.cyrest_get('apply/layouts', base_url=base_url)\n return res",
"def _get_locators(self):\n return locator.DashboardWidget",
"def dashboards(self) -> dict:\n return Config.get_dashboards()",
"def get_dashboard_components(doctype, field):\n\n\tdashs = frappe.get_all(\"Dashmanager\", filters={\"ref_doctype\": doctype,\"ref_docfield\":doctype+\"-\"+field})\n\tdash = frappe.get_doc(\"Dashmanager\", dashs[0])\n\treturn dash.build_dashboard_components()",
"def urlBars(self):\n urlBars = []\n for index in range(self.count()):\n urlBars.append(self.widget(index))\n return urlBars",
"def widgets(self):\r\n url = '{0}/{1}/'.format(self.get_url(), 'widgets')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def _generateNamedContainingPanel(self, obj, **args):\n result = []\n parent = obj.parent\n while parent and (parent.parent != parent):\n if parent.getRole() == pyatspi.ROLE_PANEL:\n label = self._generateLabelAndName(parent)\n if label:\n result.extend(label)\n break\n parent = parent.parent\n return result",
"def test_dashboards_v2_list(self):\n pass",
"def test_dashboard_has_dashboard_in_title(self):\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text(\"Dashboard\").click()\n self.assertTrue('Dashboard' in self.browser.title, 'Dashboard did not have \"Dashboard\" in title')",
"def list_of_stories():\n return render_template(\"list_of_stories.html\", stories = stories.values())",
"def widgets(parameter: Parameter):\n widgets = []\n for key in parameter.keys():\n textEdit = QTextEdit()\n textEdit.setText(key)\n widgets.append(textEdit)\n if isinstance(parameter[key], Enum):\n comboBox = MyQtEnumComboBox()\n comboBox.fillValues(type(parameter[key]))\n widgets.append(comboBox)\n elif isinstance(parameter[key], bool):\n comboBox = QComboBox()\n comboBox.addItems((\"False\", \"True\"))\n widgets.append(comboBox)\n else:\n textEdit = QTextEdit()\n textEdit.setText(str(parameter[key]))\n widgets.append(textEdit)\n for widget in widgets:\n widget.setFixedHeight(30)\n return widgets",
"def getWidget(self):\n \n firstDataset = DashboardDataset.objects.filter(visualisation=self)[0]\n \n widget = {'name': self.name,\n 'id': \"vis\" + str(self.pk),\n 'pk': self.pk,\n 'category': self.category.name,\n 'type': self.type,\n 'dataset': [json.loads(d.dataJSON, cls=util.DateTimeDecoder) for d in DashboardDataset.objects.filter(visualisation=self)],\n 'datasetLabels': [d.name for d in DashboardDataset.objects.filter(visualisation=self)],\n 'sourceName': self.dataSource.name,\n 'sourceLink': self.dataSource.link,\n 'datasetName': firstDataset.name,\n 'datasetLink': firstDataset.link,\n 'description': self.description,\n 'xLabel': self.xLabel,\n 'yLabel': self.yLabel,\n 'sizeX': self.sizeX,\n 'sizeY': self.sizeY}\n return widget",
"def widgets(std_prm: Parameter) -> List[QWidget]:\n widgets = []\n\n # The name widget\n textEdit = QLineEdit()\n textEdit.setText(std_prm[\"name\"])\n widgets.append(textEdit)\n\n # The input widget\n inputWidget = std_prm[\"build method\"](std_prm[\"build method prms\"], std_prm[\"slot\"])\n widgets.append(inputWidget)\n\n # Add the input widget to the parameter\n # this field will be used to identify the parameter\n # in the slot\n std_prm[\"name widget\"] = textEdit\n std_prm[\"widget\"] = inputWidget\n return widgets",
"def get_dashboards(resource_root):\n return call(resource_root.get, DASHBOARDS_PATH, ApiDashboard, \\\n ret_is_list=True)",
"def get_dinner_titles(workbook):\n\n\tworksheets = workbook.worksheets()\n\tworksheet_titles = [i.title for i in worksheets]\n\t# Exclude system titles\n\tworksheet_titles.remove('History')\n\tworksheet_titles.remove('Settings')\n\n\treturn worksheet_titles",
"def _getListing(self):\n\n # lets assure consistent litsting order\n items = self._items.items()\n items.sort()\n return [ \"%s%s%s: %s\" % (_def_sep, str(x[1]), _def_sep, x[1].__doc__)\n for x in items ]",
"def list():\n index = config.index\n output_format = \"%-7s %-20s %s\"\n click.secho(output_format % (\"ID\", \"CREATED\", \"BACKENDS\"), fg=\"cyan\")\n for archive in sorted(index.archives(), key=lambda x: x[\"id\"]):\n # Print it out\n click.echo(\n output_format\n % (\n archive[\"id\"],\n datetime.datetime.fromtimestamp(archive[\"created\"]).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n \", \".join(sorted(archive[\"backend_names\"])),\n )\n )",
"def widgets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardRawPageWidgetArgs']]]]:\n return pulumi.get(self, \"widgets\")"
]
| [
"0.6569436",
"0.6078979",
"0.6001605",
"0.59308547",
"0.5854095",
"0.580947",
"0.57694864",
"0.5699261",
"0.56739503",
"0.55418026",
"0.5426727",
"0.54243344",
"0.5404837",
"0.5404529",
"0.53320867",
"0.5310989",
"0.5274873",
"0.51577336",
"0.51369613",
"0.51191705",
"0.51112854",
"0.51099133",
"0.5053337",
"0.5026083",
"0.50237197",
"0.5017678",
"0.5007546",
"0.49945733",
"0.49696225",
"0.49664682"
]
| 0.6873038 | 0 |
Update the dashboard KPI mappings file. Add a dict object whose key is the dashboard name and whose values are the dashboard's KPI titles. This mapping is used by diff_kpis to name the KPIs that are compared | def update_mappings(dashboard_data, mappings_file):
logging.info('update_mappings')
with open(mappings_file, 'r') as f:
try:
data = json.load(f)
except ValueError:
data = {}
if dashboard_data.slug not in data:
data[dashboard_data.slug] = {}
table_data = [{'title': t['title'].replace('\"', ''), 'sortedItem': t['sortedItem']} for t in dashboard_data.tables]
data[dashboard_data.slug][dashboard_data.type] = {
'kpis': dashboard_data.kpis,
'charts': dashboard_data.charts,
'tables': table_data
}
with open(mappings_file, 'w') as f:
f.write(json.dumps(data, indent=4)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_database_map(self, path):\n if path:\n filename = path + '/APD_MAP.txt'\n else:\n filename = 'APD_MAP.txt'\n filepointer = open(filename, 'w')\n for invariom, molecule in self.map.items():\n filepointer.write(invariom + ':' + molecule + '\\n')\n filepointer.close()",
"def update(self, ifile, ofile, key):\n if os.path.exists(ifile):\n with open(ifile) as f:\n map = {}\n data = json.load(f)\n for kw, val in data.iteritems():\n map[val[\"panda_resource\"]] = kw\n\n return self.save(ofile, map)\n else:\n return False",
"def _browse_to_old_dict(self):\n lang = self.ddnGuiLanguage.get()\n\n filein = filedialog.askopenfilename(\\\n filetypes=[('Map Creator Dictionary', '.xml'), ], \\\n initialdir=self.MapCreator, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['Map Creator Dictionary'], \\\n defaultextension='.xml')\n self.old_dict.set(filein)\n pass",
"def view_map(request):\n\n # Retrieve the installations\n install_list = Installation.objects.filter(is_active=True)\n arr = []\n\n # For each Installation, add the affiliated Institutions\n for i in install_list:\n lists = Institution.objects.filter(host__name=i.name)\n arr.append(lists)\n\n d = dict(\n install_list = install_list,\n arr = arr,\n installation_count=install_list.count()\n )\n\n d.update(get_total_published_counts())\n\n return render(request, 'installations/map2.html', d)",
"def update_tableau_pref_file(dirname: str = '.') -> None:\n\n # Obtain all colormaps in CMasher without reversed versions\n # This is because Tableau already has a function for this\n cmaps = [y for y in cmr.cm.cmap_d.values() if not y.name.endswith('_r')]\n\n # Create a dict that contains the Tableau type for each colormap type\n cmap_types = {\n 'sequential': 'ordered-sequential',\n 'diverging': 'ordered-diverging',\n 'cyclic': 'regular'}\n\n # Create empty dict of color-palette entries for all colormaps\n entries_dict = {}\n\n # Loop over all colormaps and create their color-palette entries\n for cmap in cmaps:\n # Obtain the type of this colormap\n cmap_type = cmap_types[cmr.get_cmap_type(cmap)]\n\n # Obtain all colors of this colormap in HEX-format\n colors_hex = cmr.take_cmap_colors(cmap, N=None, return_fmt='hex')\n\n # Create a list with all color representations in HEX\n colors_list = list(map(lambda x: \"<color>%s</color>\" % (x),\n colors_hex))\n\n # Combine all these colors into a single string\n colors_str = '\\n'.join(colors_list)\n\n # Make sure to indent all lines in this string by 1 tab\n colors_str = indent(colors_str, '\\t').expandtabs(4)\n\n # Create color-palette entry string\n entry_str = dedent(\"\"\"\n <color-palette name=\"cmr.{0}\" type=\"{1}\">\n {2}\n </color-palette>\"\"\").format(\n cmap.name, cmap_type, colors_str)[1:]\n\n # Indent this string by 1 tab\n entry_str = indent(entry_str, '\\t').expandtabs(4)\n\n # Add this entry to the dict\n entries_dict[cmap.name] = entry_str\n\n # Obtain absolute path to preferences file in provided dirname\n filename = path.abspath(path.join(dirname, 'Preferences.tps'))\n\n # Check if this file already exists\n if path.exists(filename):\n # If so, read in the file contents\n with open(filename, 'r') as f:\n text = f.read()\n\n # Define the strings that enclose the colormap entries usually\n start_str = \"<workbook>\\n <preferences>\\n\"\n end_str = \"\\n </preferences>\\n</workbook>\"\n\n # Search for these strings\n start_idx = text.find(start_str)+29\n end_idx = text.find(end_str)\n sub_contents = text[start_idx:end_idx]\n\n # Now search this sub_contents string for all colormap names\n cmap_names = re.findall(r\"\\\"cmr\\.(\\w+)\\\"\", sub_contents)\n\n # Search entries_dict for all cmap_names\n for cmap in cmap_names:\n # Check if cmap is in entries_dict\n if cmap not in entries_dict:\n # If not, obtain the entire entry\n idx = sub_contents.find('cmr.'+cmap)\n start_idx_entry = idx-25\n match = re.search(r\"<\\/color-palette>\\n\",\n sub_contents[start_idx_entry:])\n end_idx_entry = match.end()+start_idx_entry\n\n # Remove this entry from sub_contents\n sub_contents = ''.join([sub_contents[:start_idx_entry],\n sub_contents[end_idx_entry:]])\n\n # Search this sub_contents string for all strings in entries_dict\n for cmap, cmap_entry in dict(entries_dict).items():\n # Check if this colormap name already exists\n idx = sub_contents.find('cmr.'+cmap)\n if(idx != -1):\n # If so, obtain the entire entry\n start_idx_entry = idx-25\n match = re.search(r\"<\\/color-palette>\",\n sub_contents[start_idx_entry:])\n end_idx_entry = match.end()+start_idx_entry\n\n # Replace this entry with the new entry\n sub_contents = ''.join([sub_contents[:start_idx_entry],\n cmap_entry,\n sub_contents[end_idx_entry:]])\n\n # Remove cmap from entries_dict\n entries_dict.pop(cmap)\n\n # Combine everything remaining in entries_dict together\n entries_str = '\\n'.join(['', *entries_dict.values()])\n\n # Join sub_contents and entries_str together\n 
sub_contents = ''.join([sub_contents, entries_str])\n\n # Insert the sub_contents into pref_file_contents\n text = ''.join([text[:start_idx], sub_contents, text[end_idx:]])\n\n # Save this to the preferences file\n with open(filename, 'w') as f:\n f.write(text)\n\n else:\n # If not, combine everything in entries_dict together to single string\n entries_str = '\\n'.join(entries_dict.values())\n\n # Create the string for the new 'Preferences.tps' file\n pref_file = dedent(\"\"\"\n <?xml version='1.0'?>\n <workbook>\n <preferences>\n {0}\n </preferences>\n </workbook>\"\"\").format(entries_str)[1:]\n\n # Create this file\n with open(filename, 'w') as f:\n f.write(pref_file)",
"def show_mappings(project):\n # with BMI(_username, _password, project) as bmi:\n # ret = bmi.show_mounted()\n # if ret[constants.STATUS_CODE_KEY] == 200:\n # table = PrettyTable(field_names=['Target', 'Block Device'])\n # mappings = ret[constants.RETURN_VALUE_KEY]\n # for k, v in mappings.iteritems():\n # table.add_row([k, v])\n # click.echo(table.get_string())\n # else:\n # click.echo(ret[constants.MESSAGE_KEY])\n click.echo(\"Need to Re-Implement\")",
"def write_kpi_indices(dst_file):\n global kpi_list\n with open(dst_file, 'w') as f:\n for kpi in kpi_list:\n f.write(kpi.desc() + '\\n')",
"def extract_dashboards_data(dashboards_dir, dashboards_list, topics_list, mappings_file):\n logging.info('Updating all mappings...')\n\n dashboards = os.listdir(dashboards_dir)\n if not dashboards:\n logging.critical('No dashboards found.')\n exit(1)\n\n # get the list of dashboards to use. If empty, us all\n actual_dashboard_list = []\n if dashboards_list != \"\":\n actual_dashboard_list = dashboards_list.split(\",\")\n\n # get the list of topics to use. If empty, us all\n actual_topics_list = []\n if topics_list != \"\":\n actual_topics_list = topics_list.split(\",\")\n\n extract_dashboard_mapping({\n \"dashboards\": dashboards,\n \"actual_dashboard_list\": actual_dashboard_list,\n \"actual_topics_list\": actual_topics_list,\n \"dashboards_dir\": dashboards_dir,\n \"mappings_file\": mappings_file,\n })\n\n logging.info('Updating all mappings - DONE')\n logging.info('Mappings path: {}'.format(mappings_file))",
"def present_map(cmdb_ci_types, db_ci_types, cmdb_rel_types, db_rel_types, cmdb_ci_attributes, db_ci_attributes, cmdb_rel_attributes, db_rel_attributes, similar_ci, similar_rel, similar_attr_ci, similar_attr_rel):\n print(\"\\n===============================================================================================================================================================================\")\n print(blue + \"CONFIGURATION ITEMS MAPPING\" + reset)\n print(\"===============================================================================================================================================================================\")\n print()\n data = []\n for db_ci in similar_ci:\n cmdb_ci = list(similar_ci[db_ci].keys())[0]\n sim = similar_ci.get(db_ci).get(cmdb_ci)\n row = [cmdb_ci, cmdb_ci_types.get(\n cmdb_ci), db_ci, db_ci_types.get(db_ci), sim]\n data.append(row)\n print(tabulate(data, headers=[\n \"CI in CMDB\", \"Description\", \"CI in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()\n\n for db_ci in similar_ci:\n data = []\n cmdb_ci = list(similar_ci[db_ci].keys())[0]\n print(\"**************************************************************************************************\")\n print(\n green + str(cmdb_ci) + \" Attributes Mapping\" + reset)\n print(\"**************************************************************************************************\")\n print()\n atrs = similar_attr_ci.get(cmdb_ci)\n if atrs != None:\n for cmdb_at in atrs:\n db_at = list(atrs.get(cmdb_at).keys())[0]\n sim = atrs.get(cmdb_at).get(db_at)\n row = [cmdb_at, cmdb_ci_attributes.get(\n cmdb_ci).get(cmdb_at), db_at, db_ci_attributes.get(db_ci).get(db_at), sim]\n data.append(row)\n print(tabulate(data, headers=[\"Attribute in CMDB\", \"Description\",\n \"Attribute in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()\n print()\n\n print(\"===============================================================================================================================================================================\")\n print(blue + \"RELATIONSHIPS MAPPING\" + reset)\n print(\"===============================================================================================================================================================================\")\n print()\n\n data = []\n for db_rel in similar_rel:\n cmdb_rel = list(similar_rel[db_rel].keys())[0]\n sim = similar_rel.get(db_rel).get(cmdb_rel)\n row = [cmdb_rel, cmdb_rel_types.get(\n cmdb_rel), db_rel, db_rel_types.get(db_rel), sim]\n data.append(row)\n atrs = similar_attr_rel.get(cmdb_rel)\n print(tabulate(data, headers=[\n \"Relationship in CMDB\", \"Description\", \"Relationship in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()\n\n for db_rel in similar_rel:\n data = []\n cmdb_rel = list(similar_rel[db_rel].keys())[0]\n print(\"**************************************************************************************************\")\n print(green + str(cmdb_rel) + \" Attributes Mapping\" + reset)\n print(\"**************************************************************************************************\")\n print()\n for cmdb_at in atrs:\n db_at = list(atrs.get(cmdb_at).keys())[0]\n sim = atrs.get(cmdb_at).get(db_at)\n cmdb_at_desc = cmdb_rel_attributes.get(cmdb_rel)\n if cmdb_at_desc != None:\n cmdb_at_desc = cmdb_at_desc.get(cmdb_at)\n db_at_desc = db_rel_attributes.get(db_rel)\n if db_at_desc != None:\n db_at_desc = db_at_desc.get(db_at)\n row = [cmdb_at, cmdb_at_desc, db_at,\n db_at_desc, sim]\n 
data.append(row)\n print(tabulate(data, headers=[\"Attribute in CMDB\", \"Description\",\n \"Attribute in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()",
"def updateDictFile(self):\n if self.dictFile.vdata.get('version',0): return\n #--Update to version 1\n for name in self.data.keys():\n installer = self.data[name]\n if isinstance(installer,Installer):\n self.data[name] = installer.__copy__(InstallerArchive)\n self.dictFile.vdata['version'] = 1",
"def saveMapping(self, mapfile=\"./mapping.json\"):\n\t\tmappings = {\n\t\t\t\"Sharing\" : self.share_levels,\n\t\t\t\"Type\" : self.type_map,\n\t\t\t\"Extra-Tag\" : self.extra_tag,\n\t\t\t\"Privacy\" : self.privacy_levels\n\t\t}\n\t\ttry:\n\t\t\tfd = open(mapfile, \"w\")\n\t\t\tjson.dump(mappings, fd, sort_keys=True,indent=4,separators=(',', ': '))\n\t\t\tfd.close()\n\t\texcept Exception as e:\n\t\t\tprint(\"IMPOSSIBLE TO SAVE MAPPINGS to %s\" % mapfile)\n\t\t\tprint(e)\n\t\treturn",
"def export_to_json(filename, idmap, missing):\n\n rs = data.RedisSource()\n missing_info = set()\n with open(filename, 'w') as io:\n for screen_name in rs.screen_names:\n info = panelist_info(rs, screen_name, idmap, missing_info)\n if info:\n io.write(info)\n io.write(\"\\n\")\n if missing_info:\n print(\"missing info for {}\".format(len(missing_info)))\n with open(missing, 'w') as io:\n io.write(\"\\n\".join(missing_info))",
"def dashboard(self):\r\n return {}",
"def put_dashboard(self):\n self.sd.putNumber(\"vision_y\", self.range_finder.getDistance())",
"def update_feature(selfs, k, v, cfg_path):\n with open(cfg_path, 'r') as cfg:\n file_dict = yaml.safe_load(cfg)\n # overprint the entries with the new config_dict\n file_dict['{}'.format(k)] = v\n with open(cfg_path, 'w') as w_file:\n w_file.write(yaml.dump(file_dict))",
"def add_descriptors(self, mapping):\n for key, desc in mapping.iteritems():\n self.descriptors[int(key, 16)] = desc",
"def add_descriptors(self, mapping):\n for key, desc in mapping.iteritems():\n self.descriptors[int(key, 16)] = desc",
"def publish(self, kpi_dict):\n pass",
"def update_importation(password: str):\n # Load imported cases\n imp_df = load_dhhs_df(ACQUIRED_OVERSEAS)\n imports_data = {}\n for region in CLUSTER_MAP.keys():\n current_cluster = CLUSTER_MAP[region].lower()\n update_df = imp_df[imp_df.cluster_name == current_cluster]\n region_name = get_region_name(current_cluster)\n imports_data[region_name] = {\n \"description\": f\"Daily imports for {region_name}\",\n \"times\": list(update_df.date_index),\n \"values\": list(update_df.new),\n }\n\n file_path = os.path.join(IMPORT_DIR, \"imports.secret.json\")\n with open(file_path, \"w\") as f:\n json.dump(imports_data, f, indent=2)\n\n secrets.write(file_path, password)",
"def test_dashboards_v2_update(self):\n pass",
"def rebuild_dash_layout_settings_dict(self, email=None):\n if email is None:\n return {}\n try:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n try:\n if user_info.dash_layout_settings:\n lookup_dict = self.build_dict(user_info=user_info)\n values = user_info.dash_layout_settings\n default_nav = [\"app_management\", \"appscale_management\",\n \"debugging_monitoring\"]\n\n nav_list = []\n for key_dict in values.get('nav'):\n for temp_key in key_dict:\n nav_list.append(temp_key)\n\n if set(nav_list) != set(default_nav):\n for key in default_nav:\n if nav_list.count(key) == 0:\n nav_list.append(key)\n\n default_panel = [\"app_console\", \"upload_app\", \"cloud_stats\",\n \"database_stats\", \"memcache_stats\"]\n\n panel_list = []\n for key_dict in values.get('panel'):\n for temp_key in key_dict:\n panel_list.append(temp_key)\n\n if set(panel_list) != set(default_panel):\n for key in default_panel:\n if panel_list.count(key) == 0:\n panel_list.append(key)\n\n values['nav'] = [{key: lookup_dict.get(key)}\n for key in nav_list if key in lookup_dict]\n\n new_panel_vals = []\n for key in panel_list:\n is_admin_panel = lookup_dict.get(key).get('is_admin_panel')\n if key in lookup_dict and (not is_admin_panel or\n is_admin_panel ==\n user_info.is_user_cloud_admin):\n new_panel_vals.append({key: lookup_dict.get(key)})\n\n values['panel'] = new_panel_vals\n user_info.dash_layout_settings = values\n user_info.put()\n return user_info.dash_layout_settings\n else:\n return self.set_dash_layout_settings(user_info=user_info)\n except Exception as err:\n logging.exception(err)\n return self.set_dash_layout_settings(user_info=user_info)\n except Exception as err:\n logging.exception(err)",
"def dashboard_view(self):\n return AttrDict({\n 'file_histogram': [h for h in self.file_histogram.values()],\n 'files': [f for f in self.files.values()],\n 'nodes': [\n {\n \"type\": \"Project\",\n \"count\": 1\n },\n {\n \"type\": \"Subject\",\n \"count\": len(self.subjects)\n },\n {\n \"type\": \"Samples\",\n # samples is a dict keyed by subject id, sum the len of each subject's sample list\n \"count\": sum([len(sl) for sl in list(self.samples.values())])\n },\n ],\n 'size': sum([f['size']for f in self.files.values()]),\n 'project_id': self.name,\n 'public': self.attributes['public'],\n 'createdDate': self.attributes.workspace.createdDate,\n 'lastModified': self.attributes.workspace.lastModified,\n 'data_type': self.data_type,\n 'data_category': self.data_category,\n 'problems': self.problems\n })",
"def create_map(\n datapointsPath: Union[Path, str],\n linksPath: Union[Path, str],\n datapointAttrPath: Union[Path, str],\n node_attr_map: Dict[str, str],\n link_attr_map: Dict[str, str],\n snapshots: List[Dict] = [],\n playerSettings: Dict[str, Any] = {},\n outFolder: Union[Path, str] = \"data_out\",\n):\n\n # create folders and copy the index file\n print(f\">> creating folders\")\n out_dir = Path(outFolder)\n out_data_path = out_dir / \"data\"\n if not out_data_path.exists():\n print(f\"\\t- new folder - {out_data_path}\")\n out_data_path.mkdir(parents=True, exist_ok=True)\n else:\n print(f\"\\t- found existing. overwriting - {out_data_path}\")\n\n # copy the index and run scripts to out directory\n shutil.copy(\"src/index.html\", out_dir)\n print(f\"\\t- copied {out_dir}/index.html\")\n\n shutil.copy(\"src/run_local.sh\", out_dir)\n print(f\"\\t- copied {out_dir}/run_local.sh\\n\")\n\n # write the files\n print(f\">> building dataset\")\n __write_dataset_file(datapointsPath, datapointAttrPath, out_data_path)\n print(f\"\\t- new dataset file written to {out_data_path / 'nodes.json'}.\\n\")\n\n print(f\">> building network\")\n __write_network_file(datapointsPath, linksPath, node_attr_map, link_attr_map, out_data_path)\n print(f\"\\t- new network file written to {out_data_path / 'links.json'}.\\n\")\n\n print(f\">> building settings\")\n __write_settings_file(snapshots, playerSettings, out_data_path)\n print(f\"\\t- new settings file written to {out_data_path / 'settings.json'}.\\n\")",
"def map(item):\n user_services.update_dashboard_stats_log(item.id)",
"def get_aui_sdui_mapping(input_file=None):\n input_file = os.path.join(DATA_DIR, \"umls\", \"MRCONSO.RRF\") if input_file is None else input_file\n mapping = {}\n with open(input_file, 'r') as f:\n for line in f:\n line_array = line.split(\"|\")\n if line_array[MRCONSO_SAB_INDEX] == 'MSH' and line_array[MRCONSO_SDUI_INDEX].strip() != \"\":\n mapping[line_array[MRCONSO_AUI_INDEX]] = line_array[MRCONSO_SDUI_INDEX]\n return mapping",
"def write_map(self, file_name):\n\n if self.pixel == \"HEALPIX\":\n hp.fitsfunc.write_map(file_name, self.data, overwrite=True)\n if self.pixel == \"CAR\":\n enmap.write_map(file_name, self.data)",
"def set_dev_hash(self,dev_hash):\r\n self.devHash = dev_hash\r\n self.names = ['project 0','project 1']\r\n self.proj0_dev = {}\r\n self.proj1_dev = {}\r\n self.proj0_total = 0\r\n self.proj1_total = 0\r\n print dev_hash\r\n\r\n for k,v in self.devHash.iteritems():\r\n dev0,dev1 = k\r\n port = int(v)\r\n self.proj0_total += port\r\n self.proj1_total += port\r\n if self.proj0_dev.has_key(dev0) == 0:\r\n self.proj0_dev[dev0] = 0\r\n self.proj0_dev[dev0] += port\r\n if self.proj1_dev.has_key(dev1) == 0:\r\n self.proj1_dev[dev1] = 0\r\n self.proj1_dev[dev1] += port\r\n self.data = []\r\n self.label = []\r\n for proj in (self.proj0_dev,self.proj1_dev):\r\n print proj\r\n data = []\r\n label = []\r\n for k,v in proj.iteritems():\r\n port = float(proj[k])\r\n pcent_port = (port * 100)/self.proj0_total\r\n data.append(pcent_port)\r\n label.append(k)\r\n self.data.append(data)\r\n self.label.append(label)\r\n\r\n print self.data\r\n print self.label",
"def associate_files(self):\n self.MatlabFiles = {'defaults': os.path.join(self.ParentDir,'defaults.m'),\n 'avevel': os.path.join(self.OutDir, 'pix2avevel.mat'),\n 'cumdef': os.path.join(self.OutDir, 'pix2cumdef.mat'),\n 'variance': os.path.join(self.OutDir, 'vaiance.mat')}",
"def _browse_to_dict(self):\n lang = self.ddnGuiLanguage.get()\n\n filein = filedialog.askopenfilename(\\\n filetypes=[('Map Creator Dictionary', '.xml'), ], \\\n initialdir=self.MapCreator, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['Map Creator Dictionary'], \\\n defaultextension='.xml')\n self.dict_in.set(filein)\n if self.ddnCurProject.get() \\\n and self.dict_in.get() and self.terms_in.get():\n self.btnSaveProject['state'] = 'normal'\n pass",
"def set_map_keys(self):\n self.map_keys.clear()\n # The key to use with 'img_dict', the name of the current dataset.\n plot_item = self._get_current_plot_item()\n keys_unsorted = list(self.io_model.img_dict[plot_item].keys())\n if len(keys_unsorted) != len(set(keys_unsorted)):\n logger.warning(\n f\"DrawImageAdvanced:set_map_keys(): repeated keys in the dictionary 'img_dict': {keys_unsorted}\"\n )\n keys_elines, keys_scalers = [], []\n for key in keys_unsorted:\n if check_if_eline_supported(key): # Check if 'key' is an emission line (such as \"Ca_K\")\n keys_elines.append(key)\n else:\n keys_scalers.append(key)\n keys_elines.sort()\n keys_scalers.sort()\n self.map_keys = keys_elines + keys_scalers"
]
| [
"0.53087455",
"0.5226565",
"0.521083",
"0.5169348",
"0.5110115",
"0.51062304",
"0.50751776",
"0.5055464",
"0.48677987",
"0.4862654",
"0.48615238",
"0.48604602",
"0.48286825",
"0.47872168",
"0.4719879",
"0.47133425",
"0.47133425",
"0.4647157",
"0.46456125",
"0.46437544",
"0.46169597",
"0.45893678",
"0.4581011",
"0.45780003",
"0.45662674",
"0.45551312",
"0.45456868",
"0.45361918",
"0.45349094",
"0.45319065"
]
| 0.66152465 | 0 |
Adds an import and an export function to a class under the names export_data and import_data. This is currently unused and maybe should be removed. The original intention was to be able to add methods that allow JSON-compatible output dictionaries to be passed to external functions. | def add_base_class(
existing_object: Any,
import_method: Callable[[Any], Any],
export_method: Callable[[Any], Any],
):
existing_object.export_data = types.MethodType(export_method, existing_object)
existing_object.import_data = types.MethodType(import_method, existing_object) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_(self, exported, update=False):\n for path in exported:\n kv = exported[path]\n fn = self.update if update else self.write\n fn(path, **kv)",
"def exports():",
"def export(self, class_name, method_name, export_data=False,\n export_dir='.', export_filename='data.json',\n export_append_checksum=False, **kwargs):\n # Arguments:\n self.class_name = class_name\n self.method_name = method_name\n\n # Templates of primitive data types:\n temp_type = self.temp('type')\n temp_arr = self.temp('arr')\n temp_arr_ = self.temp('arr[]')\n temp_arr__ = self.temp('arr[][]')\n\n # Estimator:\n est = self.estimator\n self.params = est.get_params()\n\n # Check kernel type:\n supported_kernels = ['linear', 'rbf', 'poly', 'sigmoid']\n if self.params['kernel'] not in supported_kernels:\n msg = 'The kernel type is not supported.'\n raise ValueError(msg)\n\n self.n_features = len(est.support_vectors_[0])\n self.svs_rows = est.n_support_\n self.n_svs_rows = len(est.n_support_)\n\n self.weights = self.temp('arr[]', skipping=True).format(\n type='int', name='weights', values=', '.join([str(e) for e in\n self.svs_rows]),\n n=len(self.svs_rows))\n self.n_weights = len(self.svs_rows)\n\n self.n_classes = len(est.classes_)\n self.is_binary = self.n_classes == 2\n self.prefix = 'binary' if self.is_binary else 'multi'\n\n # Support vectors:\n vectors = []\n for vector in est.support_vectors_:\n _vectors = [temp_type.format(self.repr(v)) for v in vector]\n _vectors = temp_arr.format(', '.join(_vectors))\n vectors.append(_vectors)\n vectors = ', '.join(vectors)\n vectors = self.temp('arr[][]', skipping=True).format(\n type='double', name='vectors', values=vectors,\n n=len(est.support_vectors_), m=len(est.support_vectors_[0]))\n self.vectors = vectors\n self.n_vectors = len(est.support_vectors_)\n\n # Coefficients:\n coeffs = []\n for coeff in est.dual_coef_:\n _coeffs = [temp_type.format(self.repr(c)) for c in coeff]\n _coeffs = temp_arr.format(', '.join(_coeffs))\n coeffs.append(_coeffs)\n coeffs = ', '.join(coeffs)\n coeffs = temp_arr__.format(type='double', name='coefficients',\n values=coeffs, n=len(est.dual_coef_),\n m=len(est.dual_coef_[0]))\n self.coefficients = coeffs\n self.n_coefficients = len(est.dual_coef_)\n\n # Interceptions:\n inters = [temp_type.format(self.repr(i)) for i in est._intercept_]\n inters = ', '.join(inters)\n inters = temp_arr_.format(type='double', name='intercepts',\n values=inters, n=len(est._intercept_))\n self.intercepts = inters\n self.n_intercepts = len(est._intercept_)\n\n # Kernel:\n self.kernel = str(self.params['kernel'])\n if self.target_language == 'c':\n self.kernel = self.kernel[0]\n\n # Gamma:\n self.gamma = self.params['gamma']\n if self.gamma == 'auto':\n self.gamma = 1. / self.n_features\n self.gamma = self.repr(self.gamma)\n\n # Coefficient and degree:\n self.coef0 = self.repr(self.params['coef0'])\n self.degree = self.repr(self.params['degree'])\n\n if self.target_method == 'predict':\n # Exported:\n if export_data and os.path.isdir(export_dir):\n self.export_data(export_dir, export_filename,\n export_append_checksum)\n return self.predict('exported')\n # Separated:\n return self.predict('separated')",
"def importer():\n pass",
"def addfunctions2new(abunch, key):\n snames = [\n \"BuildingSurface:Detailed\",\n \"Wall:Detailed\",\n \"RoofCeiling:Detailed\",\n \"Floor:Detailed\",\n \"FenestrationSurface:Detailed\",\n \"Shading:Site:Detailed\",\n \"Shading:Building:Detailed\",\n \"Shading:Zone:Detailed\",\n ]\n snames = [sname.upper() for sname in snames]\n if key in snames:\n func_dict = {\n \"area\": fh.area,\n \"height\": fh.height, # not working correctly\n \"width\": fh.width, # not working correctly\n \"azimuth\": fh.azimuth,\n \"tilt\": fh.tilt,\n \"coords\": fh.getcoords, # needed for debugging\n }\n try:\n abunch.__functions.update(func_dict)\n except KeyError as e:\n abunch.__functions = func_dict\n return abunch",
"def assign_functions(self):\n # get function declarations from json string\n self.functions = self.definitions.get(\"functions\", [])\n\n # generate function declaration in header file\n header = cls.header_from_function_name_and_args(\n _func[\"name\"], _func[\"args\"]\n )\n\n _functions = OrderedDict()\n for func in self.functions:\n _name = func[\"name\"]\n _type = func[\"type\"]\n _args = func[\"args\"]\n _deriv = self.get_derivatives(func.get(\"deriv\", []))\n _functions[_name] = {\n \"name\": _name,\n \"type\": _type,\n \"args\": _args,\n \"deriv\": _deriv,\n }\n self._functions = _functions",
"def export():\n def wrap(func, *args, **kwargs):\n func._rpcserver_export = True\n doc = func.__doc__\n func.__doc__ = \"**RPC Exported Function**\"\n if doc:\n func.__doc__ += doc\n\n return func\n return wrap",
"def __init__(self, export_tuples=None):\n self._exports = export_tuples if export_tuples else []",
"def add_function_to_import(self, dll_import_descriptor, dll_name, fn_name):\n\n # TODO : Currently, only the functions in the list are supported.\n ordinal = self.get_ordinal_from_common_library(dll_name, fn_name)\n if ordinal == 0:\n print(\"not supported yet.\")\n exit()\n\n ordinal += self._ORDINAL_MASK_\n thunk = self.pe_manager.gen_new_thunk(ordinal)\n last_import_thunk_offset = self.get_last_import_thunk_offset()\n print(\"IMPORT THUNK OFFSET : {:x}\".format(last_import_thunk_offset))\n print(\"IMPORT THUNK RVA : {:x}\".format(\n self.PE.get_rva_from_offset(last_import_thunk_offset)\n ))\n thunk.set_file_offset(last_import_thunk_offset + 4)\n self.append_import_thunk_to_descriptor(dll_import_descriptor, thunk)",
"def test_class_module_with_exports():\n \n class Derived(Module):\n \n \"\"\" I heard you like docstrings \"\"\"\n \n yo = 'dogg'\n \n @export\n def yodogg(self):\n return \"I heard you like\"\n \n @export\n def nodogg(self):\n return None\n \n export(yo, name='yo')\n \n from clu.app import Derived as derived\n \n assert isinstance(derived, Module)\n assert derived.yo == 'dogg'\n assert derived.yodogg() == \"I heard you like\"\n assert derived.nodogg() is None\n \n for attname in dir(derived):\n assert hasattr(derived, attname)\n \n print(\"dir(derived):\")\n pprint(dir(derived))\n print()\n \n print(\"derived.exporter.exports():\")\n pprint(derived.exporter.exports())\n print()\n \n assert type(derived.exporter).__name__ == 'Exporter'",
"def export(self, class_name, method_name, export_data=False,\n export_dir='.', export_filename='data.json',\n export_append_checksum=False, embed_data=False, **kwargs):\n # Arguments:\n self.class_name = class_name\n self.method_name = method_name\n\n # Estimator:\n est = self.estimator\n\n self.n_features = est.n_features_\n self.n_classes = len(self.estimator.tree_.value.tolist()[0][0])\n\n temp_arr_scope = self.temp('arr_scope')\n temp_arr_ = self.temp('arr[]')\n temp_arr__ = self.temp('arr[][]')\n\n left_childs = [str(e) for e in self.estimator.tree_.children_left]\n left_childs = temp_arr_.format(type='int', name='lChilds',\n values=', '.join(left_childs),\n n=len(left_childs))\n self.left_childs = left_childs\n\n right_childs = [str(e) for e in self.estimator.tree_.children_right]\n right_childs = temp_arr_.format(type='int', name='rChilds',\n values=', '.join(right_childs),\n n=len(right_childs))\n self.right_childs = right_childs\n\n thresholds = [self.repr(e) for e in\n self.estimator.tree_.threshold.tolist()]\n type_ = 'float64' if self.target_language == 'go' else 'double'\n thresholds = temp_arr_.format(type=type_, name='thresholds',\n values=', '.join(thresholds),\n n=len(thresholds))\n self.thresholds = thresholds\n\n indices = [str(e) for e in self.estimator.tree_.feature]\n indices = temp_arr_.format(type='int', name='indices',\n values=', '.join(indices), n=len(indices))\n self.indices = indices\n\n classes = self.estimator.tree_.value.tolist()\n n = len(classes)\n m = self.n_classes\n classes = [', '.join([str(int(x)) for x in e[0]]) for e in classes]\n classes = ', '.join([temp_arr_scope.format(v) for v in classes])\n classes = temp_arr__.format(type='int', name='classes', values=classes,\n n=n, m=m)\n self.classes = classes\n\n if self.target_method == 'predict':\n # Exported:\n if export_data and os.path.isdir(export_dir):\n self.export_data(export_dir, export_filename,\n export_append_checksum)\n return self.predict('exported')\n # Embedded:\n if embed_data:\n return self.predict('embedded')\n # Separated:\n return self.predict('separated')",
"def main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', help='Input .py file', nargs='+')\n args = parser.parse_args()\n\n mod_func = []\n\n for pyfile in args.input:\n tree = ast.parse(open(pyfile).read())\n\n methods = sorted({node.name for node in ast.walk(tree)\n if isinstance(node, ast.FunctionDef)})\n mod_func.extend([[pyfile, methods[i]] for i in range(len(methods))])\n\n write_csv(\"meth_func.csv\", mod_func)",
"def export_type(cls):\n cls.export_in_rule_data = True\n return cls",
"def import_and_add(self, import_str):\n # loaded_classes.clear()\n\n try:\n import_module(import_str)\n except ImportError as e:\n traceback.print_exc()\n logger.warning(\"Tried to import `%s` and failed, ignoring\", import_str)\n logger.warning(\"Error: %s\", e)\n # else:\n # for k in loaded_classes:\n # if k.__module__.startswith(\"dataclay\"):\n # # dataClay contrib classes should not be registered here\n # continue\n # else:\n # self.add_class(k)",
"def _get_data_preprocessing_fns(self):\n # Create new functions with partial positional arguments assigned\n process_path_fn = \\\n partial(data_preprocessing.process_path,\n one_hot=self.ONE_HOT,\n num_classes=self._NUM_CLASSES,\n class_names=self._CLASS_NAMES)\n process_img_path_fn = data_preprocessing.process_img_path\n convert_format_fn = \\\n partial(data_preprocessing.convert_format,\n grayscale_in=self._GRAYSCALE_IN,\n grayscale_out=self._GRAYSCALE_OUT)\n random_rotate_fn = \\\n partial(data_preprocessing.random_rotate,\n stddev=self._ROTATE_STDDEV)\n random_zoom_fn = \\\n partial(data_preprocessing.random_zoom,\n max_percent=self._ZOOM_MAX_PERCENT,\n stddev=self._ZOOM_STDDEV,\n img_height=self._HEIGHT,\n img_width=self._WIDTH)\n resize_fn = \\\n partial(data_preprocessing.resize,\n height=self._HEIGHT,\n width=self._WIDTH)\n\n funcs = edict({'process_path': process_path_fn,\n 'process_img_path': process_img_path_fn,\n 'convert_format': convert_format_fn,\n 'random_rotate': random_rotate_fn,\n 'random_zoom': random_zoom_fn,\n 'resize': resize_fn})\n\n return funcs",
"def addfunctions(dtls, bunchdt):\n snames = [\n \"BuildingSurface:Detailed\",\n \"Wall:Detailed\",\n \"RoofCeiling:Detailed\",\n \"Floor:Detailed\",\n \"FenestrationSurface:Detailed\",\n \"Shading:Site:Detailed\",\n \"Shading:Building:Detailed\",\n \"Shading:Zone:Detailed\",\n ]\n for sname in snames:\n if sname.upper() in bunchdt:\n surfaces = bunchdt[sname.upper()]\n for surface in surfaces:\n func_dict = {\n \"area\": fh.area,\n \"height\": fh.height, # not working correctly\n \"width\": fh.width, # not working correctly\n \"azimuth\": fh.azimuth,\n \"tilt\": fh.tilt,\n \"coords\": fh.getcoords, # needed for debugging\n }\n try:\n surface.__functions.update(func_dict)\n except KeyError as e:\n surface.__functions = func_dict\n # add common functions\n # for name in dtls:\n # for idfobject in bunchdt[name]:\n # idfobject.__functions\n # idfobject['__functions']['fieldnames'] = fieldnames\n # idfobject['__functions']['fieldvalues'] = fieldvalues\n # idfobject['__functions']['getrange'] = GetRange(idfobject)\n # idfobject['__functions']['checkrange'] = CheckRange(idfobject)",
"def import_(cls, data_id, type_,*args, **kwargs):\n data_path = cls.my_data_path(data_id)\n config = cls.generate_config(data_path,*args,**kwargs)\n task_id = Task.register(Task.TYPE.DATA_IMPORT)\n DataImport(data_id= data_id, task_id= task_id, type_l= type_, config= config).save()\n Task.fire(task_id)",
"def export(self, class_name, method_name,\n export_data=False, export_dir='.', export_filename='data.json',\n export_append_checksum=False, embed_data=True, **kwargs):\n # Arguments:\n self.class_name = class_name\n self.method_name = method_name\n\n # Estimator:\n est = self.estimator\n\n self.estimators = [est.estimators_[idx] for idx\n in range(est.n_estimators)]\n self.n_estimators = len(self.estimators)\n self.n_features = est.estimators_[0].n_features_\n self.n_classes = est.n_classes_\n\n if self.target_method == 'predict':\n # Exported:\n if export_data and os.path.isdir(export_dir):\n self.export_data(export_dir, export_filename,\n export_append_checksum)\n return self.predict('exported')\n # Embedded:\n return self.predict('embedded')",
"def __init__(self):\n self.functions = {}",
"def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()",
"def import_object(import_str, *args, **kwargs):\r\n return import_class(import_str)(*args, **kwargs)",
"def _add_function(self, alias, func):\n # Construct a function that will call the user supplied function with\n # the proper arguments. We prepend 'self' so the user supplied function\n # has easy access to all the filepaths.\n def fname(**kwargs):\n return func(self, **kwargs)\n\n # Bind the fname function to this instance of FileNames\n self.__dict__[alias] = fname",
"def from_function(cls, py_func, py_file):\n raise NotImplementedError",
"def from_function(cls, py_func, py_file):\n raise NotImplementedError",
"def importObject(importStr, *args, **kwargs):\n return importClass(importStr)(*args, **kwargs)",
"def addTransform(self,input,output,func):\n import inspect\n from collections import defaultdict\n\n try:\n args, varargs, varkw, defaults = inspect.getargspec(func)\n if len(args)-1 > len(defaults) or varkw:\n raise TypeError('input function must take one argument')\n except TypeError:\n raise TypeError('input func is not a callable')\n\n #make sure we are in the instance\n if '_inputtransforms' not in self.__dict__:\n dd = defaultdict(dict)\n dd.update(self._inputtransforms)\n self._inputtransforms = dd\n\n dd[input][output] = func",
"def __init__(self, func, dfunc, name=\"\"):\n self.name = name\n self.func = func\n self.dfunc = dfunc",
"def __create_custom_objects():\n # make some preparation to properly load objects from keras_contribute\n instance_holder = {\"instance\": None}\n\n class ClassWrapper(CRF):\n def __init__(self, *args, **kwargs):\n instance_holder[\"instance\"] = self\n super(ClassWrapper, self).__init__(*args, **kwargs)\n\n def loss(*args):\n method = getattr(instance_holder[\"instance\"], \"loss_function\")\n return method(*args)\n\n def accuracy(*args):\n method = getattr(instance_holder[\"instance\"], \"accuracy\")\n return method(*args)\n\n return {\"ClassWrapper\": ClassWrapper, \"CRF\": ClassWrapper, \"crf_loss\": loss,\n \"crf_viterbi_accuracy\": accuracy}",
"def collect_functions(self):\n if not self.functions:\n for item in dir(self.file_import):\n new_function = getattr(self.file_import, item)\n # if it is a YMLMetadataCollector wrapper, add it to the list.\n if (\n callable(new_function)\n and isinstance(new_function, FunctionType)\n and \"YMLMetadataCollector\" in repr(new_function)\n ):\n self.functions.append(new_function)",
"def export(exp_data: ExportData) -> None:\n pass"
]
| [
"0.5648476",
"0.5640173",
"0.5598629",
"0.55643046",
"0.5322802",
"0.53223747",
"0.52671325",
"0.52416205",
"0.5182817",
"0.5167931",
"0.513596",
"0.5119927",
"0.5087243",
"0.5070539",
"0.50628704",
"0.5059321",
"0.5019335",
"0.5014842",
"0.5010461",
"0.5010175",
"0.49909532",
"0.49739712",
"0.49634343",
"0.49634343",
"0.49565196",
"0.49545595",
"0.4953617",
"0.494876",
"0.4933229",
"0.4905621"
]
| 0.6563669 | 0 |
A property for the location of any parameter files that can be used to build the skeleton of the bill of materials. Returns list A list of locations of the .json files that make up the parameters to be assembled into the skeleton. Raises ConfigurationNotFullyPopulated If the property is called but the configuration has not been populated and the property is None. | def parameters(cls) -> list:
if cls._parameters is None:
msg = (
"location of any files which contain json parameters "
"to be assembled required as a list of strings"
)
run_log.error(msg)
raise ConfigurationNotFullyPopulated(msg)
return cls._parameters | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parts(cls) -> list:\n if cls._parts is None:\n msg = (\n \"location of any files which contain json parts\"\n \" to be assembled required as a list of strings\"\n )\n run_log.error(msg)\n raise ConfigurationNotFullyPopulated(msg)\n return cls._parts",
"def parameters(config=\"phi_config.json\"):\n\n with open(config) as config_file:\n _parameters = json.load(config_file)\n\n # TODO: This could be simplified. Refactor where commented.\n _path_list = []\n for _file in _parameters[\"files\"]:\n\n # Create a path to the file for easier reading later.\n _file_path = \"{0}{1}\".format(_parameters[\"csv_path\"], _file[\"name\"])\n\n # Deal with the potential \"variables\" list\n _variables = _file[\"variables\"] if _file.get(\"variables\") else []\n\n # Append a tuple of these to the _path_list\n _path_list.append(tuple([_file_path, _variables]))\n\n _parameters[\"paths\"] = _path_list\n\n # Similar process for the target values.\n _file_path = \"{0}{1}\".format(_parameters[\"csv_path\"], _parameters[\"target\"][\"name\"])\n _variables = (\n _parameters[\"target\"][\"variables\"]\n if _parameters[\"target\"].get(\"variables\")\n else []\n )\n\n _parameters[\"target\"] = tuple([_file_path, _variables])\n\n return _parameters",
"def _populate_params(self):\n self.params = []\n for root, dirs, files in os.walk(os.curdir):\n for file in files:\n fullfile = str(os.path.join(root, file))\n if self.config.regex_find_params.match(fullfile):\n self.params.append(fullfile)",
"def parameters_path(self):\n return self._parameters_path",
"def conf_load_skeleton_locations(fin,skeleton):\n action = conf_load_skeleton_locations_specification(fin)\n if action == 'DEFAULT':\n locations = REAL\n elif action == 'FILE':\n locations = conf_load_senzory_locations_file(fin)\n elif action == '+':\n locations = conf_load_senzory_locations_in(fin)\n else:\n raise EnvironmentError(\n \"Unknown action option for LOCATIONS. Expected one of (DEFAULT|FILE|+)\"\n )\n skeleton.locations = locations",
"def __read_job_params_file__(self):\n # | - __read_job_params_file__\n job_params = {}\n\n # file_path = self.full_path + \"/\" + \"job_parameters.json\"\n\n file_exists = False\n\n file_path = os.path.join(\n self.full_path,\n \"job_parameters.json\")\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n\n ind_i = self.full_path.rfind(self.full_path.split(\"/\")[-1])\n path_i_rt = self.full_path[:ind_i - 1]\n\n file_path = os.path.join(\n # self.full_path[0:-2],\n path_i_rt,\n \"job_parameters.json\",\n )\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n\n file_path = os.path.join(\n # self.full_path[0:-2],\n path_i_rt,\n \"job_params.json\",\n )\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n if not file_exists:\n print(\"No job_params file found for following job:\")\n print(self.full_path)\n\n return(job_params)\n # __|",
"def GetParameters(cls):\n return [\n ParameterDefinition('csvList', 'IN', description='list of all csv trace files'),\n ParameterDefinition('logoPath', 'IN', description='path to the logo image file to be included as watermark (optional)'),\n ]",
"def paths(self) -> typing.Optional[typing.List[str]]:\n return self._values.get('paths')",
"def properties_path(self):\n return self.path / 'properties.json'",
"def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()",
"def _get_params_filepath(self):\n\t\treturn os.path.join(self.workdir, \"params.txt\")",
"def _configFiles(self):\n import glob\n ret = [] \n for ext in self.configManager.extensions:\n ret.extend(\n glob.glob(f\"{self.pipelinesDir}/{self.pipeName}/*{ext}\"))\n return ret",
"def load_parameters(self):\n json_data = open(\"param.json\")\n data = json.load(json_data)\n self.items = data[\"items\"]\n self.pollInterval = self.items[0]['poll_interval']",
"def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)",
"def init_configs(self):\n\n # get current location\n self.script_dir = os.path.dirname(__file__)\n\n # load configuration file\n with open(os.path.join(self.script_dir, \"config.json\")) as f:\n self.configs = json.load(f)\n \n # load some configs as attributes\n self.resource_folder = os.path.join(self.script_dir, self.configs[\"resource_path\"], self.resource_type, self.language)\n self.pre_processed_folder = os.path.join(self.resource_folder, self.configs[\"pre_processed_path\"])\n self.results_folder = os.path.join(self.resource_folder, self.configs[\"results_path\"])\n self.chunk_size = self.configs[\"resources\"][self.resource_type][\"chunk_size\"]",
"def paths(self):\n return self._swagger",
"def _get_path_parameters(self) -> Generator[Tuple[str, Type], None, None]:",
"def search_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"search_paths\")",
"def search_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"search_paths\")",
"def search_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"search_paths\")",
"def parameters(self) -> dict:\n return self._config.get('parameters', dict())",
"def parameters(self) -> Optional[Sequence['outputs.ImageRecipeComponentParameter']]:\n return pulumi.get(self, \"parameters\")",
"def getLocalParameters():\n try:\n config_file = open(\"./meta-files/parameters.yml\")\n params = yaml.load(config_file, Loader=yaml.FullLoader)\n return params\n except:\n raise ValueError(\"Unable to read or parse the system's parameters file\")",
"def magic_config(self,parameter_s=''):\n \n page('Current configuration structure:\\n'+\n pformat(self.rc.dict()))",
"def config_bundle(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"config_bundle\")",
"def _iter_configurations() -> Iterable[pathlib.Path]:\n for ext in CONFIGURATION_FILE_FORMATS:\n yield from HERE.rglob(f\"*{ext}\")",
"def config_bundle(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"config_bundle\")",
"def _get_parameters(self) -> list:\n return self.parameters",
"def get_config_path(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetConfigPath', self.handle)",
"def params(self):\n return self.event.get('pathParameters', dict())"
]
| [
"0.67385864",
"0.55904484",
"0.55675447",
"0.5486506",
"0.5365225",
"0.53188455",
"0.52505314",
"0.5213413",
"0.5184519",
"0.5162303",
"0.51546144",
"0.5019257",
"0.5015219",
"0.5014257",
"0.50114226",
"0.4997435",
"0.4982885",
"0.49784917",
"0.49784917",
"0.49784917",
"0.49560684",
"0.4945923",
"0.49442828",
"0.4941023",
"0.49332348",
"0.48956895",
"0.48947993",
"0.48629472",
"0.48373988",
"0.48295555"
]
| 0.73307854 | 0 |
A property for the locations of the .json files that will be loaded into the translator. Returns list Locations of the translation files. Raises ConfigurationNotFullyPopulated If the property is called but the configuration has not been populated and the property is None. | def translations(cls) -> list:
if cls._translations is None:
msg = (
"translation location not defined, the file location"
"required as a list of strings"
)
run_log.error(msg)
raise ConfigurationNotFullyPopulated(msg)
return cls._translations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def locations(self):\n return self.data.get(\"locations\", [])",
"def parts(cls) -> list:\n if cls._parts is None:\n msg = (\n \"location of any files which contain json parts\"\n \" to be assembled required as a list of strings\"\n )\n run_log.error(msg)\n raise ConfigurationNotFullyPopulated(msg)\n return cls._parts",
"def get_locations():\n return STATUS['locations']",
"def locations(self) -> Optional['outputs.MultiRegionSettingsResponse']:\n return pulumi.get(self, \"locations\")",
"def locations(self) -> Optional[Sequence['outputs.GetTrafficPolicyDocumentRuleLocationResult']]:\n return pulumi.get(self, \"locations\")",
"def locations(self):\r\n return resource.Location(self)",
"def parameters(cls) -> list:\n if cls._parameters is None:\n msg = (\n \"location of any files which contain json parameters \"\n \"to be assembled required as a list of strings\"\n )\n run_log.error(msg)\n raise ConfigurationNotFullyPopulated(msg)\n return cls._parameters",
"def read_locations():\n r = open(\"resources/files/locations.txt\", \"r\", newline=\"\\n\")\n locations = r.read().split(\"\\n\")\n return locations",
"def locations(self):\n return self.__locations",
"def locations(self):\n return self.__locations",
"def locations(self):\n return self.__locations",
"def locations(self):\n return self.__locations",
"def locations(self):\n return self.__locations",
"def locations(self):\n return self.__locations",
"def locations(self):\n return self.__locations",
"def locations(self):\n return self._locations",
"def location(self):\n return self.properties.get(\"location\", Location())",
"def load_config(self):\n\n try:\n file = open(self.cfg_file_name, 'r')\n str_data = file.read()\n except OSError as err:\n print(\"can't load property: {0}\".format(err))\n return None #wx.DefaultPosition\n else:\n file.close()\n #print(\"position loaded\")\n data = json.loads(str_data)\n return data #wx.Point(*position)",
"def properties_path(self):\n return self.path / 'properties.json'",
"def paths(self) -> typing.Optional[typing.List[str]]:\n return self._values.get('paths')",
"def locations(self):\n if \"locations\" in self._prop_dict:\n return LocationsCollectionPage(self._prop_dict[\"locations\"])\n else:\n return None",
"def get_locations(self):\n try:\n output_json = {}\n total_locations = list(self.mongo_db_object.find_all(AppConfigurations.MONGO_DATABASE,\n AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME))\n output_json = total_locations\n return AppConstants.result_success_template(output_json)\n\n except Exception as e:\n print(\"Error while fetching the Location Data.\", str(e))",
"def get_locations(self) -> list:\n return self.client.locations.get_all()",
"def storage_locations(self) -> Sequence[str]:\n return pulumi.get(self, \"storage_locations\")",
"def location(self):\r\n return self._get('location', {})",
"def locations(self):\n return [part.file for part in self.iterParts() if part]",
"def locations(self):\n return [part.file for part in self.iterParts() if part]",
"def locations(self):\n return [part.file for part in self.iterParts() if part]",
"def getLocationContents(self, locs, assemblyLevel=False, locContents=None):\n # Why isn't locContents an attribute of reactor? It could be another\n # property that is generated on demand\n if not locContents:\n locContents = self.makeLocationLookup(assemblyLevel)\n try:\n # now look 'em up\n return [locContents[str(loc)] for loc in locs]\n except KeyError as e:\n raise KeyError(\"There is nothing in core location {0}.\".format(e))",
"def location(self):\n if \"location\" in self._prop_dict:\n if isinstance(self._prop_dict[\"location\"], OneDriveObjectBase):\n return self._prop_dict[\"location\"]\n else :\n self._prop_dict[\"location\"] = Location(self._prop_dict[\"location\"])\n return self._prop_dict[\"location\"]\n\n return None"
]
| [
"0.6489377",
"0.6273797",
"0.61873615",
"0.61667126",
"0.607361",
"0.5990457",
"0.5929629",
"0.58884853",
"0.58612424",
"0.58612424",
"0.58612424",
"0.58612424",
"0.58612424",
"0.58612424",
"0.58612424",
"0.57474357",
"0.57359576",
"0.57176447",
"0.57171977",
"0.5691996",
"0.5639514",
"0.5525508",
"0.55096304",
"0.5498556",
"0.5474609",
"0.53917795",
"0.53917795",
"0.53917795",
"0.5346398",
"0.5334434"
]
| 0.6935578 | 0 |
The parts section of the configuration is used by the parsers to contain information about parts to be parsed to form the skeleton (dictionary form) of a Bill of Materials. Returns list The object assigned to parts. Raises ConfigurationNotFullyPopulated If the property is called but the configuration has not been populated and the property is None. | def parts(cls) -> list:
if cls._parts is None:
msg = (
"location of any files which contain json parts"
" to be assembled required as a list of strings"
)
run_log.error(msg)
raise ConfigurationNotFullyPopulated(msg)
return cls._parts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def config(self):\n package = self.package\n if not hasattr(self, '_partconfig'):\n self._partconfig = {}\n\n if package not in self._partconfig:\n self._partconfig[package] = Parts(package, *self.parts)\n return self._partconfig[package]",
"def get_parts(self):\n\n self.parts = []\n\n # User passes a single SupplierPart ID\n if 'part' in self.request.GET:\n try:\n self.parts.append(SupplierPart.objects.get(pk=self.request.GET.get('part')))\n except (ValueError, SupplierPart.DoesNotExist):\n pass\n\n elif 'parts[]' in self.request.GET:\n\n part_id_list = self.request.GET.getlist('parts[]')\n\n self.parts = SupplierPart.objects.filter(id__in=part_id_list)",
"def parse_part(self):\n parts = []\n for part in re.split(r'\\*\\*\\* ([A-Z- ]+) \\*\\*\\*', self.hand_file): # return [ 'part1', 'splitter1', 'part2',..\n parts.append(part)\n\n for i in range(0, len(parts)):\n if i == 0:\n self.part_dict['HEADER'] = parts[i]\n if i % 2 != 0: # number is odd\n self.part_dict[parts[i]] = parts[i + 1]",
"def get_pieces(self) -> list:\r\n return self.pieces",
"def get_assessment_parts(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AssessmentPartList(self._results, runtime=self._runtime)",
"def test_get_parts(self):\n pass",
"def update_parts():\n syt.log_info(\"$$$ Get Rebrickable Part info\")\n part_list = [x[0] for x in reapi.pull_all_pieces()] # ['piece_id', 'descr', 'category')\n part_list.pop(0) # Remove the header\n secondary_parts.add_parts_to_database(part_list, type=\"re\")\n # Todo: need to create a scraper for rebrickable piece num information\n syt.log_info(\"%%% Rebrickable Part info added to parts table\")",
"def get_all_parts(self, max_parts=None, part_number_marker=None):\r\n self._parts = []\r\n query_args = 'uploadId=%s' % self.id\r\n if max_parts:\r\n query_args += '&max_parts=%d' % max_parts\r\n if part_number_marker:\r\n query_args += '&part-number-marker=%s' % part_number_marker\r\n response = self.bucket.connection.make_request('GET', self.bucket.name,\r\n self.key_name,\r\n query_args=query_args)\r\n body = response.read()\r\n if response.status == 200:\r\n h = handler.XmlHandler(self, self)\r\n xml.sax.parseString(body, h)\r\n return self._parts",
"def _get_wells(self):\n wells = []\n for well in self.plate_meta['wells']:\n wells.append(well['path'])\n self.wells = wells",
"def create(data):\n \n return Partlist(\n list_id = data['id'],\n name = data['name'],\n pieces = data['num_parts'])",
"def list_parts(Bucket=None, Key=None, MaxParts=None, PartNumberMarker=None, UploadId=None, RequestPayer=None):\n pass",
"def getPieces( self ):\n return self.__pieces;",
"def usableparts(self):\n # First get the partition type-id for all hard disk partitions\n partid = {}\n for pline in self.fdiskl():\n partid[pline[0]] = pline[4]\n ups = {}\n for s in self.xlist(\"get-blkinfo\")[1]:\n mo = re.match(r'(/dev/[^:]*):(?: LABEL=\"([^\"]*)\")?(?:'\n ' UUID=\"([^\"]*)\")?(?: TYPE=\"([^\"]*)\")?', s)\n if mo:\n dev, label, uuid, fstype = mo.groups()\n if fstype in (None, \"linux_raid_member\", \"LVM2_member\"):\n continue\n if dev.startswith(\"/dev/loop\"):\n continue\n rem = None\n if dev.startswith(\"/dev/sd\"):\n if partid.get(dev) == \"fd\":\n # This test seems to be necessary because blkid\n # sometimes returns an fs-type, rather than\n # linux_raid_member\", for the the first device\n # in a formatted raid array\n continue\n rem = self.xlist(\"removable\", dev)[1][0].strip() == \"1\"\n ups[dev] = (fstype, label, uuid, rem)\n return ups",
"def SetNbParts(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_SplitSurfaceArea_SetNbParts(self, *args)",
"def _parts(self):\n return [part for part in Package.__walkparts(self.__relationships)]",
"def part_lister(mpupload, part_number_marker=None):\r\n more_results = True\r\n part = None\r\n while more_results:\r\n parts = mpupload.get_all_parts(None, part_number_marker)\r\n for part in parts:\r\n yield part\r\n part_number_marker = mpupload.next_part_number_marker\r\n more_results= mpupload.is_truncated",
"def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def __init__(self, parts):\n self.parts = parts\n self.length = sum([p.length for p in parts])",
"def get_parts_count(self):\n\t\treturn call_sdk_function('PrlSrvCfgHdd_GetPartsCount', self.handle)",
"def assemble_parts(self):\n self.parts['whole'] = self.output\n self.parts['encoding'] = self.document.settings.output_encoding\n self.parts['version'] = docutils.__version__",
"def __init__(self):\n # Load in light information.\n self.lights_dictionary = utils.load_dictionary(self.LIGHTS_JSON)\n # Load in nice name information.\n self.nice_name_dictionary = utils.load_dictionary(self.NICE_JSON)\n # Set an initial order flag.\n self.part_order = 0\n # Construct part information.\n self.part_cache = {}\n self.part_reference = {}\n\n # Create default part pack.\n self.available_packs = [(\"Parts\", self.MODEL_PATH)]\n\n # Find any mods with model packs inside.\n if os.path.exists(self.MODS_PATH):\n mod_folders = os.listdir(self.MODS_PATH)\n for mod_folder in mod_folders:\n full_mod_path = os.path.join(self.MODS_PATH, mod_folder)\n if \"models\" in os.listdir(full_mod_path):\n full_model_path = os.path.join(\n self.MODS_PATH,\n mod_folder,\n \"models\"\n )\n self.available_packs.append((mod_folder, full_model_path))\n\n # Find Parts and build a reference dictionary.\n for (pack_name, pack_folder) in self.available_packs:\n for category in self.get_categories(pack=pack_name):\n parts = self.get_objs_from_category(category, pack=pack_name)\n for part in parts:\n # Get Unique ID.\n unique_id = os.path.splitext(part)[0]\n # Construct full path.\n search_path = pack_folder or self.MODEL_PATH\n part_path = os.path.join(search_path, category, part)\n # Place part information into reference.\n self.part_reference[unique_id] = {\n \"category\": category,\n \"full_path\": part_path,\n \"pack\": pack_name\n }",
"def build_parts_from_dict(self, data, skip_power_controls=False):\n \n # Validate Objects information.\n if \"Objects\" not in data:\n return\n\n # Start creating parts.\n parts = []\n for part_data in data[\"Objects\"]:\n part = part_data[\"ObjectID\"].replace(\"^\", \"\")\n timestamp = part_data[\"Timestamp\"]\n user_data = part_data[\"UserData\"]\n part_position = part_data[\"Position\"]\n up_vec = part_data[\"Up\"]\n at_vec = part_data[\"At\"]\n # Build the item.\n item = self.build_item(\n part,\n timestamp,\n user_data,\n part_position,\n up_vec,\n at_vec,\n skip_power_controls\n )\n parts.append(item)\n\n return parts",
"def unpack(self):\n elements = []\n self.InitPathTraversal()\n parts = self.GetParts()\n parts.InitTraversal()\n for i in range(parts.GetNumberOfItems()):\n ele = parts.GetItemAsObject(i)\n elements.append(ele)\n\n # gr.InitPathTraversal()\n # for _ in range(gr.GetNumberOfPaths()):\n # path = gr.GetNextPath()\n # print([path])\n # path.InitTraversal()\n # for i in range(path.GetNumberOfItems()):\n # a = path.GetItemAsObject(i).GetViewProp()\n # print([a])\n\n return elements",
"def _config_sections(self):\n data = []\n section_data = []\n for index, line in enumerate(self.running_config):\n if self._nextline_startswith_space(index):\n section_data.append(line)\n else:\n if len(section_data) > 0:\n section_data.append(line)\n data.append(section_data)\n section_data = []\n return data",
"def deserialize_pieces(self):\n # pieces_dict = {}\n self.gen = PieceGenerator(self)\n for piece_string in self.pieces_string.split(','):\n piece_dict = dict(zip(self.piece_cols, piece_string.split('/')))\n piece_dict[\"move_pattern\"] = [int(num) for num in piece_dict[\"move_pattern\"]]\n piece_dict[\"jump_pattern\"] = [int(num) for num in piece_dict[\"jump_pattern\"]]\n piece_dict[\"lose_on_capture\"] = piece_dict[\"lose_on_capture\"].isupper()\n self.gen.add_piece(piece_dict)\n # pieces_dict[piece_string[0]] = PieceGenerator(self, piece_string)\n # self.pieces_dict = pieces_dict",
"def section_list(self):\n return self._config_parser.sections()",
"def num_parts(self):\n return self._num_parts",
"def items(self):\n\t\treturn self.config_parser.items(self.section_name)",
"def setPartsToRegister(self, parts):\n internals.blpapi_ServiceRegistrationOptions_setPartsToRegister(\n self.__handle, parts)",
"def GetNbParts(self):\n return _ShapeUpgrade.ShapeUpgrade_SplitSurfaceArea_GetNbParts(self)"
]
| [
"0.67192507",
"0.5725723",
"0.56452507",
"0.542747",
"0.526864",
"0.52415466",
"0.52362096",
"0.5136002",
"0.50883913",
"0.5082257",
"0.5070895",
"0.504625",
"0.5042836",
"0.50137293",
"0.4963221",
"0.49042815",
"0.49025095",
"0.48889834",
"0.48752812",
"0.48612127",
"0.48544246",
"0.48434",
"0.48423302",
"0.48379815",
"0.48052144",
"0.47926334",
"0.4787339",
"0.47778302",
"0.47771576",
"0.47730884"
]
| 0.7192802 | 0 |
The working directory for the Configuration. Returns str Path to working directory. | def working_dir(cls) -> str:
return cls._working_dir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def working_directory(self):\n return self._working_directory",
"def working_dir(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"working_dir\")",
"def get_working_dir(self):\r\n return self.process.get_working_dir()",
"def workDir(self):\n self.debug.printHeader()\n #if hasattr(self.settings, \"workDir\"): toret=self.settings.workDir # 025 todo 143\n if self.settings.config.has_section(\"files\") and self.settings.config.has_option(\"files\",\"workDir\"):\n # toret=self.settings.get(\"files\",\"workDir\") 025\n toret=self.settings.workDir\n else: toret=os.environ['HOME']+'/xxz'\n # Also could write workdir back to settings.\n return toret",
"def get_working_dir():\n working_dir = os.path.dirname(os.path.abspath(__file__))\n return working_dir",
"def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))",
"def workdir(self) -> str:\n return self._workdir",
"def get_working_dir(self):\n return self.role.directory",
"def work_dir(self):\n return self._work_dir",
"def get_working_directory():\n return os.getcwd()",
"def configPath(self):\n return os.path.dirname(__file__)",
"def workdir(self):\n\n return self._workdir",
"def config_dir(self) -> Path:\n return self._config_dir",
"def get_workdir() -> str:\n Config.__get()\n assert Config.__config is not None\n return get_abspath(Config.__config.get('wsgi', 'workdir').strip())",
"def confDir(self):\r\n return self._confDir",
"def cwd(self):\n if self.dryrun:\n return self._cwd\n else:\n return os.getcwd()",
"def conf_dir(self):\r\n return self._conf_dir",
"def workingCopyPath(self):\n \n return self._workingCopyPath",
"def config_directory(self):\n\n return self.get_raw(\"config_directory\")",
"def GetCurrentWorkingDirectory(self):\n return _gmat_py.FileManager_GetCurrentWorkingDirectory(self)",
"def module_path(self):\n return self.config['cwd']",
"def curdir(self):\n return self.var('getcwd()')",
"def set_working_dir():\n cwd = os.getcwd()\n nwd = cwd[:cwd.find(config.project)+len(config.project)]\n os.chdir(nwd)\n return nwd",
"def config_dir(template_file_path=None):\n if template_file_path:\n return os.path.dirname(template_file_path)\n\n return os.getcwd()",
"def config_dir(self) -> str:\n if not self._config_dir:\n self._config_dir = self._detect_config_dir()\n return self._config_dir",
"def base_dir(self):\n return self.cm.get(YAML_CONFIG_WORKING_REPO)",
"def GetCurrentDir(self) -> str:\n ...",
"def get_remote_working_dir(self):\n return self.remote_working_dir",
"def get_current_directory():\n\treturn os.path.dirname(os.path.abspath(__file__))",
"def get_cwd():\n return os.getcwd()"
]
| [
"0.8272883",
"0.80822647",
"0.80707604",
"0.7958291",
"0.7886885",
"0.7864339",
"0.786264",
"0.78127676",
"0.7802864",
"0.77705896",
"0.77094704",
"0.7702679",
"0.7647526",
"0.75679433",
"0.75248295",
"0.74807364",
"0.7449656",
"0.74102527",
"0.739319",
"0.7369031",
"0.73565394",
"0.72994184",
"0.7293777",
"0.72689456",
"0.7263761",
"0.7242607",
"0.7155137",
"0.7136609",
"0.71185935",
"0.704797"
]
| 0.84283805 | 0 |
The default parameter type that will be assigned to the attribute params of all components and assemblies. Returns str String path to the param type. | def default_param_type(cls) -> str:
return cls._default_param_type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_default_parameters(self, default_type):\n return self._default_parameters.get(default_type, {})",
"def get_param(params, key, defaults_type):\n if key in params:\n return params[key]\n else:\n if defaults_type == 'interpolation_slicing':\n return compas_slicer.parameters.interpolation_slicing_default_param(key)\n elif defaults_type == 'gcode':\n return compas_slicer.parameters.gcode_default_param(key)\n elif defaults_type == 'layers':\n return compas_slicer.parameters.layers_default_param(key)\n elif defaults_type == 'print_organization':\n return compas_slicer.parameters.gcode_default_param(key)\n else:\n raise ValueError('The specified parameter type : ' + str(defaults_type) + ' does not exist.')",
"def default_current_type(context):\n return context.current_parameters.get('type')",
"def type_name(self):\n return \"%s %s\" % (self.param_type, self.name)",
"def _formal_params(self, doclet):\n def format_default_according_to_type_hints(value, declared_types):\n \"\"\"Return the default value for a param, formatted as a string\n ready to be used in a formal parameter list.\n\n JSDoc is a mess at extracting default values. It can unambiguously\n extract only a few simple types from the function signature, and\n ambiguity is even more rife when extracting from doclets. So we use\n any declared types to resolve the ambiguity.\n\n :arg value: The extracted value, which may be of the right or wrong type\n :arg declared_types: A list of types declared in the doclet for\n this param. For example ``{string|number}`` would yield ['string',\n 'number'].\n\n \"\"\"\n def first(list, default):\n try:\n return list[0]\n except IndexError:\n return default\n\n declared_type_implies_string = first(declared_types, '') == 'string'\n\n # If the first item of the type disjunction is \"string\", we treat\n # the default value as a string. Otherwise, we don't. So, if you\n # want your ambiguously documented default like ``@param\n # {string|Array} [foo=[]]`` to be treated as a string, make sure\n # \"string\" comes first.\n if isinstance(value, string_types): # JSDoc threw it to us as a string in the JSON.\n if declared_types and not declared_type_implies_string:\n # It's a spurious string, like ``() => 5`` or a variable name.\n # Let it through verbatim.\n return value\n else:\n # It's a real string.\n return dumps(value) # Escape any contained quotes.\n else: # It came in as a non-string.\n if declared_type_implies_string:\n # It came in as an int, null, or bool, and we have to\n # convert it back to a string.\n return '\"%s\"' % (dumps(value),)\n else:\n # It's fine as the type it is.\n return dumps(value)\n\n if self._explicit_formal_params:\n return self._explicit_formal_params\n\n # Harvest params from the @param tag unless they collide with an\n # explicit formal param. Even look at params that are really\n # documenting subproperties of formal params. Also handle param default\n # values.\n params = []\n used_names = []\n MARKER = object()\n\n for name, default, type in [(param['name'].split('.')[0],\n param.get('defaultvalue', MARKER),\n param.get('type', {'names': []}))\n for param in doclet.get('params', [])]:\n if name not in used_names:\n params.append(rst.escape(name) if default is MARKER else\n '%s=%s' % (rst.escape(name),\n rst.escape(format_default_according_to_type_hints(default, type['names']))))\n used_names.append(name)\n\n # Use params from JS code if there are no documented params:\n if not params:\n params = [rst.escape(p) for p in doclet['meta']['code'].get('paramnames', [])]\n\n return '(%s)' % ', '.join(params)",
"def param_kind(self) -> Optional['outputs.ParamKind']:\n return pulumi.get(self, \"param_kind\")",
"def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n trainer_default_params = inspect.signature(cls).parameters\n name_type_default = []\n for arg in trainer_default_params:\n arg_type = trainer_default_params[arg].annotation\n arg_default = trainer_default_params[arg].default\n try:\n arg_types = tuple(arg_type.__args__)\n except AttributeError:\n arg_types = (arg_type,)\n\n name_type_default.append((arg, arg_types, arg_default))\n\n return name_type_default",
"def param_kind(self) -> Optional['outputs.ParamKindPatch']:\n return pulumi.get(self, \"param_kind\")",
"def typeString(self):\n return Parameter.string_dict[self._field.type]",
"def get_default_params_file() -> Path:\n return get_path_to_pyflow() / \"pyflow\" / \"conf\" / RUN_PARAMS_FILENAME",
"def FormatParamType(self, param):\n return self.ToPpapiType(param.type_, optional=param.optional)",
"def get_parameter_type(self, name):\n raise NotImplementedError()",
"def _add_parameter_default(self, msg_param):\n default_types = msg_param.default_types\n while default_types: # iterate over each bit\n def_type = default_types & (~default_types+1)\n default_types ^= def_type\n def_type -= 1\n if def_type not in self._default_parameters:\n self._default_parameters[def_type] = {}\n self._default_parameters[def_type][msg_param.key] = msg_param.value",
"def getTypeCode(self):\n return _libsbml.Parameter_getTypeCode(self)",
"def _detect_source_param_class(self, param):\n if self.config[\"source\"][\"parameter\"][\"name\"] == param:\n return self.config[\"source\"][\"parameter\"][\"class\"]\n else:\n return None",
"def get_parameter_type(self, name):\n phil_scope = phil.parse(\n \"\"\"include scope dials.command_line.scale.phil_scope\"\"\",\n process_includes=True,\n )\n obj = phil.find_scope(phil_scope, name)\n if not obj:\n raise ValueError(\n \"\"\"Unable to resolve %s in the phil scope, make sure full phil path\nis provided. For example, physical.decay_correction rather than decay_correction\"\"\"\n % name\n )\n return obj.type.phil_type # a str: \"int\", \"bool\" etc",
"def _encode_runtime_parameter(param: data_types.RuntimeParameter) -> str:\n if param.ptype is int:\n type_enum = pipeline_pb2.RuntimeParameter.INT\n elif param.ptype is float:\n type_enum = pipeline_pb2.RuntimeParameter.DOUBLE\n else:\n type_enum = pipeline_pb2.RuntimeParameter.STRING\n type_str = pipeline_pb2.RuntimeParameter.Type.Name(type_enum)\n return f'{param.name}={type_str}:{str(dsl.PipelineParam(name=param.name))}'",
"def fix_default_param(defparam, classname):\n if (classname + '::') == defparam[0:len(classname)+2:]:\n return defparam[len(classname)+2::]\n #if defparam[len(defparam)-1] == \"f\":\n # return defparam[0:len(defparam)-1]\n return defparam",
"def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__",
"def simple_param(self, in_, name, python_type, optional=False, **kwargs):\n required = not optional\n params = (get_type_base(python_type) or {}).copy()\n params.update(\n name=name,\n in_=in_,\n required=required,\n )\n params.update(kwargs)\n return self.parameter(core.Parameter(**params))",
"def parameter_type_dict():\n return {'filter' : filters.filter_parameters,\n 'global_options' : global_options.global_options_parameters,\n 'input_device' : input_devices.input_device_parameters,\n 'input_stream' : input_streams.input_stream_parameters,\n 'output_device' : output_devices.output_device_parameters,\n 'output_stream' : output_streams.output_stream_parameters}",
"def get_type(self, instance):\n if instance.type == BaseParameter.BOOLEAN_TYPE:\n return \"Boolean\"\n elif instance.type == BaseParameter.STATIC_TYPE:\n return \"Static\"\n elif instance.type == BaseParameter.STRING_TYPE:\n return \"String\"\n elif instance.type == BaseParameter.CHOICE_TYPE:\n return \"Choice\"\n elif instance.type == BaseParameter.TEXT_TYPE:\n return \"Text\"\n elif instance.type == BaseParameter.INTEGER_TYPE:\n return \"Integer\"\n else:\n return \"Float\"",
"def get_param_type(value):\n dtype = type(value).__name__\n if dtype not in ['str', 'float', 'int', 'bool', 'list']:\n dtype = 'str'\n if dtype == 'list':\n try:\n eltype = type(value[0]).__name__\n except IndexError:\n eltype = 'str'\n if eltype not in ['str', 'float', 'int', 'bool']:\n eltype = 'str'\n dtype = eltype + dtype\n return dtype",
"def set_default_param(self, param_name, param, set_val = None, type_of_param = None):\n if (set_val is None):\n set_val = False\n param_index = self.index(param_name);\n\n if (param is None):\n return self.param_default[param_index]\n\n if (type_of_param is None):\n type_of_param = self.type_param[param_index]\n if (type(param) is type_of_param):\n param = [param]\n elif (type(param) is not list):\n error_str = (\n \"\"\"\n Trying to set parameter to non-integer value (or string if instance name, float if eps+rayeps value), and not a list.\n Type of param: %s\n \"\"\" % str(type(param))\n )\n raise TypeError(error_str)\n self.param_default[param_index] = copy.deepcopy(param)\n \n if (set_val):\n self._param_val[param_index] = copy.deepcopy(param)\n return param",
"def getStrParam(self, paramkey, default=None):\n value = self.request.getParameter(paramkey)\n if value is None: return default\n return value",
"def DefaultPath(self) -> str:\n return self.m_def_path",
"def get_parameter_type(self, topic):\n \n for attr in self.parm_list:\n if attr.topic == topic:\n return type(attr.value)\n\n self.logging.error(\"Can't find topic: \"+topic)\n return type(None)",
"def param_ref(self) -> Optional['outputs.ParamRef']:\n return pulumi.get(self, \"param_ref\")",
"def override(self, default: Optional[str] = None) -> Optional[str]:\n return self.type_override if self.type_override else default",
"def getParamsType(self):\n\t\treturn [\"int\", \"int\"]"
]
| [
"0.66451085",
"0.6565646",
"0.64847505",
"0.64698064",
"0.6365341",
"0.6052358",
"0.59003174",
"0.58954114",
"0.58831567",
"0.58472455",
"0.575767",
"0.5744395",
"0.5729673",
"0.56840295",
"0.5677127",
"0.5653063",
"0.5642644",
"0.5619081",
"0.56077486",
"0.56068385",
"0.55746174",
"0.5573505",
"0.5546692",
"0.5546395",
"0.5509859",
"0.54957646",
"0.54904276",
"0.5467437",
"0.5456915",
"0.54421645"
]
| 0.8177016 | 0 |
Sets the temporary directory and also changes the log handler to write in this directory. | def temp_dir(cls, value: Union[str, Path]):
start_message = (
f"Configuration Details\n\n{pprint.pformat(cls.to_dict(), indent=4)}"
)
cls._temp_dir = value
change_handler(f"{value}/run.log")
run_log.info(start_message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setTmpDir(self):\n\t\tif os.name != 'nt':\n\t\t\t# On unix use /tmp by default\n\t\t\tself.tmpDir = os.environ.get(\"TMPDIR\", \"/tmp\")\n\t\t\tself.tmpDir = os.environ.get(\"TMP\", self.tmpDir)\n\t\telse:\n\t\t\t# On Windows use the current directory\n\t\t\tself.tmpDir = os.environ.get(\"TMPDIR\", \"\")\n\t\t\tself.tmpDir = os.environ.get(\"TMP\", self.tmpDir)\n\t\t\tself.tmpDir = os.environ.get(\"TEMP\", self.tmpDir)\n\t\tif not os.path.isdir(self.tmpDir):\n\t\t\tself.tmpDir = \"\"\n\t\telif not os.access(self.tmpDir, os.F_OK + os.W_OK):\n\t\t\tself.tmpDir = \"\"",
"def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]",
"def _use_temp_directory(self):\n if not self._is_temp_dir:\n self._orig_base_data_dir = self._base_data_dir\n self._orig_base_logs_dir = self._base_logs_dir\n temp_dir = Path(tempfile.mkdtemp())\n self._base_data_dir = temp_dir / \"data\"\n self._base_logs_dir = temp_dir / \"logs\"\n self.db.change_path(\":memory:\")\n self.set_current(\"default\", update=False)\n self._is_temp_dir = True\n return temp_dir",
"def setUp(self):\n self.path = tempfile.mkdtemp()\n self.log = log.Log(self.path)",
"def makeTempDir(self):\n try:\n os.mkdir(self.temp_dir)\n except FileExistsError:\n pass",
"def setUp(self):\n self.tmp = TemporaryDirectory()",
"def chdir_tmp(self):\n dirname = make_tempdir()\n os.chdir(dirname)\n\n return dirname",
"def chdir_tmp(self):\n dirname = make_tempdir()\n os.chdir(dirname)\n\n return dirname",
"def _temp_dir(self):\n tmp_dir = os.path.join(self.output_dir, self.config.find_tune[\"run_dir\"])\n try:\n os.makedirs(tmp_dir)\n except OSError:\n pass\n os.chdir(tmp_dir)\n self.tmp_dir = \"./\"",
"def __enter__(self):\n self.temporary_directory = tempfile.mkdtemp(**self.options)\n logger.debug(\"Created temporary directory: %s\", self.temporary_directory)\n return self.temporary_directory",
"def setUp(self):\n super(TestCaseWithTempDir, self).setUp()\n self.tempdir = tempfile.mkdtemp()\n self.addCleanup(shutil.rmtree, self.tempdir)",
"def setUp(self):\n chdir(mkdtemp())",
"def setUp(self):\n chdir(mkdtemp())",
"def _TempDirSetup(self, prefix='tmp', update_env=True, base_dir=None):\n # Stash the old tempdir that was used so we can\n # switch it back on the way out.\n self.tempdir = tempfile.mkdtemp(prefix=prefix, dir=base_dir)\n os.chmod(self.tempdir, 0o700)\n\n if update_env:\n with tempfile._once_lock:\n self._tempdir_value = tempfile._get_default_tempdir()\n self._tempdir_env = tuple((x, os.environ.get(x))\n for x in _TEMPDIR_ENV_VARS)\n # Now update TMPDIR/TEMP/TMP, and poke the python\n # internal to ensure all subprocess/raw tempfile\n # access goes into this location.\n os.environ.update((x, self.tempdir) for x in _TEMPDIR_ENV_VARS)\n # Finally, adjust python's cached value (we know it's cached by here\n # since we invoked _get_default_tempdir from above). Note this\n # is necessary since we want *all* output from that point\n # forward to go to this location.\n tempfile.tempdir = self.tempdir",
"def setUpClass(cls) -> None:\n cls.tempdir = tempfile.TemporaryDirectory()\n cls.path = cls.tempdir.name",
"def secure_temp_dir(context):\n tmpd = tempfile.TemporaryDirectory()\n context.tempdir = tmpd",
"def temp_dir():\n global _temp_dir\n warnings.warn(\n \"Please use the :mod:`tempfile` module from the standard library\",\n DeprecationWarning\n )\n _create_temp_dir()\n return _temp_dir",
"def setUp(self):\n self.tmpdir = mkdtemp()",
"def setPath(logPath):\n GlobalLogger.logger.close()\n GlobalLogger.logger = FileLogger(logPath)",
"async def set_log_dir(self, ctx, log_dir):\n self.log_dir = log_dir\n await ctx.send(f\"Successfully set log directory to {log_dir}\")",
"def _make_tempdir(self):\n self._clean_tempdir()\n os.mkdir(self._get_tempdir())\n assert os.path.exists(self._get_tempdir())\n rospy.on_shutdown(self._clean_tempdir)\n rospy.on_shutdown(self._clear_cache)",
"def _initialize_log_file(config):\n for settings in config[\"handlers\"].values():\n if _is_file_handler(settings):\n log_path = Path(settings[\"filename\"])\n log_path.parent.mkdir(parents=True, exist_ok=True)\n log_path.touch(exist_ok=True)",
"def test_temp_dir(self):\r\n temp_dir = get_qiime_temp_dir()\r\n\r\n self.assertTrue(exists(temp_dir),\r\n \"temp_dir does not exist: %s\" % temp_dir)\r\n self.assertTrue(isdir(temp_dir),\r\n \"temp_dir is not a directory: %s\" % temp_dir)\r\n self.assertTrue(access(temp_dir, W_OK),\r\n \"temp_dir is not writable: %s\" % temp_dir)",
"def setUp(self):\n tempDir.safe_mkdir(parents=True)\n os.chdir(tempDir.as_posix())",
"def make_tempdir():\n global _TEMPDIR\n if not _TEMPDIR:\n _TEMPDIR = tempfile.mkdtemp()\n return _TEMPDIR",
"def initialize_logger():\n if not os.path.exists(LOGGING_DIRECTORY):\n os.makedirs(LOGGING_DIRECTORY)\n os.chmod(LOGGING_DIRECTORY, 0o777)",
"def setUp(self):\n self._dir = tempfile.mkdtemp(prefix=f\"miniwdl_test_{self.id()}_\")",
"def set_log_dir(dir):\r\n LogOptions._LOG_DIR = dir",
"def get_new_temp_dir(self):\n return self.useFixture(fixtures.TempDir())",
"def _setup_file_logger(self):\n if self._file_log_handler is not None:\n raise RuntimeError(\"{}: File logger already exists\".format(self))\n\n # Note that in unit test driver's runpath might not be set\n if self.cfg.file_logger and self.runpath is not None:\n formatter = logging.Formatter(\n \"%(asctime)s %(levelname)s %(message)s\"\n )\n self._file_log_handler = logging.FileHandler(\n os.path.join(self.runpath, self.cfg.file_logger)\n )\n self._file_log_handler.setFormatter(formatter)\n self.logger.addHandler(self._file_log_handler)\n self.logger.propagate = False # No console logs"
]
| [
"0.7089073",
"0.69517523",
"0.68200475",
"0.6736389",
"0.66263765",
"0.65425605",
"0.6539801",
"0.6539801",
"0.6538104",
"0.643624",
"0.6405769",
"0.64057046",
"0.64057046",
"0.6402267",
"0.63498914",
"0.63418335",
"0.63274956",
"0.628209",
"0.6277255",
"0.62277234",
"0.616906",
"0.6096666",
"0.60893023",
"0.6081605",
"0.60674155",
"0.60479826",
"0.6024754",
"0.6011471",
"0.6000662",
"0.59824085"
]
| 0.6963477 | 1 |
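A minimal, runnable sketch of the pattern documented in the record above — setting a temporary directory on a config-style class and re-pointing the run logger's file handler at it. Names such as Config, run_log and change_handler are assumptions reconstructed from this record, not the library's actual API.

import logging
from pathlib import Path

run_log = logging.getLogger("run")
run_log.setLevel(logging.INFO)

def change_handler(log_path: str) -> None:
    # Swap any existing FileHandler so that new messages land in log_path.
    for handler in list(run_log.handlers):
        if isinstance(handler, logging.FileHandler):
            run_log.removeHandler(handler)
            handler.close()
    Path(log_path).parent.mkdir(parents=True, exist_ok=True)
    run_log.addHandler(logging.FileHandler(log_path))

class Config:
    _temp_dir = None

    @classmethod
    def set_temp_dir(cls, value) -> None:
        cls._temp_dir = Path(value)
        change_handler(f"{value}/run.log")
        run_log.info("temp_dir set to %s", value)

Config.set_temp_dir("tmp_run")  # run.log is now written under ./tmp_run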
The plot directory for outputs. Returns str Path to the plot directory. | def plot_dir(cls) -> Union[str, Path]:
if cls._plot_dir is None:
msg = "plot_dir not supplied, defaulting to working_dir"
run_log.warning(msg)
return cls.working_dir
else:
return cls._plot_dir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)",
"def output_dir(self):\n return os.path.join(self._sandbox, 'output' + os.path.sep)",
"def create_plots() -> str:\r\n return _find_or_create_dir(PLOTS_FOLDER)",
"def get_output_path():\n return os.getcwd() + \"/output/\"",
"def _output_path(name):\n output = Path(\"../Analysis Results/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(f\"{name}.png\")",
"def outputdir():\n return __OUTPUT_DIR__",
"def get_output_dir(self):\n return self.output_dir",
"def create_plot_dir(base_dir: str) -> str:\n time_str = datetime.now().strftime('%Y%b%d-%H%M%S') \n plot_dir = os.path.join(res_dir, 'fig_'+time_str)\n# plot_dir = os.path.join(res_dir, 'plot')\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n\n return plot_dir",
"def output_dir(self):\n ep, pp = (\n maybe_path(os.getenv(\"BRIGHTWAY2_OUTPUT_DIR\")),\n maybe_path(config.p.get(\"output_dir\")),\n )\n if ep and ep.is_dir():\n return ep\n elif pp and pp.is_dir():\n return pp\n else:\n return self.request_directory(\"output\")",
"def _set_output_dir(self):\n return os.path.join(self.outputDir,\n datetime.datetime.utcnow().strftime(\"%Y%m%d\"))",
"def output_path(self) -> str:\n if self._output_path is None:\n if not self._root_folder:\n self._root_folder = self._env.experiments_folder\n folder = os.path.join(self._root_folder, self.key)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._output_path = folder\n\n return self._output_path",
"def GetOutputPath(self):\n self.outputDir = raw_input(\"What path should be outputted to?\\n\\r>>> \")\n if self.outputDir is \"\":\n self.outputDir = \"C:\\Users\\Lucas\\Pictures\\GraphOutput\"\n bob = os.path.isabs(self.inputDir)\n if not bob:\n print \"that was not an excepted path name. Try again\"\n self.GetOutputPath()",
"def out_dir(self) -> str:\n return self._out_dir",
"def return_output_path(self):\n # Return the path of the output file\n return os.path.join(self._output_file_location, self._output_filename)",
"def get_output_directory(self):\n return self.__output_dir",
"def getTradeOutputDir():\n\tglobal config\n\treturn config['directory']['output']",
"def fig_dir(self):\n return self._fig_dir",
"def get_output_dir(self):\n return self.OUTPUT_DIRECTORY",
"def __get_output_dir(self, conf):\n return conf[self.conf_item.get_output_dir()]",
"def output_path(self):\n\n output_path = stringify(self._output_path)\n if output_path is None:\n with current_context() as ctx:\n output_path_relative = stringify(self.output_path_relative)\n if output_path_relative is not None:\n output_path = join_path(ctx.paths.output, output_path_relative)\n else:\n output_path = ctx.current.project.get_output_path(self.executor.output_type)\n return output_path",
"def getOutputDirectory(self):\n return self._outputDir_",
"def save_plot(self):\r\n\t\t# Generate the plot\r\n\t\tself.generate_plot()\r\n\t\t# Create save directory\r\n\t\tdirectory = self.dir + '/%s/' % str(int(self.universe.init_time))\r\n\t\tif not path_exists(directory):\r\n\t\t\tmakedirs(directory)\r\n\t\t# Save image file\r\n\t\tself.fig.savefig(directory+str(self.universe.time))",
"def outputPath():\n scenePath = bpy.data.filepath\n # If the scene hasn't been saved yet the path is empty.\n # Returning an empty path prompts the user for saving the scene.\n if not scenePath:\n return\n renderPath = os.path.join(os.path.dirname(scenePath), \"{}_thumbs\".format(NAME))\n return renderPath",
"def _dir_out(self):\n ens_label = utils.io.ens_label(self.ens_mem)\n return os.path.join(self.proj.direc_out, self.proj.name,\n self.model.name, self.run.name,\n ens_label, self.name)",
"def get_output_path(self):\n output_path = '%s/%s' % (\n os.path.expanduser(JOB_OUTPUT_PATH), self.get_unique_name())\n return output_path",
"def output_file_path(self):\n return self.__output_file_path",
"def get_output_folder(self):\n return os.path.join(self.root_output_folder, self.base_fish_folder)",
"def output_path():\n folder = path.join(path.curdir, \"stages\")\n folder = path.abspath(folder)\n return ensure_path(folder)",
"def output_dir(self):\n return os.path.join(self.checkpoint_dir, self.model_dir)",
"def simulation_dir(self):\n try:\n return (self.output_directory / self.sim_id).expand()\n except AttributeError:\n return Path()"
]
| [
"0.7019947",
"0.68157727",
"0.6806294",
"0.6789725",
"0.6714439",
"0.6688091",
"0.66791457",
"0.66611266",
"0.66524935",
"0.6600386",
"0.65862453",
"0.6566615",
"0.65542203",
"0.65312684",
"0.6526321",
"0.6523932",
"0.64745474",
"0.64738804",
"0.6455824",
"0.64281875",
"0.6425538",
"0.6360436",
"0.63070804",
"0.6290646",
"0.6237903",
"0.61871624",
"0.6180986",
"0.6158742",
"0.6134939",
"0.61344177"
]
| 0.8171626 | 0 |
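A hypothetical sketch of the fallback behaviour in the plot_dir record above: when no plot directory was supplied, the accessor logs a warning and falls back to the working directory. In the source it is presumably exposed as a class-level property; a plain classmethod is used here for simplicity.

import logging
from pathlib import Path

run_log = logging.getLogger("run")

class Config:
    working_dir = Path.cwd()
    _plot_dir = None

    @classmethod
    def plot_dir(cls) -> Path:
        if cls._plot_dir is None:
            run_log.warning("plot_dir not supplied, defaulting to working_dir")
            return cls.working_dir
        return Path(cls._plot_dir)

print(Config.plot_dir())   # warns and returns the current working directory
Config._plot_dir = "plots"
print(Config.plot_dir())   # Path('plots')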
Defines the config file. The config can be loaded from a supplied dictionary or from a path. The intention in using a classmethod is that the config can be imported at any stage in a process after initialisation without reloading. | def define_config(
cls, config_dict: dict = {}, config_path: Optional[Union[str, Path]] = None
):
config = {}
if config_path is not None:
with open(Path(config_path), "r") as f:
config = json.load(f)
UpdateDict(config, config_dict)
cls.update_config(**config) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, config_file_name=\"config.json\"):\n self.config_file_name = config_file_name\n self._config = self._open_config_file()",
"def __init__(self, path_to_config_file):\n self.file_path = path_to_config_file",
"def __init__(self, config_file='/etc/sfa/ldap_config.py'):\n\n try:\n execfile(config_file, self.__dict__)\n\n self.config_file = config_file\n # path to configuration data\n self.config_path = os.path.dirname(config_file)\n except IOError:\n raise IOError, \"Could not find or load the configuration file: %s\" \\\n % config_file",
"def __init__(self, config_file):\n with open(config_file, 'r') as file:\n self.config = json.load(file)\n self.set_config(self.config)",
"def __init__(self, config_path: str = \"config.json\"):\n # Change here if you want to relocate you config file\n self.config = {}\n self.load_configuration(config_path)\n self.app_name = self.config.get('app_name', self.APP_NAME)",
"def __init__(self, config_path, normalize=False):\n self.config = {}\n _config_dict = {}\n self._config_path = Utils.expand_path(config_path)\n self.update = None\n self.normalize = normalize",
"def __init__(self, config_file_name=\"config.json\"):\n with open(config_file_name, \"r\") as config:\n f = dict(json.load(config))\n for key, value in f.items():\n setattr(self, key, value)",
"def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)",
"def __init__(self, __file):\n\n\t\tself.fileName = __file\n\t\tif (os.path.isfile(self.fileName)):\n\t\t\t# config.ini found, load it\n\t\t\tself.config.read(self.fileName)\n\t\t\tself.default = False\n\t\telse:\n\t\t\t# config.ini not found, generate a default one\n\t\t\tself.generateDefaultConfig()\n\t\t\tself.default = True",
"def __init__(self, name=None):\n self.name = name or \"default\"\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\".format(config_path))",
"def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))",
"def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))",
"def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n self._cfg = ConfigDict() # current configuration\r\n self._default_config = ConfigDict() # default configuration\r\n self._temp_config = OrderedDict() # temporary configuration\r\n self._path = Path() # current configuration path\r\n self._default_path = Path() # default configuration path\r\n self._conversion_dict = None\r\n self._auto_cast = None\r\n self._write_flags = None\r\n self._force_load = None\r\n self._load_empty = None\r\n self._ask_path = None\r\n self._search_in_default_config = None\r\n self._init_count = 0\r\n self._policies = defaultdict(bool) # by default every modification is forbidden # WIP\r\n if args or kwargs:\r\n self.init(*args, **kwargs)\r\n logger.debug(\"Config object created.\")",
"def load_config(self):\n pass",
"def __init__(self):\n\n self.path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'\n self.config = configparser.ConfigParser()\n self.config.read(self.path)",
"def __init__(self, config_file: str = \"config.json\"):\n path_to_config = (Path(sys.modules[self.__module__].__file__).parent\n / config_file)\n with open(path_to_config, \"r\") as f:\n self.options = json.load(f)",
"def __init__(self, config_path=None):\n config_path = config_path or CONF.api_paste_config\n if os.path.exists(config_path):\n self.config_path = config_path\n else:\n self.config_path = CONF.find_file(config_path)",
"def __init__(self, config_file, verbose):\r\n self.loadConfig(config_file)\r\n self.verbose = verbose",
"def __init__(self):\n self.filename = pathlib.Path(__file__).parent.absolute().__str__() + '/../../data/config.ini'\n self.data = ConfigParser()\n self.data.read(self.filename)",
"def __init__(self, conf_file_location: str, template_dir: str, target_dir: str, hard_reset: bool):\n self.config: Config = yaml_loader.load(conf_file_location, Config)\n self.massage_config_file()\n self.config_dict: Dict = as_dict(self.config)\n self.template_dir = template_dir\n self.target_dir = target_dir\n self.hard_reset = hard_reset",
"def __init__(self, config_file=None):\n self.file = config_file\n self.parser = SafeConfigParser()\n if isinstance(self.file, (str, list)):\n self.parser.read(self.file)\n else: # assume file object was given instead\n self.parser.read_file(self.file)\n self._flask_cache = None\n self._assets_cache = None\n self._gridrealm_cache = None",
"def __init__(self, filename=None):\n if filename:\n if not os.path.exists(filename):\n raise Exception(\"No configuration found at %s\" % filename)\n super(Configuration, self).__init__(filename)",
"def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)",
"def __init__(self, filename):\n if filename is None:\n self.config = toml.load('configs/default.conf')\n return\n self.config = toml.load(filename)\n self.config['directory'] = {}\n self.config['directory']['root'] = os.path.dirname(\n os.path.realpath(__file__))\n self.config['directory']['datasets'] = os.path.join(\n self.config['directory']['root'], 'datasets')",
"def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]",
"def __init__(self):\n\n self.config = load_config()\n self.set_env_var()",
"def __init__(self, config_file=None):\n\t\tself.options = {}\n\n\t\tif config_file:\n\t\t\tself.set_file(config_file)",
"def __init__(self, load_config):\n super().__init__()\n self._load_config = load_config",
"def __init__(self, config_file = 'config.yaml'):\n\n self.name = ''\n self.img_dir = ''\n self.out_dir = ''\n self.cam_file = ''\n self.options_file = ''\n self.output_xml_file = ''\n\n # If there is an options file, it will overwrite the defaults \n if config_file is not None:\n self.load(config_file)",
"def __init__(self, cfg=None, **kwargs):\n self.__dir = KITConfig.configDir\n self.__cfgFile = \"\"\n\n self.__cfg = {}\n self.__default = KITConfig.defaultConfig\n\n self.__setupLogger()\n\n if cfg is not None:\n self.__cfgFile = cfg\n self.load(cfg)"
]
| [
"0.75234306",
"0.7439641",
"0.7432341",
"0.7370511",
"0.7290338",
"0.7287195",
"0.72335243",
"0.71800995",
"0.7165214",
"0.71425086",
"0.69821703",
"0.69821703",
"0.69770926",
"0.6962824",
"0.69518054",
"0.6917742",
"0.6908645",
"0.68950397",
"0.68228227",
"0.68127984",
"0.680922",
"0.67913055",
"0.67883205",
"0.6769251",
"0.67557055",
"0.6739934",
"0.6730046",
"0.67229205",
"0.6720708",
"0.67110336"
]
| 0.7500606 | 1 |
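A minimal sketch of the load-and-merge flow in the define_config record above: read JSON from an optional path, overlay the supplied dictionary, then push everything onto the class. The original uses an UpdateDict helper, which may merge nested keys recursively; a shallow dict.update stands in for it here as an assumption.

import json
from pathlib import Path

class Config:
    @classmethod
    def update_config(cls, **kwargs) -> None:
        for key, val in kwargs.items():
            setattr(cls, key, val)

    @classmethod
    def define_config(cls, config_dict=None, config_path=None) -> None:
        config = {}
        if config_path is not None:
            config = json.loads(Path(config_path).read_text())
        config.update(config_dict or {})  # values from the dict override the file
        cls.update_config(**config)

Config.define_config(config_dict={"n_jobs": 8, "plot_dir": "plots"})
print(Config.n_jobs)  # 8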
Updates the config from given keyword arguments. As Config utilises classmethods, a classmethod is required to update it. | def update_config(cls, **kwargs):
for key, val in kwargs.items():
setattr(cls, key, val) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split(\"=\", maxsplit=1)\n keylist = keys.split(\".\")\n dic = self\n # print(keylist)\n if len(keylist) == 1:\n assert keylist[0] in dir(dic), \"Unknown config key: {}\".format(\n keylist[0]\n )\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(k)\n dic = getattr(dic, k)\n key = keylist[-1]\n assert key in dir(dic), \"Unknown config key: {}\".format(key)\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)",
"def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split('=', maxsplit=1)\n keylist = keys.split('.')\n dic = self\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(keys)\n dic = getattr(dic, k)\n key = keylist[-1]\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)",
"def update(self, **kwargs):\n for k, v in kwargs.items():\n if k not in VALID_CONFIG_KEYS:\n cprint(\"war\", f\"'{k}' is not a valid key, skipping...\")\n continue\n\n if v:\n v = self._validate_option(k, v)\n self.data[k] = v",
"def update(self, arg=None, **kwargs):\n if arg:\n if hasattr(arg, 'keys'):\n for k in arg: self[k] = arg[k]\n else:\n for k, v in arg: self[k] = v\n\n if kwargs:\n for k in kwargs: self[k] = kwargs[k]",
"def update_config(config, args):\n if args.n_train is not None:\n config['data']['n_train'] = args.n_train\n if args.n_valid is not None:\n config['data']['n_valid'] = args.n_valid\n if args.real_weight is not None:\n config['data']['real_weight'] = args.real_weight\n if args.lr is not None:\n config['optimizer']['learning_rate'] = args.lr\n if args.hidden_dim is not None:\n config['model']['hidden_dim'] = args.hidden_dim\n if args.n_graph_iters is not None:\n config['model']['n_graph_iters'] = args.n_graph_iters\n if args.batch_size is not None:\n config['data']['batch_size'] = args.batch_size\n if args.n_epochs is not None:\n config['training']['n_epochs'] = args.n_epochs\n if args.weight_decay is not None:\n config['optimizer']['weight_decay'] = args.weight_decay\n\n return config",
"def update_config(config, args):\n if args.cfg:\n _update_config_from_file(config, args.cfg)\n config.defrost()\n if args.dataset:\n config.DATA.DATASET = args.dataset\n if args.batch_size:\n config.DATA.BATCH_SIZE = args.batch_size\n config.DATA.BATCH_SIZE_EVAL = args.batch_size\n if args.batch_size_eval:\n config.DATA.BATCH_SIZE_EVAL = args.batch_size_eval\n if args.image_size:\n config.DATA.IMAGE_SIZE = args.image_size\n if args.accum_iter:\n config.TRAIN.ACCUM_ITER = args.accum_iter\n if args.data_path:\n config.DATA.DATA_PATH = args.data_path\n if args.output:\n config.SAVE = args.output\n if args.eval:\n config.EVAL = True\n if args.pretrained:\n config.MODEL.PRETRAINED = args.pretrained\n if args.resume:\n config.MODEL.RESUME = args.resume\n if args.last_epoch:\n config.TRAIN.LAST_EPOCH = args.last_epoch\n if args.amp: # only for training\n config.AMP = not config.EVAL\n config.freeze()\n return config",
"def update(instance, args):\n for key in args.keys():\n setattr(instance, key, args[key])\n return instance",
"def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)",
"def config_set(self,args):\n data = args\n try:\n for i in data: \n self.sname = i[0]\n self.kname = i[1]\n self.vname = i[2]\n self.config.set(self.sname,self.kname,self.vname)\n logger.info('Kname: '+self.kname+' was set.')\n return \n except Exception as e:\n logger.error(e)\n return 1",
"def update(self, *args, **kwargs):\n if kwargs is not None:\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def cmd_config__set(args):\n\n settings = {}\n for pair in args.keyvalues:\n key, value = pair.split(\"=\", 1)\n settings[key] = value\n remote.update_config(_get_current_project_name(), settings)",
"def __init__(self, *args, **kwargs):\n wrap = lambda v: Config(v) if type(v) is dict else v\n kvdict = {k: wrap(v) for k, v in dict(*args, **kwargs).items()}\n super(Config, self).__init__(kvdict)\n self.__dict__ = self",
"def config_update(cls, **options) -> None:\n cls._logger.debug(\"[%s]: Update config from kwargs.\", cls.__name__)\n\n config_update: Dict = {k: options[k] for k in options.keys() if \"graph_\" in k}\n\n cls._config.update(config_update)\n\n cls._logger.debug(\"[%s]: Final config: %s\", cls.__name__, cls._config)",
"def FetchAndUpdateConfigFromCloud(self, *args, **kwargs):\r\n\t\tpayload = { \"Arg1\": self.href }\r\n\t\tfor i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\r\n\t\tfor item in kwargs.items(): payload[item[0]] = item[1]\r\n\t\treturn self._execute('fetchAndUpdateConfigFromCloud', payload=payload, response_object=None)",
"def __clsfn_args_kwargs(config, key, base_module=None, args=None, kwargs=None):\n logger = logging.getLogger('pytorch_lm.utils.config')\n logger.config('config: {}, key: {}, base_module: {}, args: {}, kwargs: {}'.format(\n config, key, base_module, args, kwargs))\n args = args or []\n kwargs = kwargs or {}\n module_name, _, object_name = config[key].rpartition('.')\n if base_module and not module_name:\n module = importlib.import_module(base_module)\n else:\n module = importlib.import_module(module_name)\n obj = getattr(module, object_name)\n args += config.get('args', [])\n kwargs.update(**config.get('kwargs', {}))\n return obj, args, kwargs",
"def config( **kwargs ):",
"def update(self, config):\n # find keys are in config but not in self.config\n extra_keys = set(config.keys()) - set(self.config.keys())\n if len(extra_keys) > 0:\n raise ValueError(\"keys {} in config are not in Config.config\".format(extra_keys))\n # update self.config by config\n else:\n self.config.update(config)",
"def update(self, *args, **kwargs):\n if args:\n arg_order = [\"id\", \"size\", \"x\", \"y\"]\n for index, arg in enumerate(args):\n setattr(self, arg_order[index], arg)\n elif kwargs:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)",
"def update_from_args(self, args):\n args = vars(args)\n for key in args:\n if isinstance(getattr(self, key), tf.Variable):\n getattr(self, key).assign(args[key])\n else:\n setattr(self, key, args[key])\n \n # Set the config on the data class\n self.data = DataConfig(\n self.xml_annotation_path,\n self.csv_annotation_path,\n self.oxford_annotations_path,\n self.oxford_images_path,\n )",
"def update(self, args):\n pass",
"def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']",
"def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']",
"def merge_config(args: Iterable[str], config: Config) -> Config:\n # Apply override args\n for arg in args:\n arg = arg.lstrip(\"-\")\n param, value = arg.split(\"=\")\n param_keys = param.split(\"/\")\n subconfig = config\n for idx, key in enumerate(param_keys):\n try:\n if idx == len(param_keys) - 1:\n # Cast value to type of field value and set attribute\n field_type = type(getattr(subconfig, key))\n try:\n deserialised_value = jsons.loads(value, field_type)\n except jsons.exceptions.DecodeError:\n deserialised_value = field_type(value)\n setattr(subconfig, key, deserialised_value)\n else:\n # Get subconfig from key\n subconfig = getattr(subconfig, key)\n except AttributeError as ex: # Ensure the attribute exists\n raise ValueError(\n f\"Invalid argument {arg}. Could not merge with config object.\"\n ) from ex\n except (ValueError, TypeError) as ex:\n raise ValueError(\n f\"Invalid argument {arg}. Value could not be converted to \\\n type {field_type}\"\n ) from ex\n return config",
"def configure(self, args):\n pass",
"def config(self, **kwargs):\n\n # our options that we deal with\n entry = options[\"entry\"]\n\n # cannot modify kwargs while iterating over it...\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n\n # having removed our options, pass rest to parent\n super().config(**kwargs)",
"def _update(self, config_dict, allow_new_keys=True):\r\n if not config_dict:\r\n return\r\n\r\n for k, v in six.iteritems(config_dict):\r\n if k not in self.__dict__:\r\n if allow_new_keys:\r\n self.__setattr__(k, v)\r\n else:\r\n raise KeyError('Key `{}` does not exist for overriding. '.format(k))\r\n else:\r\n if isinstance(self.__dict__[k], Config) and isinstance(v, dict):\r\n self.__dict__[k]._update(v, allow_new_keys)\r\n elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):\r\n self.__dict__[k]._update(v.as_dict(), allow_new_keys)\r\n else:\r\n self.__setattr__(k, v)",
"def with_config_options(cls, *args, **kwargs):\n opts = cls.__dict__.get(_config_options)\n if opts is None:\n opts = {}\n setattr(cls, _config_options, opts)\n for k, v in kwargs.items():\n if k not in _legal_options:\n raise ConfigException(\"Unrecognized option: {}\".format(k))\n opts[k] = v",
"def _update_params(self, *args, **kwargs):\n\n \n # Get old param dict config.\n old_config = self._param_dict.get_config()\n \n # Issue display commands and parse results.\n timeout = kwargs.get('timeout', SBE37_TIMEOUT)\n self._do_cmd_resp('ds',timeout=timeout)\n self._do_cmd_resp('dc',timeout=timeout)\n \n # Get new param dict config. If it differs from the old config,\n # tell driver superclass to publish a config change event.\n new_config = self._param_dict.get_config()\n if new_config != old_config:\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)",
"def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))",
"def update_config(self, config):\n self.config = {\n \"key\": \"\",\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['github_api_key']"
]
| [
"0.770021",
"0.7601066",
"0.6671581",
"0.6545972",
"0.65303266",
"0.6520369",
"0.6518705",
"0.6492443",
"0.6485912",
"0.6472447",
"0.64480734",
"0.63874626",
"0.63595873",
"0.6353414",
"0.63087",
"0.62969655",
"0.6281655",
"0.6270197",
"0.6191441",
"0.6169528",
"0.6153322",
"0.6153322",
"0.6117084",
"0.6113916",
"0.6095753",
"0.60936135",
"0.60796297",
"0.6069738",
"0.6064236",
"0.6060459"
]
| 0.7695604 | 1 |
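A short usage sketch for the update_config record above: because attributes are set on the class object itself, the updated values are visible anywhere the (assumed) Config class is imported later in the same process.

class Config:
    working_dir = "."

    @classmethod
    def update_config(cls, **kwargs) -> None:
        for key, val in kwargs.items():
            setattr(cls, key, val)

Config.update_config(working_dir="/tmp/run", n_workers=4)
assert Config.working_dir == "/tmp/run"
assert Config.n_workers == 4  # new attributes are created on the fly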
Converts the configuration into a dictionary format. A check is used so that properties, classmethods and variables defined on the BaseClass are not output. Returns dict A dictionary containing all the variables specific to the class. | def to_dict(cls) -> dict:
variables = dict()
for key, val in vars(cls).items():
check = [
isinstance(val, property),
isinstance(val, classmethod),
key in vars(BaseClass).keys(),
key == "_login_details",
]
if not any(check):
variables[key] = val
return variables | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_config(self) -> Dict[str, Any]:\n return {\n 'num_classes': self.num_classes,\n 'name': self.name,\n 'dtype': self.dtype,\n 'sparse_y_true': self.sparse_y_true,\n 'sparse_y_pred': self.sparse_y_pred,\n 'axis': self.axis,\n }",
"def as_dict(self) -> dict:\n return dict(vars(self))",
"def get_full_configuration(self) -> dict:\n\n return {\n input_instance.key: input_instance.argument_value\n for input_instance in self.all_input_instances\n }",
"def as_dict(self):\n return dict(method=self.method.get_config(),\n network=self.net.get_config(),\n dataset=self.dataset.get_config(),\n optimizer=self.opt.get_config(),\n log_dir=self.log_dir)",
"def as_dict(self):\n return {k: v for k, v in vars(self).items()}",
"def config(self) -> Dict[str, Any]:",
"def as_dict(self) -> Dict[str, Any]:\n\n configDict: Dict[str, Any] = {\n 'entities': self._entities,\n 'actorIdleTimeout': self._actor_idle_timeout,\n 'actorScanInterval': self._actor_scan_interval,\n 'drainOngoingCallTimeout': self._drain_ongoing_call_timeout,\n 'drainRebalancedActors': self._drain_rebalanced_actors,\n }\n\n if self._reentrancy:\n configDict.update({'reentrancy': self._reentrancy.as_dict()})\n\n if self._reminders_storage_partitions:\n configDict.update(\n {'remindersStoragePartitions': self._reminders_storage_partitions})\n\n return configDict",
"def config(self) -> dict:\n if self.__class__.__name__ == 'MyFunctionTransformer':\n pass\n else:\n check_is_fitted(self)\n\n _config = {}\n for attr in self.config_paras:\n _config[attr] = getattr(self, attr)\n\n return {\"params\": self.get_params(),\n \"config\": _config}",
"def _to_dict(self):\n return self.analysis_config",
"def _to_dict(self):\n return self.analysis_config",
"def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'name') and self.name is not None:\n _dict['name'] = self.name\n if hasattr(self, 'description') and self.description is not None:\n _dict['description'] = self.description\n if hasattr(self, 'address') and self.address is not None:\n _dict['address'] = self.address\n if hasattr(self, 'enabled') and self.enabled is not None:\n _dict['enabled'] = self.enabled\n if hasattr(self, 'health') and self.health is not None:\n _dict['health'] = self.health\n if hasattr(self, 'health_failure_reason') and self.health_failure_reason is not None:\n _dict['health_failure_reason'] = self.health_failure_reason\n return _dict",
"def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}",
"def get_config(self) -> dict:\n out = {}\n for name in self.CONFIG_DEFAULTS:\n out[name] = self.__getattribute__(name)\n return out",
"def as_dict(self) -> dict:\n return self._config",
"def to_dict(self) -> dict:\n return dict(\n class_str=f\"{self.class_object.__module__}.{self.class_object.__name__}\",\n run=self.method_str,\n args=self.arg_to_str_list(),\n kwargs=self.kwarg_to_str_dict(),\n )",
"def _deco_class_settings_dict(self) -> OrderedDict:\n return self._classname2SettingsData_dict[self.deco_class.__name__]",
"def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder",
"def to_dict(self):\n return vars(self)",
"def get_properties(self, config: Config) -> Dict[str, Scalar]:\n _ = (self, config)\n return {}",
"def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties",
"def config_to_dict(training_config: TrainingConfig) -> Dict[str, Any]:\n from neuralteleportation.experiments.teleport_training import __training_configs__\n training_config_cls_label = {v: k for k, v in __training_configs__.items()}[training_config.__class__]\n result = {\"teleport\": training_config_cls_label}\n for field in [f for f in fields(training_config) if f.name not in _SERIALIZATION_EXCLUDED_FIELDS]:\n field_value = getattr(training_config, field.name)\n if callable(field_value):\n field_value = field_value.__name__\n else:\n if type(field_value) is tuple:\n # Tuples cannot be loaded back by the yaml module\n field_value = list(field_value)\n field_value = copy.deepcopy(field_value)\n result[field.name] = field_value\n return result",
"def get_config(self):\n return {}",
"def to_dict(self) -> Dict[str, Any]:\n\n fields: Dict[str, Any] = {}\n if hasattr(self, \"url\"):\n fields[\"url\"] = self.url\n if hasattr(self, \"last_check\"):\n fields[\"last_check\"] = self.last_check\n if hasattr(self, \"param1\"):\n fields[\"param1\"] = self.param1\n return fields",
"def to_dict(self):\n dictionary = {}\n for a in dir(self):\n if a[0] is not '_':\n a_type = str(type(getattr(self, a)))\n if 'util.performance' in a_type:\n dictionary[a] = getattr(self, a).to_dict()\n elif 'int' in a_type or 'float' in a_type:\n dictionary[a] = getattr(self, a)\n return dictionary",
"def get_dic(self):\n dic = {\n 'size': self.size,\n 'bounds': self.bounds,\n 'visible': self.visible,\n 'is_static': self.is_static,\n 'options': self.options,\n 'primitive_type': self.primitive_type,\n 'constrain_ratio': self.constrain_ratio,\n 'constrain_navigation': self.constrain_navigation,\n 'framebuffer': self.framebuffer,\n # 'beforeclear': self.beforeclear,\n 'variables': self.get_variables_list(),\n 'vertex_shader': self.vertex_shader,\n 'fragment_shader': self.fragment_shader,\n }\n return dic",
"def variables_declared (self) :\r\n\t\treturn {}",
"def toDict(self):\n return vars(self)",
"def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'language') and self.language is not None:\n _dict['language'] = self.language\n if hasattr(self, 'notification_language') and self.notification_language is not None:\n _dict['notification_language'] = self.notification_language\n if hasattr(self, 'allowed_ip_addresses') and self.allowed_ip_addresses is not None:\n _dict['allowed_ip_addresses'] = self.allowed_ip_addresses\n if hasattr(self, 'self_manage') and self.self_manage is not None:\n _dict['self_manage'] = self.self_manage\n return _dict",
"def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'name') and self.name is not None:\n _dict['name'] = self.name\n if hasattr(self, 'description') and self.description is not None:\n _dict['description'] = self.description\n if hasattr(self, 'address') and self.address is not None:\n _dict['address'] = self.address\n if hasattr(self, 'enabled') and self.enabled is not None:\n _dict['enabled'] = self.enabled\n return _dict",
"def configuration(self) -> Dict[str, Any]:\n return {self.__class__.__qualname__: self._param_names}"
]
| [
"0.68040526",
"0.63548714",
"0.63082737",
"0.62583035",
"0.6253618",
"0.62368757",
"0.62050885",
"0.6196911",
"0.6142708",
"0.6142708",
"0.6119696",
"0.6111287",
"0.6092527",
"0.6048673",
"0.60294574",
"0.60033596",
"0.6000471",
"0.59962076",
"0.5983463",
"0.5973468",
"0.59676397",
"0.5962551",
"0.5951763",
"0.59425384",
"0.5938003",
"0.59344983",
"0.5927031",
"0.5922268",
"0.59119505",
"0.58782774"
]
| 0.71711314 | 0 |
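A self-contained sketch of the filtering rule in the to_dict record above: only plain class attributes survive; properties, classmethods, keys inherited from the (assumed) BaseClass and the private login cache are dropped.

class BaseClass:
    shared = "base-level attribute"

class Config(BaseClass):
    working_dir = "/tmp/run"
    _login_details = {"username": None, "password": None}

    @classmethod
    def to_dict(cls) -> dict:
        variables = {}
        for key, val in vars(cls).items():
            skip = (
                isinstance(val, (property, classmethod))
                or key in vars(BaseClass)   # also filters dunder keys shared with the base
                or key == "_login_details"
            )
            if not skip:
                variables[key] = val
        return variables

print(Config.to_dict())  # {'working_dir': '/tmp/run'}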
Inputs login details. This method stores login details if they are required. | def input_login_details(cls, domain: str = ""):
cls._login_details["username"] = str(input("username: "))
cls._login_details["password"] = str(getpass())
cls._login_details["domain"] = domain | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fake_login(self):\n self.username = self.console.input('Please type your username here:')\n self.password = self.console.input('Please type your password here:')",
"def login(self):\n\n self.__login_if_required()",
"def login():",
"def login():",
"def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res",
"def get_login_info(self):\n username = raw_input(\"Username: \")\n password = getpass.getpass(\"Password:\")\n return (username, password)",
"def _login(self, *args, **kwargs):\n pass",
"def login(self):\n\t\twhile True:\n\t\t\tos.system('clear')\n\t\t\tprint(\"1. Sign in\")\n\t\t\tprint(\"2. Sign up\")\n\t\t\tchoice = input()\n\t\t\tif choice == \"1\":\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tself._sign_up()\n\n\t\twhile self._input():\n\t\t\tos.system(\"clear\")\n\t\t\tprint(\"Wrong username or password! Please re-enter.\")",
"def login_menu(self):\n print(\"\\nPlease enter your email and password\")\n email = self.validate_email()\n password = self.validate_password()\n self.authenticate_user(email, password)",
"def perform_login(self, user_name, user_pass):\n if self.api_type == 'real':\n self.input_user_name(user_name)\n self.input_user_pass(user_pass)\n self.click_login_button()",
"def login(self):\n\t\treturn",
"def log_in(self):\n print('-=' * 12 + \" Log in \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n self._user = self.auth.log_in(mob_num, password)\n if self._user:\n print(\"you are logged in, Welcome '{}'\".format(self._user.username))\n self.homepage()\n else:\n print(\"Mobile number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.log_in, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)",
"def login(self):",
"def login(self, username: Optional[str], password: Optional[str]) -> None:\n self.username_field.fill(username)\n self.password_field.fill(password)\n\n self.submit_button.click()",
"def login(self):\r\n \r\n # Get the csrf token from the main URL\r\n csrf = self.extract_csrf(API.url_login)\r\n \r\n # Construnct the payload\r\n payload = self.cfg['payload']['login'][0]\r\n payload['csrfmiddlewaretoken'] = csrf\r\n\r\n # Test the entry with it's json schema\r\n check.check_entry(path='schemas/login.json', test=payload)\r\n\r\n # Login request \r\n requests.post(API.url_login, payload, headers={'Referer' : API.url_login})",
"def log_in(self, ctx: Context):\n email = json.loads(ctx.users)['username']\n password = json.loads(ctx.users)['password']\n InputFunctions.send_keys_to_element_by_name(\n self.ctx, self.locators, \"email_input\", email\n )\n InputFunctions.send_keys_to_element_by_name(\n self.ctx, self.locators, \"password_input\", password\n )\n ClickFunctions.click_element_by_name(ctx, self.locators, \"login_button\")\n ClickFunctions.click_element_by_name(ctx, self.locators, \"back_to_content\")",
"def login(self):\n #raise NotImplementedError(\"This method must be overridden\")",
"def login(self, email, password):\r\n self.provide_info(email, password)\r\n self.submit()",
"def process_login():\n\n\temail2 = request.form.get('email')\n\tpassword2 = request.form.get('password')\n\tnote = \"\"\n\n\tprint \"email and pass\", email2, password2\n\t\n\tif email2:\n\t\tuser = model.get_user(email2, password2)\n\t\t#if user is correctly identified in the system\n\t\tif user == True:\n\t\t\tsession['email'] = email2\n\t\t\tnote = \"Welcome %s\" %(email2)\n\t\telse: #bad password\n\t\t\tnote = \"Please make sure you correctly entered your email and password\"\n\n\treturn render_template(\"login.html\")",
"def post(self):\n args = login_parser.parse_args()\n if request.form:\n username = request.form['username']\n password = request.form['password']\n else:\n username = args['username'] # form['username']\n password = args['password'] # form['password']\n\n return self.try_login(username, password)",
"def step_impl_1(context, username, pwd):\n\n br = context.browser\n br.get(context.server_url + '/accounts/login/')\n\n user = br.find_element_by_id(\"username\")\n pswd = br.find_element_by_id(\"password\")\n\n user.send_keys(username)\n pswd.send_keys(pwd)\n br.find_element_by_id(\"submit\").click()",
"def log_in(self):\n\n # Get login page.\n self.get_endpoint(endpoint=self.config['paths']['login'])\n\n # Post log-in data.\n email_form = self.browser.find_element_by_xpath(\"//input[@id='email']\")\n pw_form = self.browser.find_element_by_xpath(\"//input[@id='password']\")\n email_form.send_keys(self.credentials['email'])\n pw_form.send_keys(self.credentials['password'])\n\n # Initial log-in returns /private endpoint.\n self.browser.find_element_by_xpath(\"//input[@type='submit']\").click()",
"def login(self):\n\n # Fetch and parse hidden inputs from login page\n # Use specific CA bundle to fix SSL verify problems if set as env.\n verify = True\n\n override_ca_bundle = os.getenv('OVERRIDE_CA_BUNDLE')\n if override_ca_bundle:\n verify = override_ca_bundle\n\n req = self.session.get(self.BASE_URL + '/im/login/privat',\n verify=verify)\n\n # Get the login form\n soup = BeautifulSoup(req.content, 'html.parser')\n login_form = soup.select('#pPin_form')\n\n # Post login to current URL\n login_post_url = req.url\n\n # Build POST data with login settings and hidden inputs\n data = self._hidden_inputs_as_dict(login_form)\n data['pPin_inp'] = self.personal_identity_number\n data['pPinKod_inp'] = self.pin_code\n\n # Login request\n req = self.session.post(login_post_url, data=data)\n self.last_req_body = req.content\n\n self._parse_tokens(req.text)\n\n return True",
"def loginFunc(self):\n username = (\n self.lineEdit.text()\n ) # Get the text from the username & password lineedit\n password = self.lineEdit_2.text() #\n # Check if password and username isnt empty, if it is, popup\n if DB.verify_login(username, password) \\\n and not DB.new_customer(username):\n self.customer.budget.set_budget(DB.get_income(self.customer.email),\n DB.get_variable_expenses(self.customer.email),\n DB.get_fixed_expenses(self.customer.email))\n self.customer.budget.set_buffert(DB.get_buffert(username))\n self.displayUi = MenuScreen()\n self.hide()\n self.displayUi.show()\n elif DB.verify_login(username, password) and DB.new_customer(username):\n self.displayUi = FirstLoginScreen()\n self.hide()\n self.displayUi.show()\n else:\n self.popUp.exec_()",
"def login(self):\n request = self.REQUEST\n response = request['RESPONSE']\n\n login = request.get('__ac_name', '')\n password = request.get('__ac_password', '')\n\n pas_instance = self._getPAS()\n\n if pas_instance is not None:\n pas_instance.updateCredentials(request, response, login, password)",
"def login_details(cls) -> dict:\n if None in list(cls._login_details.values()):\n cls.input_login_details()\n return cls._login_details",
"def login(self):\n if self._cookie_cached(self.login_email):\n self.cookie_login(self.login_email)\n else:\n self.new_login(self.login_email, self.login_pass)",
"def login(self):\r\n\r\n # Open browser with the login URL\r\n self.browser.open(self.config[\"base_url\"] + \"login\")\r\n\r\n # Select the login form\r\n self.browser.select_form('form[action=\"/login/\"]')\r\n\r\n # Fill the login form.\r\n self.browser[\"email\"] = self.config[\"email\"]\r\n self.browser[\"password\"] = self.config[\"password\"]\r\n\r\n # Submit form\r\n self.browser.submit_selected()",
"def login():\r\n if not request.is_json or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n return login_user(request)",
"def login(self):\n input_username = self.username_var.get()\n input_password = self.password_entry.get()\n\n db = self.pager_frame.master_root.db # gets database obj from main root of application\n user_type = ('Staff', 'Student')[int(self.is_student)]\n logging.debug(f'A {user_type.lower()} user attempted to log in with username \"{input_username}\"')\n\n if user_type == 'Staff':\n # noinspection PyTypeChecker\n login_table_obj: data_handling.StaffTable = db.get_table_by_name('StaffTable')\n else: # is Student\n # noinspection PyTypeChecker\n login_table_obj: data_handling.StudentLoginTable = db.get_table_by_name('StudentLoginTable')\n\n if input_username in login_table_obj.row_dict.keys(): # if username is valid, verifies pwd\n login_obj = login_table_obj.row_dict[input_username]\n if password_logic.verify_pwd_str(input_password, login_obj.password_hash):\n logging.info(f'Username \"{input_username}\" '\n f'successfully logged into {user_type.lower()} application')\n\n if user_type == 'Staff':\n self.pager_frame.change_to_page(\n destination_page=ui.staff.StudentOverview,\n staff=login_obj\n )\n\n elif user_type == 'Student':\n # noinspection PyUnresolvedReferences\n user_id = login_obj.student_id\n\n # noinspection PyTypeChecker\n # gets Student obj specified by logged in username\n logged_in_student: data_handling.Student = db.get_table_by_name('StudentTable').row_dict[user_id]\n\n # changes page appropriately, providing StudentAwardDashboard\n # frame with the Student obj information to update text\n self.pager_frame.change_to_page(\n destination_page=ui.student.StudentAwardDashboard,\n student=logged_in_student,\n username=input_username,\n )\n\n return # ends function so error below is not displayed\n\n msg.showerror('Login Failed', 'Username and/or password incorrect.\\n'\n 'Make sure you are on the correct login page')"
]
| [
"0.7298498",
"0.7107218",
"0.69868815",
"0.69868815",
"0.6956484",
"0.6897951",
"0.6897406",
"0.6877067",
"0.68717444",
"0.6866643",
"0.6825106",
"0.6739646",
"0.67386276",
"0.67325664",
"0.6723227",
"0.66795963",
"0.6673001",
"0.66649806",
"0.6650597",
"0.6638834",
"0.66247314",
"0.66064996",
"0.66033345",
"0.6597117",
"0.658241",
"0.64956266",
"0.6493311",
"0.6485407",
"0.64403653",
"0.6416825"
]
| 0.76552 | 0 |
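A minimal sketch of the credential prompt above, together with the caching accessor documented in the record that follows: getpass keeps the password out of the terminal echo, the values are cached on the class, and the accessor only re-prompts while any value is still None. Config and the key names are assumptions.

from getpass import getpass

class Config:
    _login_details = {"username": None, "password": None, "domain": None}

    @classmethod
    def input_login_details(cls, domain: str = "") -> None:
        cls._login_details["username"] = input("username: ")
        cls._login_details["password"] = getpass()
        cls._login_details["domain"] = domain

    @classmethod
    def login_details(cls) -> dict:
        # Prompt once, then serve the cached credentials on later calls.
        if None in cls._login_details.values():
            cls.input_login_details()
        return cls._login_details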
Runs the login details update if any of the values are None. Returns dict The populated login details. | def login_details(cls) -> dict:
if None in list(cls._login_details.values()):
cls.input_login_details()
return cls._login_details | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def input_login_details(cls, domain: str = \"\"):\n cls._login_details[\"username\"] = str(input(\"username: \"))\n cls._login_details[\"password\"] = str(getpass())\n cls._login_details[\"domain\"] = domain",
"def update_user_login_data():\n if not 'user' in session:\n raise InvalidUsage(\"Access denied\", 401)\n\n data = request.json\n if 'email' not in data or not data['email']:\n raise InvalidUsage(\"Email must not be empty\", 422)\n if 'currentPassword' not in data or len(data['currentPassword']) < 6:\n raise InvalidUsage(\"Current password must have more then 5 characters\", 422)\n if 'newPassword' not in data or len(data['newPassword']) < 6:\n raise InvalidUsage(\"New password must have more then 5 characters\", 422)\n if 'newPasswordAgain' not in data or data['newPassword'] != data['newPasswordAgain']:\n raise InvalidUsage(\"New password does not match\", 422)\n\n database = mysql.get_db()\n cursor = database.cursor()\n activeUser = session.get('user')\n\n query = '''SELECT password\n FROM users\n WHERE users.id = %s'''\n\n cursor.execute(query, (activeUser['id']))\n user = cursor.fetchone()\n\n if not bcrypt.check_password_hash(user['password'], data['currentPassword']):\n raise InvalidUsage(\"Wrong current password\", 401)\n\n query = '''SELECT id\n FROM users\n WHERE users.email = %s AND users.id != %s'''\n\n cursor.execute(query, (data['email'], activeUser['id']))\n cursor.fetchone()\n\n if cursor.rowcount != 0:\n raise InvalidUsage(\"User with this email already exists\", 422)\n\n query = '''UPDATE users\n SET email = %s, password = %s\n WHERE id = %s'''\n\n hashed_password = bcrypt.generate_password_hash(data['newPassword'])\n cursor.execute(query, (data['email'], hashed_password, session.get('user')['id']))\n database.commit()\n\n activeUser['email'] = data['email']\n session['user'] = activeUser\n\n return jsonify({'message': 'Successfully updated'}), 200",
"def get_login_info(self):\n username = raw_input(\"Username: \")\n password = getpass.getpass(\"Password:\")\n return (username, password)",
"def __makeLoginDict(self, loginName, password, data=None):\n dict = {\n 'accountName': loginName,\n 'password': password\n }\n if data:\n dict.update(data)\n return dict",
"def login(self):\n request = self.REQUEST\n response = request['RESPONSE']\n\n login = request.get('__ac_name', '')\n password = request.get('__ac_password', '')\n\n pas_instance = self._getPAS()\n\n if pas_instance is not None:\n pas_instance.updateCredentials(request, response, login, password)",
"def define_login_password():\n file_path = 'login.txt'\n logger.debug('Loading login information')\n login_info = {}\n f = open(file_path, 'rb')\n for l in f.readlines():\n if 'login' in l:\n login_info['login'] = l.split('=')[1].strip()\n else:\n login_info['pwd'] = l.split('=')[1].strip()\n return login_info",
"def post_login_processing(params: KeeperParams, resp: proto.LoginResponse):\n params.user = resp.primaryUsername\n params.account_uid_bytes = resp.accountUid\n session_token = CommonHelperMethods.bytes_to_url_safe_str(resp.encryptedSessionToken)\n params.session_token = session_token\n\n login_type_message = LoginV3Flow.get_data_key(params, resp)\n params.password = None\n params.clone_code = utils.base64_url_encode(resp.cloneCode)\n loader.store_config_properties(params)\n\n LoginV3Flow.populateAccountSummary(params)\n\n if resp.sessionTokenType != proto.NO_RESTRICTION:\n # This is not a happy-path login. Let the user know what's wrong.\n if resp.sessionTokenType in (proto.PURCHASE, proto.RESTRICT):\n params.session_token = None\n msg = (\n 'Your Keeper account has expired. Please open the Keeper app to renew or visit the Web '\n 'Vault at https://keepersecurity.com/vault'\n )\n raise Exception(msg)\n elif resp.sessionTokenType == proto.ACCOUNT_RECOVERY:\n print('Your Master Password has expired, you are required to change it before you can login.\\n')\n if LoginV3Flow.change_master_password(params):\n return False\n else:\n params.clear_session()\n raise Exception('Change password failed')\n elif resp.sessionTokenType == proto.SHARE_ACCOUNT:\n logging.info('Account transfer required')\n accepted = api.accept_account_transfer_consent(params)\n if accepted:\n return False\n else:\n params.clear_session()\n raise Exception('Account transfer logout')\n else:\n raise Exception('Please log into the web Vault to update your account settings.')\n\n if params.license and 'account_type' in params.license:\n if params.license['account_type'] == 2:\n try:\n rs = api.communicate_rest(params, None, 'enterprise/get_enterprise_public_key', rs_type=breachwatch_proto.EnterprisePublicKeyResponse)\n if rs.enterpriseECCPublicKey:\n params.enterprise_ec_key = crypto.load_ec_public_key(rs.enterpriseECCPublicKey)\n if rs.enterprisePublicKey:\n params.enterprise_rsa_key = crypto.load_rsa_public_key(rs.enterprisePublicKey)\n except Exception as e:\n logging.debug('Get enterprise public key: %s', e)\n\n if params.license and params.license.get('breach_watch_enabled', False) and not params.license.get('breach_watch_feature_disable', False):\n params.breach_watch = BreachWatch()\n if params.enforcements and 'booleans' in params.enforcements:\n bw_audit = next((x.get('value') for x in params.enforcements['booleans'] if x.get('key') == 'send_breach_watch_events'), None)\n if bw_audit:\n params.breach_watch.send_audit_events = True\n\n logging.info(bcolors.OKGREEN + \"Successfully authenticated with \" + login_type_message + \"\" + bcolors.ENDC)\n return True",
"def get_login_form_data(self, login: str, password: str) -> dict:\n return {'username': login, 'password': password, 'autologin': 1, 'redirect': '', 'login': 'pushed'}",
"def init_login():\n print(\"init_login\")\n # get one user\n users = uis.get_all()\n # print(user)\n for user in users:\n if user:\n mobile = Mobile(user[2])\n mobile.android_id = user[11]\n mobile.mac = user[10]\n mobile.brand = user[9]\n mobile.os = user[8]\n ktt = KTT(mobile)\n ktt.device_code = user[6]\n ktt.get_api_start()\n time.sleep(5)\n ktt.post_login()\n\n # balance (string), coin (int), token (string), device_code(string), uid (int)\n user_info = [(user[4], user[5], ktt.token, ktt.device_code, user[0])]\n # update user info\n print(user_info)\n uis.update(user_info)\n time.sleep(10)",
"def update(login, passwd):\r\n if login is None or passwd is None:\r\n print (\"Give me login and password of Comunio to update the database.\")\r\n exit(1)\r\n\r\n update_all(login, passwd)",
"def testUpdateCredentials(self):\r\n \r\n credentials = dict()\r\n credentials[\"username\"] = \"\"\r\n credentials[\"password\"] = \"\"\r\n self._factory.updateCredentials(credentials)",
"def _login(self, *args, **kwargs):\n pass",
"def login(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Hi!\\nI am your sauna control bot.')\n try:\n __login.login_user(context.args[0])\n update.message.reply_text(__login.get_log_status_text())\n\n except (IndexError, ValueError):\n update.message.reply_text(__login.get_log_status_text())\n return",
"def login():",
"def login():",
"def update(self, response: requests.Response) -> None:\n # logger.info(\"Recieved headers %s\", response.headers)\n # If a pin or autologin is returned, save it.\n # Autologin is provided when authenticating with password\n # Pin should be provided when authenticating with password/autologin\n if \"X-Pin\" in response.headers:\n self.pin = response.headers[\"X-Pin\"]\n if \"X-Autologin\" in response.headers:\n self.autologin = response.headers[\"X-Autologin\"]",
"def get_basic_userinfo(self, loginID, my_id):\n info = {'loginID': '', 'firstName': '', 'lastName': '', 'orderCount': 0, 'books_purchased': 0,\n 'num_comments': 0,\n 'comments': [], 'books_commented': [], 'trusted': 0, 'untrusted': 0, 'personalStatus': ''}\n self.cursor.execute(\"\"\"SELECT DISTINCT C.loginID, firstName, lastName, COUNT(DISTINCT orderNumber),\n COUNT(DISTINCT commentID) FROM customercredentials C, comment CO, orderlog O \n WHERE C.loginID = %s AND O.loginID = %s AND CO.loginID = %s\"\"\", (loginID, loginID, loginID))\n\n result = self.cursor.fetchone()\n info['loginID'] = result[0]\n info['firstName'] = result[1]\n info['lastName'] = result[2]\n info['orderCount'] = result[3]\n info['num_comments'] = result[4]\n\n self.cursor.execute(\"\"\"SELECT SUM(quantity) FROM orderlog O, productof P WHERE O.orderNumber = P.orderNumber\n AND loginID=%s\"\"\", (loginID,))\n result = self.cursor.fetchone()\n info['books_purchased'] = result[0]\n\n self.cursor.execute(\"\"\"SELECT * FROM comment WHERE loginID = %s ORDER BY commentDate DESC\"\"\", (loginID,))\n result = self.cursor.fetchall()\n for comment in result:\n info['comments'].append(comment)\n\n for comment in info['comments']:\n info['books_commented'].append(self.get_single_book_info(comment[1]))\n self.cursor.execute(\"\"\"SELECT COUNT(loginID) FROM trusts WHERE otherLoginID=%s AND trustStatus='TRUSTED'\"\"\",\n (loginID,))\n result = self.cursor.fetchone()\n info['trusted'] = result[0]\n\n self.cursor.execute(\"\"\"SELECT COUNT(loginID) FROM trusts WHERE otherLoginID=%s AND trustStatus='UNTRUSTED'\"\"\",\n (loginID,))\n result = self.cursor.fetchone()\n info['untrusted'] = result[0]\n\n self.cursor.execute(\"\"\"SELECT trustStatus FROM trusts WHERE loginID=%s AND otherLoginID=%s\"\"\",\n (my_id, loginID))\n result = self.cursor.fetchone()\n if result:\n info['personalStatus'] = result[0]\n return info",
"def update_user_details():\n current_user = get_jwt_identity()\n if not current_user:\n print('uri=/login error=\"Missing username parameter\"')\n return jsonify({\"msg\": \"Missing username parameter\"}), 400\n\n if not request.is_json:\n print('uri=/login error=\"Missing JSON in request\"')\n return jsonify({\"msg\": \"Missing JSON in request\"}), 400\n\n username = request.json.get('username', None)\n first_name = request.json.get('firstName', None)\n last_name = request.json.get('lastName', None)\n dispositions = request.json.get('dispositions', None)\n good_with_animals = request.json.get('goodWithAnimals', None)\n good_with_children = request.json.get('goodWithChildren', None)\n animal_leashed = request.json.get('animalLeashed', None)\n animal_preference = request.json.get('animalPreference', None)\n\n if not username:\n print('uri=/login error=\"Missing username parameter\"')\n return jsonify({\"msg\": \"Missing username parameter\"}), 400\n if not first_name:\n print('uri=/login error=\"Missing first name parameter\"')\n return jsonify({\"msg\": \"Missing first name parameter\"}), 400\n if not last_name:\n print('uri=/login error=\"Missing last name parameter\"')\n return jsonify({\"msg\": \"Missing last name parameter\"}), 400\n\n response = {\n 'update_user_detail_result': False,\n 'update_dispositions': False,\n 'update_preference': False\n }\n\n if UserDetail.get_user_detail(User.get_username_by_id(current_user)):\n result = UserDetail.update_user_detail(\n current_user,\n username=username,\n first_name=first_name,\n last_name=last_name\n )\n if result:\n response['update_user_detail_result'] = result\n\n if animal_preference:\n adopter = Adopter.get_adopter_by_name(username)\n assign_animal_preference_result = adopter.assign_animal_preference_by_name(animal_preference)\n response['animal_preference'] = assign_animal_preference_result\n\n if not dispositions:\n dispositions = []\n if good_with_animals:\n dispositions.append('Good with other animals')\n if good_with_children:\n dispositions.append('Good with children')\n if animal_leashed:\n dispositions.append('Animal must be leashed at all times')\n\n if UserDetail.get_user_detail(username):\n dispo_result = UserDetail.update_user_dispositions(\n username=username,\n dispositions=dispositions\n )\n response['assign_dispositions'] = dispo_result\n else:\n response['assign_dispositions'] = False\n\n if response['update_user_detail_result'] or response['update_dispositions'] or response['update_preference']:\n return jsonify(message=response), 200\n else:\n return jsonify(message=response), 500",
"def create_login_form_data(self, password: Optional[str] = None) -> dict:\n if password is None:\n password = self.user_password\n\n return {'name': self.user_name, 'password': password, 'submit': True}",
"def customer_login_put(user_details):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n UPDATE `user_login` \n SET `old_password`= `password`, \n `password` = SHA('%(password)s'), \n `change_password_date` = NOW() \n WHERE `user_id` = \\\"%(user_id)s\\\"\n \"\"\" % (user_details)\n\n cursor = db.cursor()\n result = {\"success\" : 0, \"message\" : \"Customer Login not updated\"}\n try:\n if cursor.execute(query):\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Login updated Successfully\"}\n except Exception as e:\n result = {\"success\" : 1, \"message\" : \"Customer Login can not be created. Error \\\"\\'%s\\'\\\" \" % (e) }\n finally:\n cursor.close()\n db.close()\n\n return result",
"def extractCredentials(self, request):\n \n creds = {}\n# import pdb\n# pdb.set_trace()\n if self.jid_auth_header in request.keys():\n dn = request.get(self.jid_auth_header, '')\n if not bool(dn):return creds\n # fetch remote ip\n creds['clientip'] = get_ip(request)\n # Looking into the cookie first...\n if self.cookie_name in request.keys():\n try:\n creds[\"cookie\"] = binascii.a2b_base64(\n request.get(self.cookie_name)\n )\n except binascii.Error:\n # If we have a cookie which is not properly base64 encoded it\n # can not be ours.\n return creds\n else:\n ticket = creds[\"cookie\"] \n ticket_data = self._validateTicket(ticket)\n if ticket_data is not None:\n (digest, userid, tokens, user_data, timestamp) = ticket_data\n creds[\"login\"] = userid\n creds[ 'password' ] = userid\n creds['init_login'] = False\n creds[\"source\"] = \"emc.session\" \n return creds \n \n login_pw = self.extractAuthGWInfo(dn) \n if login_pw is not None:\n id, name, idnumber = login_pw\n creds[ 'login' ] = id\n creds[ 'password' ] = idnumber \n creds[\"cookie\"] = \"\"\n creds['init_login'] = True\n creds[\"url\"] = request['URL']\n creds[\"source\"] = \"emc.session\"\n return creds\n\n else:\n if self.cookie_name in request.keys():\n\n try:\n creds[\"cookie\"] = binascii.a2b_base64(\n request.get(self.cookie_name)\n )\n except binascii.Error:\n # If we have a cookie which is not properly base64 encoded it\n # can not be ours.\n return creds\n else:\n ticket = creds[\"cookie\"] \n ticket_data = self._validateTicket(ticket)\n if ticket_data is not None:\n# (digest, userid, tokens, user_data, timestamp) = ticket_data\n #fire a logout event and call resetCredentials\n logging.info(\"logout\")\n from plone import api\n url = \"%s/index.html\" % api.portal.get().absolute_url()\n if url == request['URL']:\n logout(request)\n self.resetCredentials(request, request['RESPONSE']) \n return creds\n else:\n return creds\n \n else:\n return creds",
"def credentials(bot, update, user_data):\n chat_id = update.message.chat_id\n # If message contains less or more than 2 arguments, send message and stop.\n try:\n Student_ID, password = update.message.text.split()\n except ValueError:\n messageContent = textwrap.dedent(\"\"\"\n Oops, you made a mistake! \n You must send the Student_ID and password in a single line, separated by a space.\n This is what valid login credentials look like:\n `123name4567 password`\n \"\"\")\n bot.send_chat_action(chat_id=chat_id, action='typing')\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent, parse_mode='markdown')\n return\n\n if not check_login(Student_ID, password):\n messageContent = textwrap.dedent(\"\"\"\n Looks like your credentials are incorrect! Give it one more shot.\n This is what valid login credentials look like:\n `123name4567 password`\n \"\"\")\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent, parse_mode='markdown')\n return\n\n # Create an object of Class <Chat> and store Student_ID, password, and Telegram\n # User ID, Add it to the database, commit it to the database.\n\n userChat = Chat(PID=Student_ID, password=password, chatID=chat_id)\n db_session.add(userChat)\n db_session.commit()\n\n\n messageContent = textwrap.dedent(\"\"\"\n Now enter your Date of Birth (DOB) in the following format:\n `DD/MM/YYYY`\n \"\"\")\n update.message.reply_text(messageContent, parse_mode='markdown')\n user_data['Student_ID'] = Student_ID\n return PARENT_LGN",
"def authentUser(login, password, pcID):\n\n serverDatetime = datetime.now()\n\n serverDatetime = str(serverDatetime)\n\n db = DataBaseConnection()\n\n if db:\n query = f\"SELECT * FROM {Account.tablename} WHERE {Account.loginCol} = ?\"\n\n try:\n db.cursor.execute(query, login)\n except Exception as error:\n return {\"flag\": \"queryError\", \"message\": f\"{error} | {query}\"}\n else:\n row = db.cursor.fetchone()\n\n if row:\n accountObj = Account(*row)\n\n if password == accountObj.password:\n if not accountObj.firstPCID:\n status, message = accountObj.modifyColumn(column=accountObj.firstPCIDCol, newValue=pcID)\n\n return {\"flag\": \"activated\", \"message\": f\"activated first succefuul | {status} | {message} | {row}\"} if status else {\"flag\": \"updateError\", \"message\": \"error to update first user infos\"}\n\n elif not accountObj.secondPCID:\n status, message = accountObj.modifyColumn(column=accountObj.secondPCIDCol, newValue=pcID)\n\n return {\"flag\": \"activated\", \"message\": f\"activated second succefuul | {status} | {message} | {row}\"} if status else {\"flag\": \"updateError\", \"message\": \"error to update second user infos\"}\n\n\n else: # all keys are used\n return {\"flag\": \"allKeysUsed\", \"Userinfos\": [accountObj.firstPCID, accountObj.secondPCID]}\n \n else: # invalide password\n return {\"flag\": f\"invalidPassword\", \"message\": \" {query} | {row}\"}\n \n else:\n return {\"flag\": f\"invalidLogin\", \"message\": f\"{query}\"}",
"def login(self):\n\t\treturn",
"def build_passwords(self, project_update, runtime_passwords):\n passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)\n if project_update.credential:\n passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')\n passwords['scm_username'] = project_update.credential.get_input('username', default='')\n passwords['scm_password'] = project_update.credential.get_input('password', default='')\n return passwords",
"def get_login():\n try:\n data = []\n\n details_regex = r\"([a-zA-Z]+\\s[a-zA-Z]+\\s+[0-9]+\\s[0-9]+:[0-9]+)\\s+(.*)\"\n username_regex = r\"^[a-zA-Z0-9]+\\s\"\n\n output = subprocess.check_output(\"last\")\n output = output.decode(\"utf-8\").split(\"\\n\")\n\n for line in output:\n username = re.findall(username_regex, line)\n if username != []:\n username = username[0].strip(\" \")\n if username != \"reboot\":\n details = re.findall(details_regex, line)\n if details != []:\n date = details[0][0]\n status = details[0][1].strip(\"-\")\n status = status.strip(\" \")\n login_row = {\"name\": username, \"date\": date, \"status\": status}\n data.append(login_row)\n return jsonify(data=data), 200\n except Exception as e:\n print(e)\n return \"404\", 404",
"def setCredentials( self, login=MissingValue, password=MissingValue ):\n if login is not MissingValue:\n self._login = login\n if password is not MissingValue:\n self._password = password",
"def users_login(self):\n try:\n assert request.is_json\n except AssertionError:\n self.logger.debug(messages.REQUEST_IS_NOT_JSON)\n return messages.ERROR_JSON % messages.REQUEST_IS_NOT_JSON, 400\n content = request.get_json()\n if not LOGIN_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug((messages.MISSING_FIELDS_ERROR % (LOGIN_MANDATORY_FIELDS - set(content.keys()))))\n return messages.ERROR_JSON % (\n messages.MISSING_FIELDS_ERROR % (LOGIN_MANDATORY_FIELDS - set(content.keys()))), 400\n try:\n login_dict = self.auth_server.user_login(email=content[\"email\"], plain_password=content[\"password\"])\n except InvalidCredentialsError:\n self.logger.debug(messages.WRONG_CREDENTIALS_MESSAGE)\n return messages.ERROR_JSON % messages.WRONG_CREDENTIALS_MESSAGE, 403\n except UnexistentUserError:\n self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % content[\"email\"])\n return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % content[\"email\"]), 404\n\n if \"notification_token\" in content:\n self.notification_database.set_notification_token(content[\"email\"], content[\"notification_token\"])\n\n return json.dumps(login_dict), 200",
"def login():\n data = request.get_json()\n email = data.get('email')\n password = data.get('pwrd')\n user = SQLModel.get_by_attrs(('email', 'pwrd'), 'users', 'email', email)\n try:\n user_pw = user[0][1]\n user_nick = user[0][0]\n if password == user_pw:\n stuff = SQLModel.get_by_attrs(('login', 'pwrdHash', 'type', 'name'), 'users', 'login', login)\n return jsonify(stuff)\n else:\n return 'fail'\n except:\n return 'fail'",
"def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res"
]
| [
"0.62196344",
"0.5900246",
"0.58829457",
"0.5867684",
"0.58169377",
"0.56897306",
"0.56831723",
"0.5669447",
"0.56359315",
"0.5604115",
"0.55840516",
"0.5552345",
"0.55519444",
"0.55302685",
"0.55302685",
"0.55230725",
"0.5508681",
"0.5496048",
"0.5488227",
"0.547313",
"0.545915",
"0.5447315",
"0.5426915",
"0.5422156",
"0.5403748",
"0.5401981",
"0.54011023",
"0.5399596",
"0.539836",
"0.5394194"
]
| 0.71085674 | 0 |
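The login_details record above is a small credential-caching pattern: keep a class-level dict, prompt only while any stored value is still None, then return the populated dict. A hedged sketch of that pattern, with Session as a hypothetical host class (the record does not show the original class name):

from getpass import getpass


class Session:
    _login_details: dict = {"username": None, "password": None, "domain": ""}

    @classmethod
    def input_login_details(cls, domain: str = "") -> None:
        # Prompt interactively; getpass keeps the password off the terminal echo.
        cls._login_details["username"] = input("username: ")
        cls._login_details["password"] = getpass()
        cls._login_details["domain"] = domain

    @classmethod
    def login_details(cls) -> dict:
        # Prompt only when something is still missing, then return the cache.
        if None in cls._login_details.values():
            cls.input_login_details()
        return cls._login_details

Calling Session.login_details() twice prompts only on the first call, since the second call finds no None values left in the cache.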
The variables in the data. Returns np.ndarray An array of the dataframe index, if it exists. | def vars(self) -> np.ndarray:
if isinstance(self.data, pd.DataFrame) is False:
return np.array([])
else:
return np.array(self.data.index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def variables(self):\n return self.dataset.data_vars",
"def get_variables(self) -> np.array:\n pass",
"def index(self, variables):\n return [self._variables.index(v) for v in variables]",
"def variables(self):\n return np.array(list(self._match_result_dict.keys()))",
"def get_data_idx(self)->list:\n return self.__data_idx",
"def data_variable_names(self):\n data_names = []\n mesh = self.mesh_names()\n prefix = mesh[0]+'_'\n for vname in self.nc.variables.keys():\n if vname.startswith(prefix):\n if self.nc.dimensions.has_key(vname):\n continue\n if hasattr(self.nc.variables[vname],'cf_role'):\n continue\n data_names.append( vname[len(prefix):] )\n return data_names",
"def index(self):\n return self.data.index.values",
"def FindIdxValues(X):\n data = X.select_dtypes(include=[\"float64\"])\n idx = np.argwhere(~np.isnan(data.values))\n idx[:, 1] += 4 # add ID variable columns\n StoE = pd.read_csv(\"msresist/data/MS/CPTAC/IDtoExperiment.csv\")\n assert all(StoE.iloc[:, 0] == data.columns), \"Sample labels don't match.\"\n StoE = StoE.iloc[:, 1].values\n tmt = [[StoE[idx[ii][1] - 4]] for ii in range(idx.shape[0])]\n return np.append(idx, tmt, axis=1)",
"def _populate_dff_data(self):\n all_data = np.load(self.results_file)[\"F_dff\"]\n if self.with_labeling is None:\n return all_data, np.arange(all_data.shape[0])\n\n indices = np.load(self.colabel_file)\n if self.with_labeling:\n return all_data[indices], indices\n if not self.with_labeling:\n all_indices = np.arange(all_data.shape[0])\n remaining_indices = np.delete(all_indices, indices)\n remaining_traces = all_data[remaining_indices]\n return remaining_traces, remaining_indices",
"def var_data(self, index):\n data = np.ma.array(np.zeros(self.n_levels()), mask=True)\n if index is not None:\n for i in range(self.n_levels()):\n if self.profile_data[i]['variables'][index]['Missing']: continue\n data[i] = self.profile_data[i]['variables'][index]['Value']\n return data",
"def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)",
"def indices_of_var(v):\n name = v.varName\n indices = name[2:].split(',')\n i, j = int(indices[0]), int(indices[1])\n return i, j",
"def extract_var_data(self, var_names):\n variable_dict = {} # Declaring dictionary used to store key-val pairs, var_name as key and the array as the value\n try:\n for var in var_names:\n variable_dict[var] = self.dataset[var].values\n return variable_dict\n except Exception as e:\n print(\"An Error occured:\", e)\n raise e",
"def get_variables(self) -> np.array:\n return np.array([self.a, self.b, self.c])",
"def dataValidation(self, in_nc):\r\n vars_oi_index = None\r\n data_nc = NET.Dataset(in_nc)\r\n dims = list(data_nc.dimensions)\r\n if dims not in self.dims_oi:\r\n raise Exception(self.errorMessages[1])\r\n\r\n vars = list(data_nc.variables)\r\n\r\n if vars == self.vars_oi[0]:\r\n vars_oi_index = 0\r\n elif vars == self.vars_oi[1]:\r\n vars_oi_index = 1\r\n elif vars == self.vars_oi[2]: # Line Added/Modified CJB 20190108\r\n vars_oi_index = 2 # Line Added/Modified CJB 20190108\r\n else: \r\n raise Exception(self.errorMessages[2])\r\n\r\n return vars_oi_index",
"def index(self):\n return self.dataset.index",
"def variables_used (self) :\r\n\t\t## These names possibly contain dimension specification!\r\n\t\treturn self.variable_names",
"def getSelectedVariables(self):\r\n\r\n variables = []\r\n\r\n if self.ui.variablesStackedWidget.currentWidget() == self.ui.treePage:\r\n for index in self.ui.treeView.selectionModel().selectedRows():\r\n sourceIndex = self.treeFilterModel.mapToSource(index)\r\n treeItem = sourceIndex.internalPointer()\r\n if treeItem.variable is not None:\r\n variables.append(treeItem.variable)\r\n else:\r\n for index in self.ui.tableView.selectionModel().selectedRows():\r\n sourceIndex = self.tableFilterModel.mapToSource(index)\r\n variable = sourceIndex.internalPointer()\r\n variables.append(variable)\r\n\r\n return variables",
"def time_indices(self, df):\r\n X = np.empty((df.shape[0], 3), dtype=np.float64)\r\n X[:, 0] = df.index.year\r\n X[:, 1] = df.index.month\r\n X[:, 2] = df.index.day\r\n return X",
"def _df_meta_to_arr(df):\n\n if len(df.columns):\n if isinstance(df.columns[0], str):\n columns = df.columns.values.astype(\"S\")\n else:\n columns = df.columns.values\n else:\n columns = []\n\n if len(df.index):\n if isinstance(df.index[0], str):\n index = df.index.values.astype(\"S\")\n else:\n index = df.index.values\n else:\n index = []\n\n return columns, index",
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result",
"def get_variables(self) -> np.array:\n return np.array([self.m, self.c])",
"def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]",
"def get_variables(self) -> np.array:\n return np.array([self.mu, self.sig])",
"def var(self,i): # TODO: change to property to access (read only?) X?\n return Var(i,self.dims[i])",
"def data(self):\n return self._data.iloc[:, : self.n_genes]",
"def indices(self):\n return self.index.indices",
"def _var_names_to_indices(self, var_names: List):\n\n if type(var_names) is not type([]):\n # Return empty array if input is not 'list' type\n print(\"[_var_names_to_indices] Provided input is not of type list.\")\n return []\n\n indices_array = []\n names_array = []\n for name in var_names:\n if name not in self.vars_to_idx.keys():\n print(\"[_var_names_to_indices] Invalid variable name '{}' has been skipped.\".format(name))\n continue\n indices_array.append(self.vars_to_idx[name])\n names_array.append(name)\n\n if not len(var_names) > 0:\n print(\"[_var_names_to_indices] No (valid) states have been provided.\")\n\n return indices_array, names_array",
"def extract_letor_arrays(frame):\n # the last two columns are y and ids, so don't include them\n if len(frame.columns) > 2:\n X = frame[frame.columns[:-2]].values\n else:\n X = None\n y = frame['y'].values\n ids = frame['ids'].values\n return X, y, ids"
]
| [
"0.664484",
"0.6407513",
"0.62923473",
"0.6233907",
"0.61113024",
"0.6098684",
"0.6098561",
"0.60291743",
"0.6015471",
"0.5951717",
"0.5893516",
"0.58810705",
"0.5769446",
"0.5763522",
"0.57079065",
"0.56854033",
"0.5684478",
"0.5661651",
"0.56546617",
"0.56515044",
"0.5629818",
"0.5617208",
"0.55953866",
"0.5581567",
"0.55707985",
"0.55688244",
"0.554169",
"0.55334336",
"0.5529142",
"0.55256164"
]
| 0.7774008 | 0 |
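The vars record above exposes the wrapped DataFrame's index as a NumPy array, falling back to an empty array when no DataFrame is attached. A minimal sketch of that property; Storage is a hypothetical wrapper name, and only the .data attribute and the isinstance guard are taken from the record:

import numpy as np
import pandas as pd


class Storage:
    def __init__(self, data=None):
        self.data = data  # may be a DataFrame or nothing at all

    @property
    def vars(self) -> np.ndarray:
        # Empty array when no DataFrame is wrapped; otherwise the index values.
        if not isinstance(self.data, pd.DataFrame):
            return np.array([])
        return np.array(self.data.index)


if __name__ == "__main__":
    empty = Storage()
    filled = Storage(pd.DataFrame({"v": [1, 2]}, index=["flow", "head"]))
    print(empty.vars)   # []
    print(filled.vars)  # ['flow' 'head']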
The column count in the wrapped dataframe. Returns int Integer if a dataframe exists. Note As the DataFrame is checked and mypy finds it to return Any, the output of the shape is also found to return Any. | def col_count(self):
if isinstance(self.data, pd.DataFrame) is False:
return None
else:
return self.data.shape[1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def columnCount(self, index=QtCore.QModelIndex()):\n # speed comparison:\n # In [23]: %timeit len(df.columns)\n # 10000000 loops, best of 3: 108 ns per loop\n\n # In [24]: %timeit df.shape[1]\n # 1000000 loops, best of 3: 440 ns per loop\n return len(self._dataFrame.columns)",
"def columns_count(self):\n if self.value.count != 0:\n return len(self.value[0])\n else:\n return 0",
"def _get_lengths(df):\n try:\n return len(df)\n # Because we sometimes have cases where we have summary statistics in our\n # DataFrames\n except TypeError:\n return 0",
"def get_number_of_header_columns_in_df(self, df: pd.DataFrame) -> int:\n\n for i in range(len(df)):\n try:\n _ = df.iloc[i:].apply(pd.to_numeric).reset_index(drop=True)\n return i\n except ValueError:\n pass\n raise DataHeaderError(msg=\"Could find the count of header columns in dataframe\")",
"def generic_record_count(data_df: Optional[DataFrame]) -> int:\n return len(data_df)",
"def GetNumColumns(self):\n return len(self.columns)",
"def size(self):\n\t\t# Better to have this as a method rather than property, as self._dataframe may change\n\t\treturn self._dataframe.shape[0]",
"def _num_of_cols(self):\n if not self.connection:\n self.close()\n noc = C_SHORT()\n ret = ODBC_API.SQLNumResultCols(self.stmt_h, ADDR(noc))\n if ret != SQL_SUCCESS:\n check_success(self, ret)\n return noc.value",
"def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count",
"def dim(df: DataFrame) -> DataFrame: \n \n return df.shape",
"def num_cols(self):\n return len(self.column_names())",
"def columnCount( self ):\n if not self.o_data is None:\n if self.isItemMultiDimensional():\n return len(self.o_data)\n else:\n return 1\n else:\n return 1",
"def getNbColumns(self):\n return self.data.shape[0]",
"def column_count(self):\n return self.column_length",
"def getNoOfCols(self):\n return _patchExtractor.patchExtractor_getNoOfCols(self)",
"def number_of_columns(self):\n return len(self._columns)",
"def __len__(self):\n if self.df is None:\n return 0\n elif len(self.df.keys()) == 0:\n return 0\n else:\n return len(self.df[self.df.keys()[0]])",
"def GetNumberCols(self):\n return len(self.__colsKey)",
"def __len__(self):\n return self.df.shape[0]",
"def ncolumns(self):\n return len(self.__column_list)",
"def getColumnCount(self) -> int:\n ...",
"def col_count(self):\n return self.__col_count",
"def num_cols(self):\n return len(self.rows[0])",
"def no_of_columns(self): \n return len(self.columns) + (1 if self.serialize else 0)",
"def columnCount(self, _parent=None):\n return len(self._attr_cols)",
"def get_num_cols(self):\n return self._num_cols",
"def rowCount(self, index=QtCore.QModelIndex()):\n # len(df.index) is faster, so use it:\n # In [12]: %timeit df.shape[0]\n # 1000000 loops, best of 3: 437 ns per loop\n # In [13]: %timeit len(df.index)\n # 10000000 loops, best of 3: 110 ns per loop\n # In [14]: %timeit df.__len__()\n # 1000000 loops, best of 3: 215 ns per loop\n \n n_rows = len(self._dataFrame.index)\n \n if n_rows <= self.rowsLoaded:\n return n_rows\n else:\n return self.rowsLoaded",
"def size(self) -> int:\n num_columns = len(self._internal.data_spark_columns)\n if num_columns == 0:\n return 0\n else:\n return len(self) * num_columns # type: ignore[arg-type]",
"def num_cols(self):\n return (len(self.rows[0]))",
"def cols(self) -> int:\n if self.is_empty():\n raise ValueError(\"Empty DataView contains no columns\")\n\n return len(self[0])"
]
| [
"0.7479284",
"0.7457271",
"0.74545646",
"0.7218371",
"0.72052294",
"0.7198004",
"0.7125306",
"0.71171653",
"0.71032107",
"0.70787036",
"0.7048535",
"0.70310104",
"0.69621015",
"0.6886637",
"0.68838155",
"0.68789786",
"0.68624115",
"0.68600327",
"0.6812109",
"0.6770684",
"0.676097",
"0.6691696",
"0.665738",
"0.6656699",
"0.6648856",
"0.6642156",
"0.6631414",
"0.6609872",
"0.6607531",
"0.65665555"
]
| 0.8040465 | 0 |
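The col_count record above reads the column count from DataFrame.shape[1] behind the same isinstance guard. The sketch below uses a hypothetical Wrapper class; casting shape[1] to int is one way to address the mypy "returns Any" note in the docstring, although the record itself returns the value uncast:

from typing import Optional

import pandas as pd


class Wrapper:
    def __init__(self, data=None):
        self.data = data

    @property
    def col_count(self) -> Optional[int]:
        # DataFrame.shape is (rows, columns); None signals that nothing is wrapped.
        if not isinstance(self.data, pd.DataFrame):
            return None
        return int(self.data.shape[1])


if __name__ == "__main__":
    print(Wrapper().col_count)                                    # None
    print(Wrapper(pd.DataFrame({"a": [1], "b": [2]})).col_count)  # 2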
Compiles all dataframes for a given storage_str into a mutable top level dataframe. | def compile_all_df(self, assembly: Any, child_str: str):
self.compiled = child_str
storages = np.array(
[
output[child_str]
for key, output in assembly.lookup(child_str).items()
if output[child_str] is not None
and key != assembly.ref
and hasattr(output[child_str].data, "empty")
and not output[child_str].data.empty
]
)
for stores_in_component in storages:
self.assign(stores_in_component.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')",
"def create_df(datadir: str, ext: str='txt') -> pd.DataFrame:\n\n datalist = []\n for name in os.listdir(datadir):\n filename = '/'.join([datadir, name])\n if os.path.isfile(filename) and ext in name[-len(ext):]:\n row_data = []\n content = read_file.read_file(filename)\n row_data.append(read_file.extract_name(content))\n row_data.append(read_file.extract_year(content))\n row_data.append(read_file.extract_form_factor(content))\n row_data.append(read_file.extract_max_power(content))\n row_data.append(read_file.extract_min_power(content))\n row_data.append(read_file.extract_cpu_speed(content))\n row_data.append(read_file.extract_core_num(content))\n for ind in range(10, 100, 10):\n row_data.append(read_file.extract_int_power(content, ind))\n datalist.append(row_data)\n\n return pd.DataFrame(data=datalist, columns=[\n 'Name', 'Year', 'FormFac', 'MaxPower', 'IdlePower', 'CPU speed',\n 'NumCores'\n ]+[''.join([str(ind), '%Power']) for ind in range(10, 100, 10)])",
"def make_dataframes(folders, file_stem):\n\n print \"Making one big dataframe...\"\n df_orig = load_df(folders, file_stem, n_files=500)\n # df_orig = load_df(folders, \"output\")\n # df_orig = load_df(folders, \"output_ma1Lt11\")\n # df_orig = load_df(folders, \"output_good\")\n\n print len(df_orig.index), 'entries in dataframe'\n\n # Drop columns to save space\n drop_cols = [\n 'h1u', 'h1d', 'h1b', 'h1V', 'h1G', 'h1A',\n 'h2u', 'h2d', 'h2b', 'h2V', 'h2G', 'h2A',\n 'Brh3gg', 'Brh3tautau', 'Brh3bb', 'Brh3ww',\n 'Brh3zz', 'Brh3gammagamma', 'Brh3zgamma',\n 'Brh3h1h1', 'Brh3h2h2', 'Brh3h1h2',\n 'Brh3a1a1', 'Brh3a1z',\n # 'bsgamma', 'bsmumu', 'btaunu', 'delms', 'delmd']\n ]\n\n for col in drop_cols:\n if col in df_orig.columns.values:\n df_orig.drop(col, inplace=True, axis=1)\n print \"After dropping columns:\", df_orig.columns.values, len(df_orig.columns.values), \"columns\"\n\n # Remove any duplicate entries\n df_orig.drop_duplicates(inplace=True)\n\n # Load up the glu-glu cross sections for 13 TeV\n print \"Adding in cross-sections...\"\n # cs = pd.read_csv(\"parton_lumi_ratio.csv\")\n cs = pd.read_csv(\"YR3_cross_sections.csv\")\n masses = cs[\"MH [GeV]\"]\n mass_len = len(masses)\n xsec_ggf13 = cs[\"ggF 13TeV Cross Section [pb]\"]\n xsec_vbf13 = cs[\"VBF 13TeV Cross Section [pb]\"]\n # xsec_wh13 = cs[\"WH 13TeV Cross Section [pb]\"]\n # xsec_zh13 = cs[\"ZH 13TeV Cross Section [pb]\"]\n xsec_ggf8 = cs[\"ggF 8TeV Cross Section [pb]\"]\n xsec_vbf8 = cs[\"VBF 8TeV Cross Section [pb]\"]\n\n def find_closest_mass_ind(mass):\n pos = bisect_left(masses, mass)\n if pos == mass_len:\n return mass_len - 1\n return pos\n\n print 'Storing nearest-mass indices'\n df_orig['mass_ind_h1'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh1']), axis=1)\n df_orig['mass_ind_h2'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh2']), axis=1)\n df_orig['mass_ind_h3'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh3']), axis=1)\n\n # ALL XSEC STORED ARE CORRECTLY SCALED BY REDUCED COUPLING\n print \"Storing 13 TeV gg xsec\"\n df_orig[\"xsec_ggf13_h1\"] = df_orig['h1ggrc2'] * xsec_ggf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf13_h2\"] = df_orig['h2ggrc2'] * xsec_ggf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf13_h3\"] = df_orig['h3ggrc2'] * xsec_ggf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 13 TeV vbf xsec\"\n df_orig[\"xsec_vbf13_h1\"] = df_orig['h1vvrc2'] * xsec_vbf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf13_h2\"] = df_orig['h2vvrc2'] * xsec_vbf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf13_h3\"] = df_orig['h3vvrc2'] * xsec_vbf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV ggf xsec\"\n df_orig[\"xsec_ggf8_h1\"] = df_orig['h1ggrc2'] * xsec_ggf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf8_h2\"] = df_orig['h2ggrc2'] * xsec_ggf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf8_h3\"] = df_orig['h3ggrc2'] * xsec_ggf8[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV vbf xsec\"\n df_orig[\"xsec_vbf8_h1\"] = df_orig['h1vvrc2'] * xsec_vbf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf8_h2\"] = df_orig['h2vvrc2'] * xsec_vbf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf8_h3\"] = df_orig['h3vvrc2'] * xsec_vbf8[df_orig['mass_ind_h3']].values\n\n # Now add in individual channel xsec\n store_channel_xsec(df_orig)\n print df_orig.columns.values\n\n # Make some subsets here:\n print \"Making subsets...\"\n\n # Points passing all experimental constraints chosen\n df_pass_all = 
subset_pass_constraints(df_orig)\n # df_pass_all = None\n\n # subset with 2m_tau < ma1 < 10\n df_ma1Lt10 = None\n # df_ma1Lt10 = subset_var(df_pass_all, 3.554, 10.5, \"ma1\")\n\n mhmin, mhmax = 122.1, 128.1\n # subset with h1 as h_125\n # df_h1SM = subset_var(df_pass_all, mhmin, mhmax, \"mh1\")\n df_h1SM = None\n\n # subset with h2 as h_125\n # df_h2SM = subset_var(df_pass_all, mhmin, mhmax, \"mh2\")\n df_h2SM = None\n\n n_orig = len(df_orig.index)\n\n def percent_str(numerator, denominator):\n return \"%.3f %% \" % (100*numerator/float(denominator))\n\n print \"Running over\", n_orig, \"points\"\n if isinstance(df_pass_all, pd.DataFrame):\n n_pass_all = len(df_pass_all.index)\n print n_pass_all, \"points passing all constraints (= %s)\" % percent_str(n_pass_all, n_orig)\n # print len(df_ma1Lt10.index), \"of these have 2m_tau < ma1 < 10 GeV (= %s)\" % percent_str(len(df_ma1Lt10.index), n_pass_all)\n # print len(df_h1SM.index), \"points in the h1 = h(125) subset (= %s)\" % percent_str(len(df_h1SM.index), n_pass_all)\n # print len(df_h2SM.index), \"points in the h2 = h(125) subset (= %s)\" % percent_str(len(df_h2SM.index), n_pass_all)\n print \"\"\n\n return df_orig, df_pass_all, df_ma1Lt10, df_h1SM, df_h2SM",
"def data_frame_creator(self):\n sequence_folder = [\n '/SEQ1', '/SEQ2', '/SEQ3', '/SEQ4', '/SEQ5', '/SEQ6'\n ]\n rgb_folder = ['/RGBLeft/', '/RGBRight/']\n depth_folder = ['/DepthLeft/', '/DepthRight/']\n segmentation_folder = ['/GTLeft/', '/GTright/']\n rgb_dir = [\n self.dataset_dir + sequence_f + rgb_f for rgb_f in rgb_folder\n for sequence_f in sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_dir + sequence_f + depth_f\n for depth_f in depth_folder\n for sequence_f in sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_dir + sequence_f + segmentation_f\n for segmentation_f in segmentation_folder\n for sequence_f in sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1, random_state=123)\n\n return pd.DataFrame(dataset)",
"def data_frame_creator(self):\n\n rgb_dir = [\n self.dataset_address + sequence_f + rgb_f\n for rgb_f in self.rgb_folder for sequence_f in self.sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_address + sequence_f + depth_f\n for depth_f in self.depth_folder\n for sequence_f in self.sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_address + sequence_f + segmentation_f\n for segmentation_f in self.segmentation_folder\n for sequence_f in self.sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1)\n\n return pd.DataFrame(dataset)",
"def parse_dataframes(\n self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts\n ) -> DataFrame:\n raise NotImplementedError()",
"def readFilesIntoDataFrame(nameTemplate, numOfFiles):\n #https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65\n\n list_of_dfs = []\n for i in range(numOfFiles):\n print ('Processing {0} out of {1} files'.format(i, numOfFiles))\n\n fileToProcess = fileLocation + nameTemplate.format(i)\n print 'fileToProcess=', fileToProcess\n \n if 'feather' in nameTemplate:\n read_df = feather.read_feather(fileToProcess)\n elif 'parquet' in nameTemplate:\n read_df = pd.read_parquet(fileToProcess)\n else:\n print 'This should not happen, nameTemplate is wrong, please check it is in parquet or feather format or that the template correctly describes the existing files, exiting...'\n sys.exit(1)\n\n print read_df.info(memory_usage='deep')\n print '-'*50\n print read_df.describe()\n list_of_dfs.append(read_df)\n \n print 'Start concatenating dataframes, it may take some time'\n comb_df = pd.concat(list_of_dfs, ignore_index=True)\n return comb_df",
"def sql(\n self,\n sql: str,\n return_futures: bool = True,\n dataframes: Dict[str, Union[dd.DataFrame, pd.DataFrame]] = None,\n ) -> Union[dd.DataFrame, pd.DataFrame]:\n if dataframes is not None:\n for df_name, df in dataframes.items():\n self.create_table(df_name, df)\n\n rel, select_names, _ = self._get_ral(sql)\n\n dc = RelConverter.convert(rel, context=self)\n\n if dc is None:\n return\n\n if select_names:\n # Rename any columns named EXPR$* to a more human readable name\n cc = dc.column_container\n cc = cc.rename(\n {\n df_col: df_col if not df_col.startswith(\"EXPR$\") else select_name\n for df_col, select_name in zip(cc.columns, select_names)\n }\n )\n dc = DataContainer(dc.df, cc)\n\n df = dc.assign()\n if not return_futures:\n df = df.compute()\n\n return df",
"def get_dataframes(symbols=(\"sne\", \"goog\", \"tsla\"), source='yahoo', refresh=False):\n symbols = util.make_symbols(list(symbols))\n if refresh:\n symbols_to_refresh = symbols\n else:\n symbols_to_refresh = [sym for sym in symbols if not Equity.objects.filter(symbol=sym).exists()]\n source = source.lower().strip()\n if source in ('yahoo', 'google'):\n source += '_finance'\n if source[:3] == 'fed':\n source = 'federal_reserve_economic_data'\n ccpanda = ccp.ConcurrentPandas()\n # set the data source\n getattr(ccpanda, \"set_source_\" + source)()\n if symbols_to_refresh:\n # tell concurrent pandas which keys/symbols to retrieve\n ccpanda.insert_keys(symbols_to_refresh)\n # start concurrentpandas threads\n ccpanda.consume_keys_asynchronous_threads()\n # FIXME: is there a better/faster iterator to use like `ccpanda.output_map` attribute?\n pseudodict = ccpanda.return_map()\n else:\n pseudodict = {}\n table = {}\n for sym in symbols:\n e, created = None, False\n if not sym in symbols_to_refresh:\n e, created = Equity.objects.get_or_create(symbol=sym)\n if created or not e or not e.time_series or sym in symbols_to_refresh:\n e, created = Equity.objects.get_or_create(\n symbol=sym,\n name=sym, # FIXME: use data source to find equity name!\n time_series=pseudodict[sym].to_json(),\n )\n table[sym] = pd.io.json.read_json(path_or_buf=e.time_series, orient='columns', typ='frame', convert_dates=True)\n return table",
"def create_dataframe(self):\n\n df = pd.DataFrame({'date': [],\n 'RUN': [],\n 'CLONE': [],\n 'GEN': pd.Series(0, index=[], dtype='int'),\n 'frame': pd.Series([], index=[], dtype='int'),\n 'time (ns)': [] }) # set the index\n df.set_index('date')\n print(df)\n\n # Save the DataFrame to disk\n\n ### create a file handle to store the data in (a dict-like) HDF5 format\n store = pd.HDFStore(self.dataframe_path)\n print(store)\n store.put('df', df)\n return store",
"def main(string_df, schema, read_sections):\n global sentinals, section_lens, sentinals_lens\n global parsing_order\n # Proceed to split sections if more than one\n # else return section in a named column\n if len(schema['sections'].keys())> 1:\n section_lens = { section: schema['sections'][section]['header'].get('length') for section in schema['sections'].keys()}\n sentinals = { section: schema['sections'][section]['header'].get('sentinal') for section in schema['sections'].keys()}\n sentinals_lens = { section: len(sentinals.get(section)) if sentinals.get(section) else 0 for section in sentinals.keys()}\n parsing_order = schema['header']['parsing_order']\n # Get sections separated: section dict has a key:value pair for each\n # section in the data model. If the section does not exist in the data,\n # the value is an empty pd.Series\n section_dict = extract_sections(string_df)\n # Paste in order (as read_sections) in a single dataframe with columns\n # named as sections:\n # - Drop unwanted sections\n # - Keep requested but non-existent sections\n df_out = pd.DataFrame()\n for section in read_sections:\n df_out = pd.concat([df_out,section_dict[section].rename(section)],sort = False,axis=1)\n else:\n df_out = string_df\n df_out.columns = read_sections\n\n return df_out",
"def parse_dataframes(genome_gtf, sralist):\n\n def gather_strand_by_geneID_dict(genome_gtf):\n \"\"\"\n Returns dictionary with strand orientation as values and geneIDs as Keys/\n e.g.: {'YAL012W': '+',\n 'YAL069W': '+',\n 'YAL068W-A': '+',\n \"\"\"\n strand_by_geneID_dict = {}\n with open(genome_gtf) as f: \n for line in f: \n current_line = line.split('\\t')\n if current_line[2] == \"CDS\":\n current_orf = current_line[8].split(';')[2].split()[1].strip('\\\"')\n current_strand = current_line[6]\n strand_by_geneID_dict[current_orf] = current_strand\n return strand_by_geneID_dict\n\n\n def import_scikit_data(sralist):\n \"\"\"\n Import results from scikit pipeline for all datasets contained in datsets_names.\n \"\"\"\n scikit_data_dict = {}\n for dataset in sralist:\n with open(TMP_DIR+'scikit_'+dataset+'/ALL_genes_profile_dict.json', 'r') as scikit_data:\n scikit_data_dict[dataset] = [json.load(scikit_data)]\n return scikit_data_dict\n\n\n def build_mat_scikit_strandOriented(sralist, scikit_data):\n \"\"\"\n Building of scikit_df based on the output of plot_ribo_density_dict.py script.\n\n C/-/reverse/complementary strand are taken into account and the profile values\n (\"codon_density_profile\", \"codon_triplet\", \"codon_AA\") are reversed. This is\n performed by adding [::-1] to C strands profile ends.\n\n Same profile values are also have their extremities trimmed out of 8 codons.\n (This is because the scikit-ribo pipeline considers 8 extra codons on each end,\n but here we are only interested in the coding sequence). This is performed by\n adding [8:-8] to profile lists ends.\n \"\"\"\n\n scikit_mat = {}\n seq_codons = {}\n seq_aa = {}\n\n for geneID in scikit_data[sralist[0]][0].keys():\n for ix, dataset in enumerate(sralist):\n\n if geneID in scikit_data[dataset][0].keys():\n current_profile = scikit_data[dataset][0].get(geneID, np.nan)\n current_ribo = current_profile[0]\n current_ribo = current_ribo[8:-8]\n N = len(sralist)\n M = len(current_ribo)\n print(geneID, M)\n\n if ix == 0:\n current_matrix = np.zeros((N,M)) * np.nan\n\n current_seq_codons = current_profile[1]\n current_seq_codons = current_seq_codons[8:-8]\n\n current_seq_aa = current_profile[2]\n current_seq_aa = current_seq_aa[8:-8]\n\n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n seq_codons[geneID] = current_seq_codons\n seq_aa[geneID] = current_seq_aa\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n seq_codons[geneID] = current_seq_codons[::-1]\n seq_aa[geneID] = current_seq_aa[::-1]\n \n \n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n current_matrix[ix,:] = current_ribo\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n current_matrix[ix,:] = current_ribo[::-1]\n \n if np.sum(current_matrix) > 0: \n scikit_mat[geneID] = current_matrix\n\n# scikit_df = pd.DataFrame(values_list, columns=columns_list)\n\n return scikit_mat, seq_codons, seq_aa\n\n\n def mean_norm(row):\n codon_dens_prof = row.codon_density_profile\n profile_average = np.average(codon_dens_prof)\n\n return [x/profile_average for x in codon_dens_prof]\n \n #scikit_data_df[\"mean_norm_codon_density_profile\"] = scikit_data_df.apply(mean_norm, axis=1)\n #scikit_data_df[\"mean_norm_codon_density_profile\"] = scikit_data_df['mean_norm_codon_density_profile'].apply(lambda x: x[8:-8])\n\n strand_by_geneID_dict = gather_strand_by_geneID_dict(genome_gtf)\n scikit_data_dict = import_scikit_data(sralist)\n scikit_data_mat, seq_codons_dict, seq_aa_dict = build_mat_scikit_strandOriented(sralist, 
scikit_data_dict)\n\n with open('../data/processed/scikit_mat.pkl', 'wb') as f:\n \tpickle.dump(scikit_data_mat, f)\n\n with open('../data/processed/scikit_codonseq.pkl', 'wb') as f_seq:\n pickle.dump(seq_codons_dict, f_seq)\n \n\n return scikit_data_mat",
"def initialize_output_dfs() -> Tuple[pd.DataFrame]:\n df_city_no_zip = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_zip_no_city = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_zipCity_no_address = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_address_no_zipCity = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_no_address_at_all = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_invalid_matrices = pd.DataFrame(\n columns=[\"memberid\", \"DataMatrix\", \"source\", \"action\"]\n )\n df_employees = pd.DataFrame(\n columns=[\"memberid\", \"MemberName\", \"MemberStatus\", \"source\", \"action\"]\n )\n return (\n df_city_no_zip,\n df_zip_no_city,\n df_zipCity_no_address,\n df_address_no_zipCity,\n df_no_address_at_all,\n df_invalid_matrices,\n df_employees,\n )",
"def create_df(files_list=my_files):\n\n all_records = list()\n\n for file in files_list:\n all_records += zr_parser(path.join(my_dir, file))\n\n return pd.DataFrame(all_records)",
"def build_df(path_orig = r'.\\chest_xray', orig_file_ext = 'jpeg', path_seg = r'.\\segmentation', seg_file_ext = 'png', save_path = '.\\df_all.csv'):\n \n read_df = 'C'\n list_df = [] \n \n if os.path.exists(save_path):\n read_df = input('DataFrame was found, would you like to read it (R) or recreate it (C) (default Read)?\\n') or 'R'\n if read_df == 'R':\n df = pd.read_csv(save_path, index_col = 0)\n return df\n \n if read_df == 'C':\n for dirname, _, filenames in os.walk(path_orig):\n for filename in tqdm(filenames, disable=len(filenames)==0):\n if ('.' + orig_file_ext) in filename:\n list_val = []\n list_val.append('PNEUMONIA' if 'PNEUMONIA' in dirname else 'NORMAL')\n list_val.append(1 if 'PNEUMONIA' in dirname else 0)\n list_val.append('bacteria' if 'bacteria' in filename.lower() else 'virus' if 'virus' in filename.lower() else 'normal')\n list_val.append(1 if 'bacteria' in filename.lower() else 2 if 'virus' in filename.lower() else 0)\n list_val.append(filename)\n list_val.append(os.path.join(dirname, filename)) \n list_val.append(filename.replace(orig_file_ext, seg_file_ext))\n list_val.append(os.path.join(dirname.replace(path_orig, path_seg), filename.replace(orig_file_ext, seg_file_ext)))\n list_df.append(list_val)\n\n df = pd.DataFrame(list_df, columns = ['Label_name', 'Label_int', 'Label_pathology', 'Label_pathology_int', 'Filename_orig', 'Filepath_orig', 'Filename_seg', 'Filepath_seg'])\n df.to_csv(save_path)\n \n print('Done')\n \n return df",
"def compile_dataframe_default(self):\n\t\tdata = [\n\t\t\t['ford','mustang','coupe','A'],\n\t\t\t['chevy','camaro','coupe','B'],\n\t\t\t['ford','fiesta','sedan','C'],\n\t\t\t['ford','focus','sedan','A'],\n\t\t\t['ford','taurus','sedan','B'],\n\t\t\t['toyota','camry','sedan','B']\n\t\t]\n\n\t\tself.data = pd.DataFrame(data, columns = self.data_cols)",
"def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)",
"def sims_to_df(self, focus=['string', 'module'], cutoff=False):\n Vs = []\n Is = []\n temps = []\n irrs = []\n mode = []\n level = []\n\n if 'substring' in focus:\n if len(self.multilevel_ivdata['module'].keys()) > 0:\n for mod_key in self.multilevel_ivdata['module'].keys():\n for substr_id in range(1, 4):\n v_s = self.multilevel_ivdata['module'][\n mod_key][f'substr{substr_id}']['V']\n i_s = self.multilevel_ivdata['module'][\n mod_key][f'substr{substr_id}']['I']\n e_s = self.multilevel_ivdata['module'][\n mod_key][f'substr{substr_id}']['E']\n t_s = self.multilevel_ivdata['module'][\n mod_key][f'substr{substr_id}']['T']\n\n Vs += v_s\n Is += i_s\n irrs += e_s\n temps += t_s\n level += ['substring'] * len(v_s)\n mode += [mod_key] * len(v_s)\n\n if 'module' in focus:\n if len(self.multilevel_ivdata['module'].keys()) > 0:\n # Module definitions\n for mod_key in self.multilevel_ivdata['module'].keys():\n v_s = self.multilevel_ivdata['module'][mod_key]['V']\n i_s = self.multilevel_ivdata['module'][mod_key]['I']\n e_s = self.multilevel_ivdata['module'][mod_key]['E']\n t_s = self.multilevel_ivdata['module'][mod_key]['T']\n\n Vs += v_s\n Is += i_s\n irrs += e_s\n temps += t_s\n level += ['module'] * len(v_s)\n mode += [mod_key] * len(v_s)\n\n if 'string' in focus:\n if len(list(self.string_cond.keys())) > 0:\n # String definitions\n for str_key in self.string_cond:\n v_s = self.multilevel_ivdata['string'][str_key]['V']\n i_s = self.multilevel_ivdata['string'][str_key]['I']\n e_s = self.multilevel_ivdata['string'][str_key]['E']\n t_s = self.multilevel_ivdata['string'][str_key]['T']\n\n Vs += v_s\n Is += i_s\n irrs += e_s\n temps += t_s\n level += ['string'] * len(v_s)\n mode += [str_key] * len(v_s)\n\n if cutoff:\n cut_Vs = []\n cut_Is = []\n for V, I in zip(Vs, Is):\n v_, i_ = iv_cutoff(V, I, 0)\n cut_Vs.append(v_)\n cut_Is.append(i_)\n return pd.DataFrame({'current': cut_Is,\n 'voltage': cut_Vs,\n 'E': irrs,\n 'T': temps,\n 'mode': mode,\n 'level': level})\n\n else:\n return pd.DataFrame({'current': Is,\n 'voltage': Vs,\n 'E': irrs,\n 'T': temps,\n 'mode': mode,\n 'level': level})",
"def _build_variable_mapping_df(magnet_strings, length_constants):\n LOG.debug(\" Building Dataframe Mapping\")\n var_to_mag = {}\n for magnet in magnet_strings:\n for order, value_string in magnet_strings[magnet].iteritems():\n if order not in var_to_mag:\n var_to_mag[order] = tfs.TfsDataFrame()\n\n k_dict = _eval_magnet_strength(value_string, length_constants)\n var_to_mag[order] = var_to_mag[order].append(\n tfs.TfsDataFrame([k_dict.values()],\n index=[magnet],\n columns=k_dict.keys()\n )).fillna(0)\n return var_to_mag",
"def get_data(paths, df_names, categorical_feats, groupby=None, exclude_classes=[], rel_cols=None, sep=\",\"):\n\n def _load_data(path, sep=sep):\n \"\"\"small function to load according to the dataformat. (excel or csv)\"\"\"\n filename, file_extension = os.path.splitext(path)\n\n if file_extension in [\".csv\", \".tsv\"]:\n df = pd.read_csv(path, index_col=0, sep=sep)\n else:\n df = pd.read_excel(path, index_col=0)\n\n return df\n\n # initialize list to store dataframes in\n dfs = []\n\n # Handle single path input\n if groupby and (len(paths) == 1 or isinstance(paths, str)):\n\n # load data depending on if the single path is given in a list of as string\n if isinstance(paths, str):\n data = _load_data(paths, sep)\n elif isinstance(paths, list):\n data = _load_data(*paths, sep)\n else:\n raise ValueError(\"It seems like the input was a single path. Please input path as string or inside a list.\")\n\n grouping = data.groupby(groupby)\n\n # split dataframe groups and create a list with all dataframes\n for name, grp in grouping:\n # skip class if it should be excluded\n if name in exclude_classes:\n continue\n\n df = grouping.get_group(name)[::]\n\n # consider all columns as relevant is no rel_cols given.\n if rel_cols is None:\n rel_cols = list(df)\n\n # consider the relevant columns\n dfs.append(df[rel_cols])\n\n # Handle multiple paths input\n elif len(paths) > 1:\n for path in paths:\n df = _load_data(path)\n dfs.append(df)\n\n return DataCollection(dfs, df_names, categorical_feats)",
"def get_compound_df(idx, compound_dict, standard_types):\n\n if standard_types is None or compound_dict['standard_type'] is None:\n is_valid_std_type = False\n else:\n compound_std_type = compound_dict['standard_type'].lower()\n is_valid_std_type = any([std_type.lower() in compound_std_type\n for std_type in standard_types])\n\n if is_valid_std_type:\n # Drop unused columns\n compound_dict.pop('activity_properties', None)\n # Capture Ligand Efficiency\n lig_efficiency = compound_dict.pop('ligand_efficiency', None)\n\n compound_df = pd.DataFrame(compound_dict, index=[0])\n if lig_efficiency is not None:\n lig_efficiency_df = pd.DataFrame(lig_efficiency, index=[0])\n lig_efficiency_df.columns = ['ligand_efficiency_%s' % col\n for col in lig_efficiency_df.columns]\n compound_df = pd.concat([compound_df, lig_efficiency_df], axis=1)\n else:\n compound_df = pd.DataFrame(columns=compound_dict.keys())\n\n print_progress_bar(idx + 1, self.number_retrieved_compounds,\n prefix='Progress:', suffix='Complete', length=50)\n return compound_df",
"def load_all_dfs(clf_list = ['test_small','rt_small','test2_small']):\n \n start = time.clock()\n print('loading data')\n first_clf = clf_list[0]\n df = pd.read_csv('Pikki'+first_clf+'.csv')\n df['df'] = first_clf\n\n df = df.set_index(['id','df'])\n\n for clf in clf_list[1:]:\n file_name = 'Pikki' + clf + '.csv'\n df_tmp = pd.read_csv(file_name)\n df_tmp['df'] = clf\n\n df_tmp = df_tmp.set_index(['id','df'])\n\n df = pd.concat([df,df_tmp])\n\n \n df['std'] = df.apply(np.std,axis=1,raw = True)\n end = time.clock()\n print(end-start)\n return df#.swaplevel(0,1)",
"def lists_to_data_frames(bc_class, temp_data):\n if 'bc_string_list' in temp_data:\n labels = ['CARD', 'ID', 'ID_0', 'ID_1']\n df = pd.DataFrame.from_records(temp_data['bc_string_list'], columns=labels)\n for x in range(1, len(labels)):\n df[labels[x]] = df[labels[x]].astype(dtype='Int64')\n bc_class.boundary_strings = df\n if 'bc_list' in temp_data:\n labels = ['CARD', 'CARD_2', 'STRING_ID', 'XY_ID1', 'XY_ID2', 'XY_ID3']\n df = pd.DataFrame.from_records(temp_data['bc_list'], columns=labels)\n for x in range(2, len(labels)):\n df[labels[x]] = df[labels[x]].astype(dtype='Int64')\n bc_class.solution_controls = df\n if 'nb_sdr_list' in temp_data:\n labels = ['CARD', 'CARD_1', 'S_ID', 'COEF_A', 'COEF_B', 'COEF_C', 'COEF_D', 'COEF_E']\n df = pd.DataFrame.from_records(temp_data['nb_sdr_list'], columns=labels)\n bc_class.stage_discharge_boundary = df\n if 'fr_list' in temp_data:\n labels = ['CARD', 'CARD_2', 'STRING_ID', 'REAL_01', 'REAL_02', 'REAL_03', 'REAL_04', 'REAL_05']\n df = pd.DataFrame.from_records(temp_data['fr_list'], columns=labels)\n bc_class.friction_controls = df\n if 'br_list' in temp_data:\n labels = ['CARD', 'CARD_1', 'C_0', 'C_1', 'C_2', 'C_3', 'C_4', 'C_5', 'C_6', 'C_7', 'C_8']\n df = pd.DataFrame.from_records(temp_data['br_list'], columns=labels)\n bc_class.breach_controls = df\n if 'wrs_list' in temp_data:\n labels = ['CARD', 'NUMBER', 'S_UPSTREAM', 'S_DOWNSTREAM', 'WS_UPSTREAM', 'WS_DOWNSTREAM', 'LENGTH',\n 'CREST_ELEV', 'HEIGHT']\n df = pd.DataFrame.from_records(temp_data['wrs_list'], columns=labels)\n bc_class.weirs = df\n if 'fgt_list' in temp_data:\n labels = ['CARD', 'NUMBER', 'USER', 'S_UPSTREAM', 'S_DOWNSTREAM', 'FS_UPSTREAM', 'FS_DOWNSTREAM', 'COEF_A',\n 'COEF_B', 'COEF_C', 'COEF_D', 'COEF_E', 'COEF_F', 'LENGTH']\n df = pd.DataFrame.from_records(temp_data['fgt_list'], columns=labels)\n bc_class.flap_gates = df\n if 'sls_list' in temp_data:\n labels = ['CARD', 'NUMBER', 'S_UPSTREAM', 'S_DOWNSTREAM', 'SS_UPSTREAM', 'SS_DOWNSTREAM', 'LENGTH',\n 'TS_OPENING']\n df = pd.DataFrame.from_records(temp_data['sls_list'], columns=labels)\n bc_class.sluice_gates = df\n if 'cn_con_list' in temp_data:\n labels = ['CARD', 'CARD_1', 'ID', 'CONC']\n df = pd.DataFrame.from_records(temp_data['cn_con_list'], columns=labels)\n bc_class.constituent_properties.general_constituents = df\n if 'cn_snd_list' in temp_data:\n labels = ['CARD', 'CARD_1', 'ID', 'C_0', 'C_1', 'C_2', 'C_3']\n df = pd.DataFrame.from_records(temp_data['cn_snd_list'], columns=labels)\n bc_class.constituent_properties.sediment = df\n if 'flx_list' in temp_data:\n labels = ['CARD', 'S_ID']\n df = pd.DataFrame.from_records(temp_data['flx_list'], columns=labels)\n bc_class.output_control.output_flow_strings = df",
"def df():\n fs.df()",
"def load_df_from_files():\n with open(\"legislators-historical.json\") as f:\n data_old = json.load(f)\n\n with open(\"legislators-current.json\") as f:\n data_new = json.load(f)\n\n data = data_old + data_new\n\n rows = []\n for person in data:\n try:\n these_rows = make_rows(person)\n except:\n print(person)\n rows.extend(these_rows)\n\n df = pd.DataFrame(rows)\n return df",
"def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)",
"def create_dataframe(base_dir):\n\n if not os.path.exists(base_dir):\n return pd.DataFrame()\n\n df_list = [\n create_dataframe_from_dir(os.path.join(base_dir, directory))\n for directory in os.listdir(base_dir)\n if os.path.isdir(os.path.join(base_dir, directory))\n ]\n\n df_basedir = create_dataframe_from_dir(base_dir)\n if df_basedir is not None:\n df_list.append(df_basedir)\n\n final_df = concat_dataframes(df_list)\n\n return final_df",
"def _setup_dataframe(self, serie, metadata=None):\n header = self.get_data_header(serie, dataset='cnv')\n df = self.get_data_in_frame(serie, header, dataset='cnv')\n df = self.df_handler.map_column_names_of_dataframe(df)\n\n return df",
"def make_dataframe(self, dataframe_path, corpus_path):\n directory_list = os.listdir(corpus_path)\n pub_year = []\n pii = []\n doi = []\n title = []\n authors = []\n num_authors = []\n abstract = []\n journal_name = []\n\n for i in trange(len(directory_list)):\n directory = directory_list[i]\n json_dict = self.load_journal_json(f'{corpus_path}/{directory}/{directory}.json')\n\n for year in json_dict:\n for pub in json_dict[year]:\n pub_year.append(year)\n pii.append(json_dict[year][pub]['pii'])\n doi.append(json_dict[year][pub]['doi'])\n title.append(json_dict[year][pub]['title'])\n authors.append(json_dict[year][pub]['authors'])\n num_authors.append(json_dict[year][pub]['num_authors'])\n abstract.append(json_dict[year][pub]['description'])\n journal_name.append(directory)\n\n columns = ['pub_year', 'pii', 'doi', 'title', 'authors', 'num_authors', 'abstract', 'journal_name']\n df = pd.DataFrame(np.array([pub_year, pii, doi, title, authors, num_authors, abstract, journal_name], dtype=object).transpose(), columns=columns)\n df.to_pickle(dataframe_path + '/dataframe_from_CorpusGenerator' +'.pkl')",
"def init_file_dataframe():\n \n row_names_link = [\"NA\"] * MAX_NUM_OF_FILES\n row_names_name = [\"NA\"] * MAX_NUM_OF_FILES\n row_names_down = [\"NA\"] * MAX_NUM_OF_FILES\n \n for i in range(MAX_NUM_OF_FILES):\n row_names_link[i] = \"link_\" + str(i + 1)\n row_names_name[i] = \"name_\" + str(i + 1)\n row_names_down[i] = \"down_\" + str(i + 1)\n \n df = pd.DataFrame(columns = row_names_link + row_names_name + row_names_down)\n \n return df, row_names_link, row_names_name, row_names_down"
]
| [
"0.57476497",
"0.5689572",
"0.5556659",
"0.55454594",
"0.5482013",
"0.5480292",
"0.5476088",
"0.5457783",
"0.54277915",
"0.5402681",
"0.53266585",
"0.52771425",
"0.5270305",
"0.52694404",
"0.52569366",
"0.5238869",
"0.5226146",
"0.52153397",
"0.5192845",
"0.5190287",
"0.51789695",
"0.5171686",
"0.5167383",
"0.51527214",
"0.5145052",
"0.51412207",
"0.5140804",
"0.51348656",
"0.51004076",
"0.50972587"
]
| 0.6514609 | 0 |
Import a function given the string formatted as `module_name.function_name` (e.g. `django.utils.text.capfirst`) | def import_function(s):
a = s.split('.')
j = lambda x: '.'.join(x)
return getattr(import_module(j(a[:-1])), a[-1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_function(name: str):\n module_name, function_name = name.rsplit(\".\", 1)\n module = importlib.import_module(module_name)\n return getattr(module, function_name)",
"def import_function(\n name: Optional[str]\n) -> Optional[Callable]:\n\n if name is None:\n return None\n\n module_name, function_name = name.rsplit('.', maxsplit=1)\n function_module = import_module(module_name)\n function = getattr(function_module, function_name)\n\n return function",
"def import_function(spec, kwargs=None):\n spec = spec.split(':')\n module = spec[0]\n fn = spec[1]\n module = import_module(module)\n fn = getattr(module, fn)\n\n if kwargs is not None:\n fn = functools.partial(fn, **kwargs)\n return fn",
"def import_from(full_name):\n module_name, function_name = full_name.rsplit('.', 1)\n mod = import_module(module_name)\n return getattr(mod, function_name)",
"def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)",
"def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)",
"def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)",
"def _import_string(import_name):\n if \".\" in import_name:\n module, obj = import_name.rsplit(\".\", 1)\n else:\n return importlib.import_module(import_name)\n return getattr(importlib.import_module(module), obj)",
"def load_function(engine_path):\r\n module_path, _, name = engine_path.rpartition('.')\r\n return getattr(importlib.import_module(module_path), name)",
"def load_function(engine_path):\r\n module_path, _, name = engine_path.rpartition('.')\r\n return getattr(import_module(module_path), name)",
"def test_import_local_function(self):\n import_function(determine_package(f))\n assert f() == \"My name is f.\"",
"def get_function(function_path):\n try:\n mod_name, func_name = function_path.rsplit('.', 1)\n mod = import_module(mod_name)\n except ImportError as e:\n raise ImproperlyConfigured(('Error importing module %s: \"%s\"' %\n (mod_name, e)))\n return getattr(mod, func_name)",
"def get_callable_from_string(f_name):\n try:\n mod_name, func_name = get_mod_func(f_name)\n if mod_name == \"\" and func_name == \"\":\n raise AttributeError(\"%s couldn't be converted to a module or function name\" % f_name)\n\n module = __import__(mod_name)\n\n if func_name == \"\":\n func_name = mod_name # The common case is an eponymous class\n\n return getattr(module, func_name)\n\n except (ImportError, AttributeError), exc:\n raise RuntimeError(\"Unable to create a callable object for '%s': %s\" % (f_name, exc))",
"def parse_function_str(function_str: str) -> Union[Callable[[object, dict], object], None]:\n\n if function_str is None:\n return None\n\n parsed_function = re.match(\"^([\\.\\w]+)(?:\\(([\\w|,%\\'-: ]*)\\))?$\", function_str)\n if not parsed_function:\n raise RuntimeError(f\"Invalid name for a transform function: '{function_str}'\")\n\n function_name, args = parsed_function.groups()\n args_list = str(args).split(\",\") if (args is not None and args != '') else []\n\n # Check if it is a built-in function\n builtin_function = FunctionBuilder.get_builtin_function(function_name, args_list)\n\n if builtin_function is not None:\n return builtin_function\n\n # Get it as custom function\n return FunctionBuilder.get_custom_function(function_name, args_list)",
"def import_from_string(import_path: str) -> Any:\n\n import_classname = import_path.split(\".\")[-1]\n import_module = \".\".join(import_path.split(\".\")[:-1])\n\n module = importlib.import_module(import_module)\n return getattr(module, import_classname)",
"def get_func(func_name):\n if func_name == '':\n return None\n try:\n parts = func_name.split('.')\n # Refers to a function in this module\n if len(parts) == 1:\n return globals()[parts[0]]\n # Otherwise, assume we're referencing a module under modeling\n module_name = 'modeling.' + '.'.join(parts[:-1])\n module = importlib.import_module(module_name)\n return getattr(module, parts[-1])\n except Exception:\n raise",
"def call_function_from_import_path(import_path: str) -> Any:\n try:\n callback_func = import_attr(import_path)\n except Exception as e:\n raise ValueError(f\"The import path {import_path} cannot be imported: {e}\")\n\n if not callable(callback_func):\n raise TypeError(f\"The import path {import_path} is not callable.\")\n\n try:\n return callback_func()\n except Exception as e:\n raise RuntimeError(f\"The function {import_path} raised an exception: {e}\")",
"def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)",
"def funcname(funcstr):\n ps = funcstr.find('(')\n return funcstr[:ps]",
"def get_func(func_name):\n from importlib import import_module\n if func_name is None:\n return None\n parts = func_name.split('.')\n module_name = '.'.join(parts[:-1])\n module = import_module(module_name)\n return getattr(module, parts[-1])",
"def fname(func):\n return \"%s.%s\" % (func.__module__, func.__name__)",
"def import_object(import_str, *args, **kwargs):\r\n return import_class(import_str)(*args, **kwargs)",
"def import_declared_function(name):\n nname = SqlFunction.normalize_name(name)\n module = SqlFunction._definitions.get(nname)\n if not isinstance(module, str):\n raise KGTKException(f'not a forward-declared SQL function: {name}')\n # we have a proper forward-declaration to a defining module, import it:\n exec(f'import {module}')\n fun = SqlFunction._definitions.get(nname)\n if not isinstance(fun, SqlFunction):\n raise KGTKException(f'missing definition for declared SQL function: {name}')\n return fun",
"def get_func(self, name_str):\n return self.func.get(name_str)",
"def importObject(importStr, *args, **kwargs):\n return importClass(importStr)(*args, **kwargs)",
"def get_custom_function(function_name: str, args_list: List[str]):\n\n parts = function_name.split(\".\")\n module_path = \".\".join(parts[:-1])\n function_name = parts[-1]\n try:\n mod = importlib.import_module(module_path)\n transform_function = getattr(mod, function_name)\n except ModuleNotFoundError:\n raise InvalidFunctionError(f\"Invalid module for a custom function: '{function_name}'\")\n except ValueError:\n raise InvalidFunctionError(f\"Invalid module for a custom function: '{function_name}'\")\n except AttributeError:\n raise InvalidFunctionError(f\"Invalid name for a custom function: '{function_name}'\")\n\n return transform_function(*args_list)",
"def import_string(import_name):\n # force the import name to automatically convert to strings\n # __import__ is not able to handle unicode strings in the fromlist\n # if the module is a package\n import_name = str(import_name).replace(':', '.')\n\n try:\n __import__(import_name)\n except ImportError:\n if '.' not in import_name:\n raise\n else:\n return sys.modules[import_name]\n\n module_name, obj_name = import_name.rsplit('.', 1)\n try:\n module = __import__(module_name, None, None, [obj_name])\n except ImportError:\n # support importing modules not yet set up by the parent module\n # (or package for that matter)\n module = import_string(module_name)\n\n try:\n return getattr(module, obj_name)\n except AttributeError as e:\n raise ImportError(e)",
"def import_object(import_str, *args, **kw):\n try:\n __import__(import_str)\n return sys.modules[import_str]\n except ImportError:\n cls = import_class(import_str)\n return cls(*args, **kw)",
"def parse_function_line(line: str):\n without_call = (line.split(\"function\")[1]).strip()\n split_array = without_call.split()\n function = split_array[0]\n return function",
"def funcstring(funcname):\n s = str(funcname)[10:] #chop off '<function '\n spi = s.index(' ')\n return s[:spi]"
]
| [
"0.7775118",
"0.71505874",
"0.6975363",
"0.6941111",
"0.6877164",
"0.6877164",
"0.6877164",
"0.6561287",
"0.6522173",
"0.6454434",
"0.6385483",
"0.6366869",
"0.636331",
"0.6330619",
"0.6164649",
"0.60861504",
"0.6066642",
"0.6003852",
"0.5984985",
"0.59510297",
"0.59288377",
"0.5925172",
"0.5857315",
"0.58451",
"0.5831198",
"0.58016294",
"0.57922864",
"0.5776276",
"0.57347745",
"0.57292616"
]
| 0.78207976 | 0 |
Sort the list of graph nodes according to their degree. | def sorted_nodes_list(self):
full_sorted_node_list = map(lambda k: k[0], sorted(self.graph.degree(),
key=lambda k: k[1], reverse=True))
return full_sorted_node_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sort_nodes(center_node, nodes):\n def cmp(n1, n2):\n angle1 = (math.degrees(center_node.angle_to(n1)) + 360.) % 360\n angle2 = (math.degrees(center_node.angle_to(n2)) + 360.) % 360\n\n if angle1 < angle2:\n return -1\n elif angle1 == angle2:\n return 0\n else:\n return 1\n return sorted(nodes, cmp=cmp)",
"def degeneracy_ordering(self):\n return sorted(self._adjacency_map, key=self.degree)",
"def walk_sort(edges):\n g = nx.Graph()\n g.add_edges_from(edges)\n connected = set()\n degree = nx.degree(g)\n ordering = []\n while degree:\n next = max_degree_node(g, degree, connected)\n if next is not None:\n ordering.append(next)\n else:\n break\n return ordering",
"def topological_sort(self):\n in_degree = {}\n for node in self.graph:\n in_degree[node] = 0\n\n for from_node in self.graph:\n for to_node in self.graph[from_node]:\n in_degree[to_node] += 1\n\n queue = deque()\n for node in in_degree:\n if in_degree[node] == 0:\n queue.appendleft(node)\n\n sorted_nodes = []\n while queue:\n independent_node = queue.pop()\n sorted_nodes.append(independent_node)\n for next_node in self.graph[independent_node]:\n in_degree[next_node] -= 1\n if in_degree[next_node] == 0:\n queue.appendleft(next_node)\n\n if len(sorted_nodes) == len(self.graph):\n return sorted_nodes\n else:\n raise ValueError('graph is not acyclic')",
"def sorted_nodes(self):\n if self._sorted_nodes is None:\n self.sorting()\n return self._sorted_nodes",
"def toposorted(self):\n order = []\n colors = {node: \"white\" for node in self._neighbors}\n\n def visit(node):\n assert colors[node] == \"white\"\n colors[node] = \"gray\"\n for neighbor in self._neighbors[node]:\n if colors[neighbor] == \"white\":\n visit(neighbor)\n elif colors[neighbor] == \"gray\":\n raise CyclicGraphError(\n \"Cycle involving {!r} and {!r} detected\".format(node, neighbor)\n )\n order.append(node)\n colors[node] = \"black\"\n\n for node in self._neighbors:\n if colors[node] == \"white\":\n visit(node)\n return order",
"def topological_sort(self):\n \n visited = set()\n sorted_node = [] \n\n # sort all the node in the graph\n for i in self.node_set: \n if i not in visited: \n visited = self.topological_sort_helper(i, visited, sorted_node) \n \n visited.clear()\n return sorted_node",
"def angle_sort_adjacent_nodes(self,n,ref_nbr=None):\n nbrs=self.node_to_nodes(n)\n if len(nbrs)==0:\n return []\n diffs=self.nodes['x'][nbrs] - self.nodes['x'][n]\n angles=np.arctan2(diffs[:,1],diffs[:,0])\n nbrs=nbrs[np.argsort(angles)]\n if ref_nbr is not None: \n i=list(nbrs).index(ref_nbr)\n nbrs=np.roll(nbrs,-i)\n return nbrs",
"def resort(self):\n self.items.sort(key=lambda node: node.path_weight, reverse=True)",
"def topo_sort(self):\n # TODO: detect cycles\n self.find_reachable_nodes()\n # save list of nodes in topo order\n self.nodes = []\n # assign each node an id field incrementally\n cur_id = 0\n # count visited outgoing edges for each node\n unvisited = {}\n for nid, node in list(self.found.items()):\n unvisited[nid] = node.nout\n queue = [self.root]\n #print >>sys.stderr, '+++'\n while queue:\n # take off nodes whose all outgoing edges are visited from\n # queue head\n node = queue.pop(0)\n self.nodes.append(node)\n node.hg = self\n node.id = cur_id\n cur_id += 1\n for edge in node.incoming:\n edge.hg = self\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n unvisited[id(tailnode)] -= 1\n if unvisited[id(tailnode)] == 0:\n queue.append(tailnode)\n self.sanity_check()\n self.tasks_done.add('topo_sort')",
"def sort(self, by='dependency'):\n\n nodes_ordered = []\n if by == \"dependency\":\n for node in self.nodes:\n insert_idx = len(nodes_ordered)\n for node_dependency in node.dependencies:\n for idx, node_ordered in enumerate(nodes_ordered):\n if (idx <= insert_idx) and (node_dependency.id == node_ordered.id):\n insert_idx = idx + 1 # place the node after the dependency\n nodes_ordered.insert(insert_idx, node)\n else:\n err_msg = \"Sorting strategy '{}' unknown \".format(by)\n raise ValueError(err_msg)\n\n return Graph.from_list(nodes_ordered)",
"def toposort(adj):\n # Memoize for visited vertex\n used = [0] * len(adj)\n order = []\n # write your code here\n # Traverse through each vertex\n for i in range(len(adj)):\n if not used[i]:\n # If not visited, run dfs\n dfs(adj, used, order, i)\n\n # Reverse the order list to show in descending order\n order.reverse()\n return order",
"def nodes(self):\n data = list(self._nodes.values())\n data.sort()\n return data",
"def sortEdges( self, listEdges ):\r\n changed = True\r\n while changed:\r\n changed = False\r\n for i in range( len(listEdges)-1 ):\r\n if listEdges[ i ].cost > listEdges[ i+1 ].cost:\r\n changed = True\r\n aux = listEdges[ i ]\r\n listEdges[ i ] = listEdges[ i+1 ]\r\n listEdges[ i+1 ] = aux",
"def sort_nodes(self):\n non_terminal_nodes = []\n for node in self.nodes:\n if not node.entries:\n assert self.start is None, (\n 'there are more than one node with no incoming arcs')\n self.start = node\n elif not node.exits:\n assert self.end is None, (\n 'there are more than one node with no outgoing arcs')\n self.end = node\n else:\n non_terminal_nodes.append(node)\n assert self.start is not None and self.end is not None, (\n 'no start or end node')\n self.nodes = ([self.start]\n + sorted(non_terminal_nodes,\n key=lambda x: (x.entry, x.sym))\n + [self.end])\n for n in self.nodes:\n n.exits.sort(key=lambda x: (x.dest.entry, x.dest.sym))",
"def getDegrees(self):\n l = []\n for node in self.getNodes():\n l.append((node, len(self.graph[node])))\n\n return l",
"def sort_nodes(self):\n nodes = self._chain.root_node.ordered_subnodes_hierarchy()\n self._chain.nodes = nodes",
"def targeted_order(ugraph):\r\n # copy the graph\r\n new_graph = copy_graph(ugraph)\r\n \r\n order = [] \r\n while len(new_graph) > 0:\r\n max_degree = -1\r\n for node in new_graph:\r\n if len(new_graph[node]) > max_degree:\r\n max_degree = len(new_graph[node])\r\n max_degree_node = node\r\n \r\n neighbors = new_graph[max_degree_node]\r\n new_graph.pop(max_degree_node)\r\n for neighbor in neighbors:\r\n new_graph[neighbor].remove(max_degree_node)\r\n\r\n order.append(max_degree_node)\r\n return order",
"def nodes(self):\n return self.sort_dict(self.trajectory_data)",
"def _sorting(self, notsorted_list, predecessors):\n remaining_nodes = []\n sorted_part = []\n for nd in notsorted_list:\n if not predecessors[nd.name]:\n sorted_part.append(nd)\n else:\n remaining_nodes.append(nd)\n return sorted_part, remaining_nodes",
"def find_topo_sort(node_list):\r\n visited = set()\r\n topo_order = []\r\n #print(node_list)\r\n for node in node_list:\r\n topo_sort_dfs(node, visited, topo_order)\r\n return topo_order",
"def sort(points):\n if len(points) == 0:\n return []\n \n starting_vertex = min(points)\n reference_point = starting_vertex + Point2D(0, 1)\n \n return sorted(points, key=partial(\n get_angle_and_distance, point_2=starting_vertex, point_3=reference_point\n ))",
"def topological_sort(self, graph=None):\n if graph is None:\n graph = self.graph\n\n in_degree = {}\n for u in graph:\n in_degree[u] = 0\n\n for u in graph:\n for v in graph[u]:\n in_degree[v] += 1\n\n queue = deque()\n for u in in_degree:\n if in_degree[u] == 0:\n queue.appendleft(u)\n\n l = []\n while queue:\n u = queue.pop()\n l.append(u)\n for v in graph[u]:\n in_degree[v] -= 1\n if in_degree[v] == 0:\n queue.appendleft(v)\n\n if len(l) == len(graph):\n return l\n else:\n raise ValueError(\"graph is not acyclic\")",
"def targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n\n order = []\n while len(new_graph) > 0:\n max_degree = -1\n for node in new_graph:\n if len(new_graph[node]) > max_degree:\n max_degree = len(new_graph[node])\n max_degree_node = node\n\n neighbors = new_graph[max_degree_node]\n new_graph.pop(max_degree_node)\n for neighbor in neighbors:\n new_graph[neighbor].remove(max_degree_node)\n\n order.append(max_degree_node)\n return order",
"def targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n\n order = []\n while len(new_graph) > 0:\n max_degree = -1\n for node in new_graph:\n if len(new_graph[node]) > max_degree:\n max_degree = len(new_graph[node])\n max_degree_node = node\n\n neighbors = new_graph[max_degree_node]\n new_graph.pop(max_degree_node)\n for neighbor in neighbors:\n new_graph[neighbor].remove(max_degree_node)\n\n order.append(max_degree_node)\n return order",
"def _sort_nodes_by_height(self):\n self.node_high_to_low = np.argsort(self.height)[::-1]\n\n # Also to sort neighbour node array by height\n\n neighbour_array_lo_hi = self.neighbour_array.copy() # easiest way to get size / structure right\n\n for node in range(0,self.tri.npoints):\n heights = self.height[self.neighbour_array[node]]\n neighbour_array_lo_hi[node] = self.neighbour_array[node][np.argsort(heights)]\n \n self.neighbour_array_lo_hi = neighbour_array_lo_hi",
"def in_degree_sequence(self):\n return sorted(self.in_degree_iterator(), reverse=True)",
"def nodes_in_topological_order(self):\n if not self.sorted:\n self._topological_sort()\n return self._topological_order",
"def targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n \n order = []\n while len(new_graph) > 0:\n max_degree = -1\n for node in new_graph:\n if len(new_graph[node]) > max_degree:\n max_degree = len(new_graph[node])\n max_degree_node = node\n \n neighbors = new_graph[max_degree_node]\n new_graph.pop(max_degree_node)\n for neighbor in neighbors:\n new_graph[neighbor].remove(max_degree_node)\n \n order.append(max_degree_node)\n return order",
"def sorted_nodes(self):\r\n def is_source(node, connections):\r\n for connection in connections:\r\n if node == connection[1]:\r\n return False\r\n return True\r\n\r\n def source_connections(node, connections):\r\n conns = set()\r\n for connection in connections:\r\n if node == connection[0]:\r\n conns.add(connection)\r\n return conns\r\n\r\n nodes = set(self.nodes.values())\r\n connections = self.connections.copy()\r\n sorted_nodes = []\r\n\r\n # Find source nodes:\r\n source_nodes = set([n for n in nodes if is_source(n, connections)])\r\n\r\n # while S is non-empty do\r\n while source_nodes:\r\n # remove a node n from S\r\n node = source_nodes.pop()\r\n # insert n into L\r\n sorted_nodes.append(node)\r\n\r\n # for each node m with an edge e from n to m do\r\n s_connections = source_connections(node, connections)\r\n for connection in s_connections:\r\n # remove edge e from the graph\r\n m = connection[1]\r\n connections.remove(connection)\r\n # if m has no other incoming edges then\r\n # insert m into S\r\n if is_source(m, connections):\r\n source_nodes.add(m)\r\n\r\n # if graph has edges then\r\n # output error message (graph has at least one cycle)\r\n # else\r\n # output message (proposed topologically sorted order: L)\r\n\r\n if connections:\r\n raise Exception(\"Steram has at least one cycle (%d connections left of %d)\" % (len(connections), len(self.connections)))\r\n\r\n return sorted_nodes"
]
| [
"0.69372827",
"0.690695",
"0.66759104",
"0.6506415",
"0.6314595",
"0.62190104",
"0.6212803",
"0.6203003",
"0.6167888",
"0.6120634",
"0.61173314",
"0.6100976",
"0.6086741",
"0.6075209",
"0.6054904",
"0.6052559",
"0.59813607",
"0.5955287",
"0.5944533",
"0.5939927",
"0.59241736",
"0.5920332",
"0.5906263",
"0.59025496",
"0.59025496",
"0.5899518",
"0.5888427",
"0.58697635",
"0.5866966",
"0.5798919"
]
| 0.7583487 | 0 |
Utility function that reads test_case info from a JSON file. | def __read_test_case(test_case):
# type: (str) -> Optional[dict]
with open('data/calculator.json') as json_file:
data = json.load(json_file)
return data[test_case] if data[test_case] else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_load_file_contents():\n\n file_name = 'test_fooof_all'\n loaded_data = load_json(file_name, TEST_DATA_PATH)\n\n # Check settings\n for setting in OBJ_DESC['settings']:\n assert setting in loaded_data.keys()\n\n # Check results\n for result in OBJ_DESC['results']:\n assert result in loaded_data.keys()\n\n # Check results\n for datum in OBJ_DESC['data']:\n assert datum in loaded_data.keys()",
"def test_json_reader_data_contents(process_data):\n json_data = process_data(file_name_or_type='scooter_data.json')\n for val in json_data:\n assert(isinstance(val['id'], int))\n assert(isinstance(val['name'], str))\n assert(isinstance(val['vin_number'], str))\n assert(isinstance(val['electric_scooter'], bool))\n assert(isinstance(val['city'], str))\n assert(isinstance(val['usage'], str))\n assert(isinstance(val['cost_usd'], float))\n assert(isinstance(val['total_years_of_use'], int))",
"def test_loader_loads_from_file():\n base_json = 'tests/test_json.json'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json) == json_test",
"def test_json_file(self):\n #response = os.system(\"python3 client.py -f filename.csv\")\n response = client.result(False, 'json', 'unittest',file = 'test_file.csv')\n response = json.loads(response)\n first_name = response['person'][0]['first_name']\n self.assertEqual(first_name,'John','Should print John')\n length = len(response['person'])\n for count in range(0,length):\n self.assertNotIn('nationality',response['person'][count], 'Nationality should not be present')",
"def test_load_json_str():\n\n file_name = 'test_fooof_all'\n\n data = load_json(file_name, TEST_DATA_PATH)\n\n assert data",
"def test_load_json_fobj():\n\n file_name = 'test_fooof_all'\n\n with open(os.path.join(TEST_DATA_PATH, file_name + '.json'), 'r') as f_obj:\n data = load_json(f_obj, '')\n\n assert data",
"def read_test_data(self):\n with open(self.report_path) as report_file:\n return json.load(report_file)",
"def read_json_breakdown(cls, fname):\n if not os.path.exists(fname):\n raise RuntimeError\n\n with open(fname, 'r') as data_file:\n return cls.fixup_from_json(data_file.read())",
"def test_json(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(input_file_path), 'r') as input_file:\n with open(attach_path(answer_file_path), 'r') as answer_file:\n assert str(read_json(input_file.read().strip())) == answer_file.read().strip()",
"def load_json_fixture(filename: str) -> Any:\n return json.loads(load_fixture(f\"jellyfin/{filename}\"))",
"def _read_json_file(self):\n with open(self.subcfgfilename) as json_file:\n json_string = json_file.read()\n json_data = json.loads(json_string)\n return(json_data)",
"def test_input_loadjson(self, fname, groups, hosts):\n with open(fname,'r') as fd:\n fcon = json.loads(fd.read())\n inventory = Inventory()\n inventory.load_inventoryjson(fcon)\n assert inventory.count_groups() == len(groups)\n assert inventory.count_hosts() == len(hosts)",
"def load_data(name):\n with open(f\"tests/data/{name}.json\", \"r\") as json_file:\n return json.load(json_file)",
"def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')",
"def _get_cases_from_json_file_given_problem_type(path, problemNumber, caseType):\n return get_cases_from_json(fileops.get_json_dict(path), problemNumber,\n caseType)",
"def get_cases_from_json_file(path):\n # Get the problem number from the path\n problemTypeTuple = _get_file_problemnumber_type_tuple(path)\n\n return _get_cases_from_json_file_given_problem_type(path, \n problemTypeTuple[0], problemTypeTuple[1])",
"def _read_spec_test(filename):\n with open(filename, \"rt\") as file:\n source = file.read()\n return ParsedSpecTest.parse(source)",
"def example_json_file42():\n path = dirname(__file__)\n with open(join(\n path,\n 'data',\n 'datacite-v4.2-full-example.json')) as file:\n return file.read()",
"def example_json_file43():\n path = dirname(__file__)\n with open(join(\n path,\n 'data',\n 'datacite-v4.3-full-example.json')) as file:\n return file.read()",
"def _load_test_configs(filename, required_keys):\n # type: (str, List[str]) -> List[Dict]\n with open(filename, 'r') as f:\n tests = json.loads(f.read())\n _validate_test_configs(tests, filename, required_keys)\n return tests",
"def _json_probe(srcfile):\n return json.loads(__run(srcfile))",
"def getTrialData():\n with open('data/trialdata.txt', 'r') as f:\n data = json.load(f)\n return data",
"def test_get_json_spec(self):\n pass",
"def test_load_jsonlines():\n\n res_file_name = 'test_fooofgroup_res'\n\n for data in load_jsonlines(res_file_name, TEST_DATA_PATH):\n assert data",
"def info(self):\n if self.file:\n parts = str(self.file).split('/')\n return {\n 'test-name': parts[-2],\n 'case-name': parts[-1].split('.')[0],\n }\n return {}",
"def read_json_fixture(filename: str) -> Dict[str, str]:\n fixture_path = Path(__file__).resolve().parents[0] / f\"fixtures/{filename}\"\n with open(fixture_path, mode=\"r\") as f:\n data = json.load(f)\n return data",
"def test_json(self):\n\n cases_dir = pathlib.Path(__file__).parent / 'cases'\n\n asn_strs = {\n asn_path.stem: asn_path.read_text()\n for asn_path in cases_dir.glob('*.asn')\n }\n json_strs = {\n json_path.stem: json_path.read_text()\n for json_path in cases_dir.glob('*.json')\n }\n\n assert set(asn_strs.keys()) == set(json_strs.keys())\n assert len(asn_strs) > 0\n\n for key in asn_strs:\n with self.subTest(key=key):\n res_json = asn1vnparser.parse_asn1_value_assignment(\n asn_strs[key], as_json=True)\n res_py = json.loads(res_json)\n self.maxDiff = None\n self.assertEqual(res_py, json.loads(json_strs[key]))",
"def import_sample(infile):\n deserialized = None\n with open(infile, 'r') as file_handle:\n deserialized = json.load(file_handle, object_hook=decode_sample)\n return deserialized",
"def example_json_file41():\n path = dirname(__file__)\n with open(join(\n path,\n 'data',\n 'datacite-v4.1-full-example.json')) as file:\n return file.read()",
"def read_classification_json(fn):\n with open(fn) as f:\n classification_data = json.load(f)\n f.close()\n \n return classification_data"
]
| [
"0.6807032",
"0.6623511",
"0.6512052",
"0.64928055",
"0.6431132",
"0.642324",
"0.63693535",
"0.63231933",
"0.6318227",
"0.6315409",
"0.63148236",
"0.6303362",
"0.62995434",
"0.62879974",
"0.6273989",
"0.6255432",
"0.62515926",
"0.62435913",
"0.61618984",
"0.6160712",
"0.6159245",
"0.6155886",
"0.6143569",
"0.6118683",
"0.6105737",
"0.6086799",
"0.60653",
"0.60644007",
"0.60399455",
"0.6039609"
]
| 0.77058786 | 0 |
Print the DMA test result and append it to the results list. | def _process_dma_result(compute_node, testfunc,
result, results_list, node):
if result:
logger.info(
'Test case for {0} with DMA PASSED on {1}.'.format(
node, testfunc))
else:
logger.error(
'Test case for {0} with DMA FAILED on {1}.'.format(
node, testfunc))
results_list.append((compute_node, "DMA", testfunc, result)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _print_result_of_dma(compute_ids, results):\n compute_node_names = ['Node-{}'.format(i) for i in range(\n len((compute_ids)))]\n all_computes_in_line = ''\n for compute in compute_node_names:\n all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))\n line_of_nodes = '| Test ' + all_computes_in_line + '|'\n logger.info('=' * 70)\n logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')\n logger.info(\n '|' + ' ' * ((9*len(compute_node_names))/2)\n + ' DMA TEST '\n + ' ' * (\n 9*len(compute_node_names) - (9*len(compute_node_names))/2)\n + '|')\n logger.info(\n '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))\n logger.info(line_of_nodes)\n logger.info(\n '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))\n\n testname = \"DMA\"\n print_line = ''\n for id in compute_ids:\n all_result = \\\n 'FAIL' if [\n testfunc for comp_id, testname, testfunc, res in results\n if comp_id == id and not res] else 'PASS'\n print_line += '| ' + all_result + ' '\n logger.info(\n '| {}'.format(testname) + (' ' * (15 - len(testname)))\n + print_line + '|')\n\n for testfunc in ['Server', 'InfoFetch']:\n print_line = ''\n for id in compute_ids:\n if (id, testname, testfunc, True) in results:\n print_line += ' PASS |'\n elif (id, testname, testfunc, False) in results:\n print_line += ' FAIL |'\n else:\n print_line += ' SKIP |'\n logger.info(\n '| {}'.format(testfunc) + (' ' * (14-len(testfunc)))\n + '|' + print_line)\n\n logger.info(\n '+' + ('-' * 16) + '+'\n + (('-' * 8) + '+') * len(compute_node_names))\n logger.info('=' * 70)",
"def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]",
"def print_results(self):\n pass",
"def report(self, results):\n self.notice(\"Test Report\\n\")\n\n for count, group in enumerate(results, 1):\n results = (self._format_test(test, res) for test, res in group)\n results = (', ').join(results)\n self.notice(\"Test group %s:\\t%s\" % (count, results))\n\n self.divider()",
"def print_results(self, data: SimData) -> None:\n pass",
"def print_result_list(self, results=None):\n if (results is None) and (self.interesting == 0):\n results = self.interesting\n elif len(self.interesting) == 0:\n # if there are no results print this and end this mehtod,\n # otherwise it would be attempted to iterate over an empty\n # array\n print(\"No results available\")\n return\n for i, item in enumerate(self.interesting, start=self.offset):\n self.print_res(item, i)\n print()",
"def print_results(results):\n print(f\"Intial Entries: {results[0]}\")\n print(f\"Added Entries: {results[1]}\")\n print(f\"Final Entries: {results[2]}\")\n print(f\"Total Run Time: {results[3]}\")\n print(\"\\n\")",
"def printResults(self, resultsList):\n for line in resultsList:\n print str(line)",
"def result(self, result):\n self.stdout.write('RESULT {0}\\n{1}'.format(len(result), result))\n self.stdout.flush()",
"def getTestResults():",
"def result(self, result):\n print(result)",
"def display_results():\n pass",
"def print_results(results):\n print()\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(\"%% RESULTS %%\")\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print()\n print(\"Route \\t Cells \\t\")\n print(\"Length\\tChecked\\t Time\")\n print(\"--------------------------------\")\n print(\"{0}\\t{1}\\t{2}\".format(*results))\n print()",
"def print_result(self):\n print(\"Final results: \")\n for i in range(1, len(self.agents) + 1):\n agent = self.agents[i-1]\n print(agent.name + \": {} wins\".format(self.results[agent.name]))",
"def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0",
"def print_result(self, header, result, suffix=\"views\"):\n print(header)\n for i in range(len(result)):\n print(' ', i + 1, '.', result[i][0], '--', result[i][1], suffix)\n print(\"\\n\")",
"def run(self, result, debug=False):\n rv = unittest.TestSuite.run(self, result, debug)\n sys.stdout.flush()\n return rv",
"def get_result_printer(self):\n if isinstance(self.get_output, list):\n for value, key in self.get_output:\n print(value, key)\n else:\n for key, value in self.get_output.items():\n print(value, key)",
"def test_present_results_displays_results(self):\n # to test this we don't actually need to write to the database,\n # we just need a list of ordered_dicts in menu.records\n test_records = [\n OrderedDict([\n ('name', 'Test Employee 1'),\n ('date', datetime.date(2018, 5, 1)),\n ('task_name', 'Test Task 1'),\n ('duration', 1),\n ('notes', 'This is a note for the first test task')\n ]),\n OrderedDict([\n ('name', 'Test Employee 2'),\n ('date', datetime.date(2018, 5, 2)),\n ('task_name', 'Test Task 2'),\n ('duration', 2),\n ('notes', 'This is a note for the second test task')\n ]),\n ]\n self.menu.records = [test_records[0]]\n f_username = test_records[0]['name']\n f_date = test_records[0]['date'].strftime(\"%Y-%m-%d\")\n f_time_taken = str(test_records[0]['duration'])\n f_task_name = test_records[0]['task_name']\n f_notes = test_records[0]['notes']\n short_form = \"{}: {} ({}m): {} | {}\".format(\n f_username,\n f_date,\n f_time_taken,\n f_task_name,\n f_notes\n )\n expected_output = (\"\\nSearch Results\\n\" +\n \"1) {}\\n\".format(short_form) +\n \"\\n\" +\n \"Available actions:\\n\" +\n \"v) View detail\\n\" +\n \"e) Edit\\n\" +\n \"d) Delete\\n\" +\n \"m) go back to Main menu\\n\" +\n \"q) quit\\n\")\n\n '''The process for capturing `print()` statements and redirecting to\n an accumulating object for later processing has the following steps:\n 1. import io and sys\n 2. in the test function, create a StringIO object\n (this is a buffer object that will be the destination for the\n redirected stdout)\n ```\n captured_output = io.StringIO()\n ```\n 3. point stdout at the capture object\n ```\n sys.stdout = captured_output\n ```\n 4. Run code as normal, any print() statement will go to\n the StringIO object instead of standard out\n 5. Revert stdout (will not affect the contents of the StringIO buffer)\n ```\n sys.stdout = sys.__stdout__\n ```\n 6. Run the rest of the code. The contents of the StringIO buffer can\n be accessed as follows:\n ```\n captured_output.getvalue()\n ```\n '''\n # Create a StringIO object to be a capture object\n captured_output = io.StringIO()\n # point stdout at the capture object\n sys.stdout = captured_output\n # Do anything that's going to have a print statement\n # (these will be accumulated in the captured_output object)\n example_input = 'q'\n with patch('builtins.input', side_effect=example_input):\n self.menu.present_results()\n\n # Revert stdout (captured_output still holds the captured items)\n sys.stdout = sys.__stdout__\n # Do any other test code (e.g., asserts)\n self.assertEqual(expected_output, captured_output.getvalue())",
"def print_results(results):\n data = []\n for idx in results.index:\n item = results.loc[idx]\n row = (str(item.gid), \n re.sub(r'\\n', ' ', item.creators),\n re.sub(r'[\\n\\r]+', ' ', item.title), \n gut_utf8.format(item.gid))\n data.append(row)\n _ = [print('|'.join(row)) for row in data]\n print('#', len(data), \"items returned\")\n return data",
"def print_results(self, out_file):\n extra_results = [\n # Total test methods processed, excluding reruns.\n [\"Test Methods\", len(self.result_events)],\n [\"Reruns\", self.test_method_rerun_count]]\n\n # Output each of the test result entries.\n categories = [\n # result id, printed name, print matching tests?, detail label\n [EventBuilder.STATUS_SUCCESS,\n \"Success\", False, None],\n [EventBuilder.STATUS_EXPECTED_FAILURE,\n \"Expected Failure\", False, None],\n [EventBuilder.STATUS_FAILURE,\n \"Failure\", True, \"FAIL\"],\n [EventBuilder.STATUS_ERROR,\n \"Error\", True, \"ERROR\"],\n [EventBuilder.STATUS_EXCEPTIONAL_EXIT,\n \"Exceptional Exit\", True, \"ERROR\"],\n [EventBuilder.STATUS_UNEXPECTED_SUCCESS,\n \"Unexpected Success\", True, \"UNEXPECTED SUCCESS\"],\n [EventBuilder.STATUS_SKIP, \"Skip\", False, None],\n [EventBuilder.STATUS_TIMEOUT,\n \"Timeout\", True, \"TIMEOUT\"],\n [EventBuilder.STATUS_EXPECTED_TIMEOUT,\n # Intentionally using the unusual hyphenation in TIME-OUT to\n # prevent buildbots from thinking it is an issue when scanning\n # for TIMEOUT.\n \"Expected Timeout\", True, \"EXPECTED TIME-OUT\"]\n ]\n\n # Partition all the events by test result status\n result_events_by_status = self._partition_results_by_status(\n categories)\n\n # Print the details\n have_details = self._has_printable_details(\n categories, result_events_by_status)\n if have_details:\n self._print_banner(out_file, \"Issue Details\")\n for category in categories:\n self._report_category_details(\n out_file, category, result_events_by_status)\n\n # Print the summary\n self._print_summary_counts(\n out_file, categories, result_events_by_status, extra_results)\n\n if self.options.dump_results:\n # Debug dump of the key/result info for all categories.\n self._print_banner(out_file, \"Results Dump\")\n for status, events_by_key in result_events_by_status.items():\n out_file.write(\"\\nSTATUS: {}\\n\".format(status))\n for key, event in events_by_key:\n out_file.write(\"key: {}\\n\".format(key))\n out_file.write(\"event: {}\\n\".format(event))",
"def getResults():",
"def test_get_results_verbose(self):\n\t\tpass",
"def _print_results(results, title=''):\n pstr = '[' + title + ']: ' if title else ''\n for k, v in results.items():\n pstr += '\\t{}: {}'.format(k, v)\n print(pstr)",
"def _collect_test_result(duthost, ptfhost, request):\n logger.info(\"Collecting test result and related information.\")\n # TODO : collect DUT test report\n _collect_sonic_os_and_platform_info(duthost, request)\n _collect_sai_test_report_xml(ptfhost, request)",
"def get_results(verbose=True):\n if verbose:\n for result in results:\n print(\"%s\\t%s\" % (result, results[result][\"description\"]))\n return results",
"def print_scan_result(target_list):\n print(\" IP\\t\\t\\t MAC Address\\n-----------------------------------------\")\n for element in target_list:\n print(element[\"ip\"] + \"\\t\\t\" + element[\"mac\"])",
"def __printResults(files, expected, actual, similarity):\n if (showIndividualResults):\n for i in range(len(files)):\n print \"\\nExpected = %s\\nActual = %s \\nSimilarity = %f\" % (expected[i], actual[i], similarity[i])\n print \"\\nMean Similarity = %f\" % np.mean(similarity)",
"def print_results():\n now_time = time.time()\n diff_time_in_sec = now_time - start_time\n generated_per_second = total / diff_time_in_sec\n generated_per_hour = 3600 * generated_per_second\n saved_per_second = success / diff_time_in_sec\n saved_per_hour = 3600 * saved_per_second\n\n os.system('cls' if os.name == 'nt' else 'clear')\n print(f\"{'Generated:' : <16}{total : <12}\")\n print(f\"{'New graphs:' : <16}{success : <12}\")\n print(f\"{'Success rate:' : <16}{round((success / total) * 100, 3) : <7} %\")\n print(f\"{'Speed:' : <16}{round(generated_per_hour) : <7} graphs/h\")\n print(f\"{'Save speed:' : <16}{round(saved_per_hour) : <7} graphs/h\")",
"def print_result(result, label):\n print(\"\\n\", label)\n print(\"\\n\", result)\n # your code"
]
| [
"0.73346746",
"0.6699405",
"0.66695684",
"0.6512745",
"0.6477566",
"0.64454085",
"0.6340183",
"0.63031113",
"0.623756",
"0.6223393",
"0.6161598",
"0.6154046",
"0.6126515",
"0.60709935",
"0.6060305",
"0.60369337",
"0.6033305",
"0.60281086",
"0.593483",
"0.58824515",
"0.58492583",
"0.5838389",
"0.5801633",
"0.5786272",
"0.57379943",
"0.5732748",
"0.57295346",
"0.5725578",
"0.57143134",
"0.5703907"
]
| 0.725888 | 1 |
Print results of DMA. | def _print_result_of_dma(compute_ids, results):
compute_node_names = ['Node-{}'.format(i) for i in range(
len((compute_ids)))]
all_computes_in_line = ''
for compute in compute_node_names:
all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
line_of_nodes = '| Test ' + all_computes_in_line + '|'
logger.info('=' * 70)
logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
logger.info(
'|' + ' ' * ((9*len(compute_node_names))/2)
+ ' DMA TEST '
+ ' ' * (
9*len(compute_node_names) - (9*len(compute_node_names))/2)
+ '|')
logger.info(
'+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
logger.info(line_of_nodes)
logger.info(
'+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
testname = "DMA"
print_line = ''
for id in compute_ids:
all_result = \
'FAIL' if [
testfunc for comp_id, testname, testfunc, res in results
if comp_id == id and not res] else 'PASS'
print_line += '| ' + all_result + ' '
logger.info(
'| {}'.format(testname) + (' ' * (15 - len(testname)))
+ print_line + '|')
for testfunc in ['Server', 'InfoFetch']:
print_line = ''
for id in compute_ids:
if (id, testname, testfunc, True) in results:
print_line += ' PASS |'
elif (id, testname, testfunc, False) in results:
print_line += ' FAIL |'
else:
print_line += ' SKIP |'
logger.info(
'| {}'.format(testfunc) + (' ' * (14-len(testfunc)))
+ '|' + print_line)
logger.info(
'+' + ('-' * 16) + '+'
+ (('-' * 8) + '+') * len(compute_node_names))
logger.info('=' * 70) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_results(self):\n pass",
"def print_results(self, data: SimData) -> None:\n pass",
"def print_out():\n pass",
"def _process_dma_result(compute_node, testfunc,\n result, results_list, node):\n if result:\n logger.info(\n 'Test case for {0} with DMA PASSED on {1}.'.format(\n node, testfunc))\n else:\n logger.error(\n 'Test case for {0} with DMA FAILED on {1}.'.format(\n node, testfunc))\n results_list.append((compute_node, \"DMA\", testfunc, result))",
"def display_results():\n pass",
"def stdout(self):\n pass",
"def print_results(classes, output_file): \n for DataClass in classes:\n DataClass.datalock.acquire()\n output_file.write(\"--- \" + DataClass.__name__ + \" ---\\n\")\n for data in DataClass.data:\n output_file.write(data + repr(DataClass.data[data]) + '\\n')\n output_file.write('\\n')\n DataClass.datalock.release()\n return",
"def print_queue(self):\n for value in self.data:\n element = f'| {value} |'\n print(element)",
"def printOutput(self):\n pass",
"def print_contents(self):\n try:\n # We only wait for 0.001 seconds.\n self.print_all_contents(indef_wait=False)\n except NotYourTurnError:\n # It's not our turn, so try again the next time this function is called.\n pass",
"def print_results(results):\n print()\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(\"%% RESULTS %%\")\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print()\n print(\"Route \\t Cells \\t\")\n print(\"Length\\tChecked\\t Time\")\n print(\"--------------------------------\")\n print(\"{0}\\t{1}\\t{2}\".format(*results))\n print()",
"def display():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # Call the write method with sys.stdout as the file.\n write(file=sys.stdout)",
"def print_result(self, header, result, suffix=\"views\"):\n print(header)\n for i in range(len(result)):\n print(' ', i + 1, '.', result[i][0], '--', result[i][1], suffix)\n print(\"\\n\")",
"def get_result_printer(self):\n if isinstance(self.get_output, list):\n for value, key in self.get_output:\n print(value, key)\n else:\n for key, value in self.get_output.items():\n print(value, key)",
"def print_queue(self):\n for i in self.Obs:\n print(i)",
"def genout(self):\n ch = self.buffer_output()\n while ch:\n print(ch, end='')\n ch = self.buffer_output()",
"def print_result(self):\n print('===' * 25, 'Questão 13'.center(75), '===' * 25, sep='\\n')\n self.process_data()\n print('---' * 25, ' Desenho finalizado com sucesso!', '---' *\n 25, 'Aluno: Francisco Camello'.rjust(75), sep=\"\\n\")\n\n turtle.done()",
"def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]",
"def print_output(data,alignments,file):\n print(\"######################################################################\")\n print(\"Task 1 : IBM model 1 and EM algorithm implementation ,with corpus @\",file)\n print(\"######################################################################\")\n\n for i in range(len(data)):\n print(\"English Sentence : \",data[i][\"en\"])\n print(\"Foreign Sentence : \",data[i][\"fr\"])\n print(\"Alignment : \",alignments[i])\n print(\"----------------------------------------------------------------------\")",
"def print(self, index):\n count=0\n start = self.head\n while start:\n if count==index:\n print(count, ' : ', start.getMember())\n break\n start=start.getLink()\n count+=1",
"def print_all_contents(self, *args, **kwargs):\n while self.has_to_print():\n # Try to print the first element in the queue.\n tar_to_print: str = self.print_queue[0].tar\n self.print_monitor.wait_turn(self, tar_to_print, *args, **kwargs)\n\n # Print all applicable values in the print_queue.\n while self.print_queue and (self.print_queue[0].tar == tar_to_print):\n msg: str = self.print_queue.popleft().msg\n print(msg, end=\"\", flush=True)\n\n # If True, then all of the output for extracting tar_to_print was in the queue.\n # Since we just finished printing all of it, we can move onto the next one.\n if self.is_output_done_enqueuing[tar_to_print]:\n # Let all of the other workers know that this worker is done.\n self.print_monitor.done_dequeuing_output_for_tar(self, tar_to_print)",
"def printResults(self):\n for tweet in self.tweets:\n print(tweet)\n print(\"---------------------\\n\")",
"def visualizar(self):\n print(self.queue)",
"def show(self):\n traverse = self.head\n while traverse.next != None:\n print(traverse.data)\n traverse = traverse.next\n print(traverse.data)",
"def _display_results(self):\n self._display_summary()\n self._display_domain_record()\n self._display_ip_record()\n self._display_cert_details()\n self._display_ti_data()\n self._display_screenshot()\n self._display_related_alerts()\n self._display_bookmarks()\n self._display_dns_results()\n self._display_hosts()\n self._display_flows()",
"def print(self):\n self.__print_local(self.dataset, 0)",
"def dma_main(bt_logger, conf, computes):\n global logger\n logger = bt_logger\n\n compute_ids = []\n agent_results = []\n for compute_node in computes:\n node_id = compute_node.get_id()\n compute_ids.append(node_id)\n\n agent_server_running = conf.is_dma_server_running(compute_node)\n agent_infofetch_running = (\n conf.is_dma_infofetch_running(compute_node) and\n conf.is_redis_running(compute_node))\n\n if agent_server_running:\n test_name = 'barotest'\n tmpfile = TEMP_DIR + '/' + test_name + '.conf'\n\n agent_config = conf.get_dma_config(compute_node)\n listen_ip = compute_node.get_ip()\n listen_port = agent_config.get('server').get('listen_port')\n amqp_host = agent_config.get('server').get('amqp_host')\n amqp_port = agent_config.get('server').get('amqp_port')\n amqp_user = agent_config.get('server').get('amqp_user')\n amqp_passwd = agent_config.get('server').get('amqp_password')\n rest_client = RestDMAClient(\n listen_ip, listen_port, '', '')\n pub_client = PubDMAClient(\n amqp_host, amqp_port, amqp_user,\n amqp_passwd)\n\n all_res = True\n for client in [rest_client, pub_client]:\n tests.test_dma_server_set_collectd(\n compute_node, tmpfile, logger, client)\n sleep_time = 1\n logger.info(\n 'Sleeping for {} seconds'.format(sleep_time)\n + ' before DMA server test...')\n time.sleep(sleep_time)\n res = conf.check_dma_dummy_included(\n compute_node, test_name)\n all_res = all_res and res\n\n _process_dma_result(\n compute_node.get_id(), 'Server',\n all_res, agent_results, compute_node.get_name())\n\n if agent_infofetch_running:\n test_name = 'barotest'\n resources = conf.create_testvm(compute_node, test_name)\n sleep_time = 5\n logger.info(\n 'Sleeping for {} seconds'.format(sleep_time)\n + ' before DMA infofetch test...')\n time.sleep(sleep_time)\n res = conf.test_dma_infofetch_get_data(\n compute_node, test_name)\n conf.delete_testvm(resources)\n\n _process_dma_result(\n compute_node.get_id(), 'InfoFetch',\n res, agent_results, compute_node.get_name())\n\n _print_result_of_dma(compute_ids, agent_results)\n\n for res in agent_results:\n if not res[3]:\n logger.error('Some tests have failed or have not been executed')\n logger.error('DMA test is Fail')\n return 1\n else:\n pass\n return 0",
"def printAll(self, cards, output):\r\n for (card, num) in cards.items():\r\n self.ts.addCards(card, num)\r\n self.ts.writeCards(output)",
"def Display(self, unused_args, result):\n util.PrettyPrint(result)",
"def Display(self, unused_args, result):\n util.PrettyPrint(result)"
]
| [
"0.6548923",
"0.62272567",
"0.61158216",
"0.60032797",
"0.5872914",
"0.5870539",
"0.5866138",
"0.5863228",
"0.5836675",
"0.58352697",
"0.5804191",
"0.5803823",
"0.57618093",
"0.57494736",
"0.57438606",
"0.57366985",
"0.57135874",
"0.5665557",
"0.5658675",
"0.5656593",
"0.56444263",
"0.5643268",
"0.5639476",
"0.5626029",
"0.56212395",
"0.56089413",
"0.5606695",
"0.5575191",
"0.55747294",
"0.55747294"
]
| 0.7032239 | 0 |
Check DMA of each compute node. | def dma_main(bt_logger, conf, computes):
global logger
logger = bt_logger
compute_ids = []
agent_results = []
for compute_node in computes:
node_id = compute_node.get_id()
compute_ids.append(node_id)
agent_server_running = conf.is_dma_server_running(compute_node)
agent_infofetch_running = (
conf.is_dma_infofetch_running(compute_node) and
conf.is_redis_running(compute_node))
if agent_server_running:
test_name = 'barotest'
tmpfile = TEMP_DIR + '/' + test_name + '.conf'
agent_config = conf.get_dma_config(compute_node)
listen_ip = compute_node.get_ip()
listen_port = agent_config.get('server').get('listen_port')
amqp_host = agent_config.get('server').get('amqp_host')
amqp_port = agent_config.get('server').get('amqp_port')
amqp_user = agent_config.get('server').get('amqp_user')
amqp_passwd = agent_config.get('server').get('amqp_password')
rest_client = RestDMAClient(
listen_ip, listen_port, '', '')
pub_client = PubDMAClient(
amqp_host, amqp_port, amqp_user,
amqp_passwd)
all_res = True
for client in [rest_client, pub_client]:
tests.test_dma_server_set_collectd(
compute_node, tmpfile, logger, client)
sleep_time = 1
logger.info(
'Sleeping for {} seconds'.format(sleep_time)
+ ' before DMA server test...')
time.sleep(sleep_time)
res = conf.check_dma_dummy_included(
compute_node, test_name)
all_res = all_res and res
_process_dma_result(
compute_node.get_id(), 'Server',
all_res, agent_results, compute_node.get_name())
if agent_infofetch_running:
test_name = 'barotest'
resources = conf.create_testvm(compute_node, test_name)
sleep_time = 5
logger.info(
'Sleeping for {} seconds'.format(sleep_time)
+ ' before DMA infofetch test...')
time.sleep(sleep_time)
res = conf.test_dma_infofetch_get_data(
compute_node, test_name)
conf.delete_testvm(resources)
_process_dma_result(
compute_node.get_id(), 'InfoFetch',
res, agent_results, compute_node.get_name())
_print_result_of_dma(compute_ids, agent_results)
for res in agent_results:
if not res[3]:
logger.error('Some tests have failed or have not been executed')
logger.error('DMA test is Fail')
return 1
else:
pass
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _process_dma_result(compute_node, testfunc,\n result, results_list, node):\n if result:\n logger.info(\n 'Test case for {0} with DMA PASSED on {1}.'.format(\n node, testfunc))\n else:\n logger.error(\n 'Test case for {0} with DMA FAILED on {1}.'.format(\n node, testfunc))\n results_list.append((compute_node, \"DMA\", testfunc, result))",
"def _print_result_of_dma(compute_ids, results):\n compute_node_names = ['Node-{}'.format(i) for i in range(\n len((compute_ids)))]\n all_computes_in_line = ''\n for compute in compute_node_names:\n all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))\n line_of_nodes = '| Test ' + all_computes_in_line + '|'\n logger.info('=' * 70)\n logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')\n logger.info(\n '|' + ' ' * ((9*len(compute_node_names))/2)\n + ' DMA TEST '\n + ' ' * (\n 9*len(compute_node_names) - (9*len(compute_node_names))/2)\n + '|')\n logger.info(\n '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))\n logger.info(line_of_nodes)\n logger.info(\n '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))\n\n testname = \"DMA\"\n print_line = ''\n for id in compute_ids:\n all_result = \\\n 'FAIL' if [\n testfunc for comp_id, testname, testfunc, res in results\n if comp_id == id and not res] else 'PASS'\n print_line += '| ' + all_result + ' '\n logger.info(\n '| {}'.format(testname) + (' ' * (15 - len(testname)))\n + print_line + '|')\n\n for testfunc in ['Server', 'InfoFetch']:\n print_line = ''\n for id in compute_ids:\n if (id, testname, testfunc, True) in results:\n print_line += ' PASS |'\n elif (id, testname, testfunc, False) in results:\n print_line += ' FAIL |'\n else:\n print_line += ' SKIP |'\n logger.info(\n '| {}'.format(testfunc) + (' ' * (14-len(testfunc)))\n + '|' + print_line)\n\n logger.info(\n '+' + ('-' * 16) + '+'\n + (('-' * 8) + '+') * len(compute_node_names))\n logger.info('=' * 70)",
"def _gpu_and_random(self, exprs):\n if not GPU:\n return False\n if not all(tell_deterministic(i) for i in exprs):\n return True\n\n return False",
"def check_cuda():\n if OS_VERSION[0] == \"Linux\":\n check_cuda_linux()\n elif OS_VERSION[0] == \"Windows\":\n check_cuda_windows()",
"def healthcheck(self):\n while True:\n time.sleep(NAMENODE_HEALTH_CHECK_INTERVAL)\n self.check_datanodes()",
"def test_get_node_hardware_fast(self):\n pass",
"def check(self):\n badCachePath = list()\n badCacheNode = list()\n cacheIn = getCacheInfoFromMaya()\n cacheInScene = cacheIn.getCacheFromScene()\n # get the templates\n\n if not TYPE == 'MULTI':\n cacheWorkTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_work')\n cachePublishTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_publish')\n mayaCachePublishTemplate = self.parent.app.get_template_by_name(\n 'maya_asset_publish_cache_multi')\n mayaCacheWorkTemplate = self.parent.app.get_template_by_name(\n 'maya_asset_work_cache_multi')\n else:\n cacheWorkTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_work')\n cachePublishTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_publish')\n mayaCachePublishTemplate = self.parent.app.get_template_by_name(\n 'maya_asset_publish_cache')\n mayaCacheWorkTemplate = self.parent.app.get_template_by_name(\n 'maya_asset_work_cache')\n\n for cacheFrom, cacheVal in cacheInScene.iteritems():\n\n fileNode = cacheVal\n for nodes, nodeVal in cacheVal.iteritems():\n for cacheNumber, cacheVal in nodeVal.iteritems():\n filePath = cacheVal['path']\n\n if cacheWorkTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n elif mayaCacheWorkTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n elif cachePublishTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n elif mayaCachePublishTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n else:\n badCachePath.append(pm.Path(filePath))\n badCacheNode.append(nodes)\n continue\n\n if not badCachePath:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = badCacheNode\n for node in badCachePath:\n self.addError(\"%s is not in the library\" % node)\n\n self.errorMessage = \"%s Cache not in library\" % (len(badCachePath))",
"def check(data_out_dfe, data_out_cpu, size):\n status = 0\n for i in range(size):\n if data_out_dfe[i] != data_out_cpu[i]:\n print str(data_out_dfe[i]) + \" != \" + str(data_out_cpu[i])\n status = status + 1\n return status",
"def check(iters):\n\treturn check_hypernet(iters[0]) and check_other(iters[1])",
"def is_compute(self, nb_iterations):\n return nb_iterations % self.nb_iterations_between_compute == 0",
"def check_component_transfer_completion(self):\n while self.check_transfer_component_map:\n for dest_node_obj, value in \\\n self.check_transfer_component_map.items():\n if value == True or value == \"Failed\":\n del self.check_transfer_component_map[dest_node_obj]\n self.logger.info(\"Processed one node transfer : %s, id : \\\n %s, status : %s\" %(dest_node_obj, \\\n dest_node_obj.get_id(), value))\n else:\n continue\n time.sleep(5)",
"def test_compute_node_get_all_uuid_marker(self):\n # create the compute nodes in the non-cell0 cells\n count = 0\n for cell in self.cell_mappings[1:]:\n for x in range(3):\n compute_node_uuid = getattr(uuids, 'node_%s' % count)\n with context.target_cell(self.ctxt, cell) as cctxt:\n node = objects.ComputeNode(\n cctxt, uuid=compute_node_uuid, host=compute_node_uuid,\n vcpus=2, memory_mb=2048, local_gb=128, vcpus_used=0,\n memory_mb_used=0, local_gb_used=0, cpu_info='{}',\n hypervisor_type='fake', hypervisor_version=10)\n node.create()\n count += 1\n\n # create a host mapping for the compute to link it to the cell\n host_mapping = objects.HostMapping(\n self.ctxt, host=compute_node_uuid, cell_mapping=cell)\n host_mapping.create()\n\n # now start paging with a limit of two per page; the first page starts\n # with no marker\n compute_nodes = self.host_api.compute_node_get_all(self.ctxt, limit=2)\n # assert that we got two compute nodes from cell1\n self.assertEqual(2, len(compute_nodes))\n for compute_node in compute_nodes:\n host_mapping = objects.HostMapping.get_by_host(\n self.ctxt, compute_node.host)\n self.assertEqual(uuids.cell1, host_mapping.cell_mapping.uuid)\n\n # now our marker is the last item in the first page\n marker = compute_nodes[-1].uuid\n compute_nodes = self.host_api.compute_node_get_all(\n self.ctxt, limit=2, marker=marker)\n # assert that we got the last compute node from cell1 and the first\n # compute node from cell2\n self.assertEqual(2, len(compute_nodes))\n host_mapping = objects.HostMapping.get_by_host(\n self.ctxt, compute_nodes[0].host)\n self.assertEqual(uuids.cell1, host_mapping.cell_mapping.uuid)\n host_mapping = objects.HostMapping.get_by_host(\n self.ctxt, compute_nodes[1].host)\n self.assertEqual(uuids.cell2, host_mapping.cell_mapping.uuid)\n\n # now our marker is the last item in the second page; make the limit=3\n # so we make sure we've exhausted the pages\n marker = compute_nodes[-1].uuid\n compute_nodes = self.host_api.compute_node_get_all(\n self.ctxt, limit=3, marker=marker)\n # assert that we got two compute nodes from cell2\n self.assertEqual(2, len(compute_nodes))\n for compute_node in compute_nodes:\n host_mapping = objects.HostMapping.get_by_host(\n self.ctxt, compute_node.host)\n self.assertEqual(uuids.cell2, host_mapping.cell_mapping.uuid)",
"def test_discovered_nodes_enabled(self):\n with Nodes()as n:\n for node in n.nodes_discovered:\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_enabled(),\n 'Node enabled')",
"def cuda_test():\n # This flag enable the inbuilt cudnn auto-tuner\n torch.backends.cudnn.benchmark = True\n\n print('\\n__Python VERSION :', sys.version)\n print('__pyTorch VERSION :', torch.__version__)\n print('__CUDA VERSION : ', torch.version.cuda)\n print('__CUDNN VERSION : ', torch.backends.cudnn.version())\n print('__Number CUDA Devices : ', torch.cuda.device_count())\n print('__Devices : ')\n\n call([\"nvidia-smi\", \"--format=csv\", \n \"--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free\"])\n\n print('Active CUDA Device: GPU', torch.cuda.current_device())\n print ('Available devices ', torch.cuda.device_count())\n print ('Current cuda device ', torch.cuda.current_device())\n\n return torch.cuda.is_available()",
"def test_01(self, test):\r\n\r\n api = self.apis[NODE_API_KEY]\r\n\r\n if CONFIG.DNS_SD_MODE != \"multicast\":\r\n return test.DISABLED(\"This test cannot be performed when DNS_SD_MODE is not 'multicast'\")\r\n\r\n ServiceBrowser(self.zc, \"_nmos-node._tcp.local.\", self.zc_listener)\r\n # Wait for n seconds for the Node to recognize it should adopt peer-to-peer operation\r\n start_time = time.time()\r\n while time.time() < start_time + CONFIG.DNS_SD_ADVERT_TIMEOUT:\r\n properties = None\r\n time.sleep(CONFIG.DNS_SD_BROWSE_TIMEOUT)\r\n node_list = self.zc_listener.get_service_list()\r\n # Iterate in reverse order to check the most recent advert first\r\n for node in reversed(node_list):\r\n port = node.port\r\n if port != api[\"port\"]:\r\n continue\r\n for address in node.addresses:\r\n address = socket.inet_ntoa(address)\r\n if address != api[\"ip\"]:\r\n continue\r\n properties = self.convert_bytes(node.properties)\r\n break\r\n if properties:\r\n break\r\n # If the Node is still advertising as for registered operation, loop around\r\n if properties and \"ver_slf\" in properties:\r\n for ver_txt in [\"ver_slf\", \"ver_src\", \"ver_flw\", \"ver_dvc\", \"ver_snd\", \"ver_rcv\"]:\r\n if ver_txt not in properties:\r\n return test.FAIL(\"No '{}' TXT record found in Node API advertisement.\".format(ver_txt))\r\n try:\r\n version = int(properties[ver_txt])\r\n if version < 0:\r\n return test.FAIL(\"Version ('{}') TXT record must be greater than or equal to zero.\"\r\n .format(ver_txt))\r\n elif version > 255:\r\n return test.WARNING(\"Version ('{}') TXT record must be less than or equal to 255.\"\r\n .format(ver_txt))\r\n except Exception:\r\n return test.FAIL(\"Version ('{}') TXT record is not an integer.\".format(ver_txt))\r\n\r\n # Other TXT records only came in for IS-04 v1.1+\r\n if self.is04_utils.compare_api_version(api[\"version\"], \"v1.1\") >= 0:\r\n if \"api_ver\" not in properties:\r\n return test.FAIL(\"No 'api_ver' TXT record found in Node API advertisement.\")\r\n elif api[\"version\"] not in properties[\"api_ver\"].split(\",\"):\r\n return test.FAIL(\"Node does not claim to support version under test.\")\r\n\r\n if \"api_proto\" not in properties:\r\n return test.FAIL(\"No 'api_proto' TXT record found in Node API advertisement.\")\r\n elif properties[\"api_proto\"] != self.protocol:\r\n return test.FAIL(\"API protocol ('api_proto') TXT record is not '{}'.\".format(self.protocol))\r\n\r\n if self.is04_utils.compare_api_version(api[\"version\"], \"v1.3\") >= 0:\r\n if \"api_auth\" not in properties:\r\n return test.FAIL(\"No 'api_auth' TXT record found in Node API advertisement.\")\r\n elif properties[\"api_auth\"] != str(self.authorization).lower():\r\n return test.FAIL(\"API authorization ('api_auth') TXT record is not '{}'.\"\r\n .format(str(self.authorization).lower()))\r\n\r\n return test.PASS()\r\n return test.FAIL(\"No matching mDNS announcement found for Node with IP/Port {}:{}. Peer to peer mode will not \"\r\n \"function correctly.\".format(api[\"ip\"], api[\"port\"]),\r\n NMOS_WIKI_URL + \"/IS-04#nodes-peer-to-peer-mode\")",
"def get_devices_per_node(self):\n\n for i in self._nodes.items():\n node = i[1]\n # Update the interface data\n\n self._get_device(node)\n\n self.updateconfig()",
"def wait_gpus(gpus: dict, collect=True, pre_release=True, sync=True):\r\n if gpus is None:\r\n logger.warn('`gpus` is None, potential bug in gpu management module')\r\n return\r\n gpus_not_ready = True\r\n blocks = dict()\r\n collected = dict()\r\n while gpus_not_ready:\r\n time.sleep(5)\r\n gpus_not_ready = False\r\n queries = GPUStatCollection.new_query()\r\n\r\n for cuda_id, gpu_id in gpus.items():\r\n if collected.get(gpu_id, False):\r\n continue\r\n q = queries[gpu_id]\r\n if q.processes is not None and len(q.processes) > 0:\r\n if sys.platform == 'win32':\r\n # on windows, GPU memory are managed by WDDM subsystems\r\n # nvidia-smi cannot get GPU memory usage of each process\r\n # so, all the detected python.exe will be considered\r\n processes = [\r\n p for p in q.processes\r\n if p['command'].lower() == 'python.exe'\r\n ]\r\n else:\r\n processes = [\r\n p for p in q.processes\r\n if p['gpu_memory_usage'] is not None\r\n ]\r\n processes = [\r\n p for p in processes\r\n if p['gpu_memory_usage'] > 0\r\n ]\r\n if len(processes) > 0:\r\n logger.info(\r\n f'GPU {gpu_id} is used, check again in 5 seconds'\r\n )\r\n gpus_not_ready = True\r\n break\r\n else:\r\n if q.processes is None:\r\n logger.warn(f'GPU {gpu_id} processes is None')\r\n logger.info(f'GPU {gpu_id} is ready')\r\n if collect:\r\n blocks[gpu_id] = collect_cuda_device(\r\n cuda_id, q.memory_total\r\n )\r\n\r\n logger.info(f'GPUs {gpus.values()} are ready!')\r\n\r\n if sync:\r\n synchronize()\r\n\r\n del blocks\r\n if pre_release:\r\n torch.cuda.empty_cache()",
"async def test_node_many(self):\n node_list = [Node() for i in range(100)]\n\n # check uids are unique\n uid_list = [n.uid() for n in node_list]\n self.assertEqual(len(uid_list), len(set(uid_list)))\n\n running_list = []\n try:\n for n in node_list:\n # create node and join upon random node in network\n await n.run_node()\n if len(running_list) > 0:\n ct_n, _ = random.choice(running_list)\n await n.join_network(ct_n.nid())\n\n msg = Network_Message()\n n.attach_broadcast_callback(msg.msg_callback)\n n.attach_direct_message_callback(msg.msg_callback)\n running_list.append((n, msg))\n\n for i in range(0, 3):\n await asyncio.sleep(constants.SHU_TIME + 5)\n\n while len(running_list) > 1:\n # broadcast and check all nodes received message\n await running_list[0][0].send_broadcast('hello world')\n for node, msg in running_list[1:]:\n msg_suc = await msg.wait_msg(15)\n self.assertTrue(msg_suc)\n self.assertEqual(msg.nid, running_list[0][0].nid())\n self.assertEqual(msg.msg, 'hello world')\n\n # kill half of the nodes\n kill_list = running_list[:len(running_list) // 2]\n for n, _ in kill_list:\n n.exit_node()\n await asyncio.gather(*[n.wait_stopped() for n, _ in kill_list])\n\n new_list = running_list[len(running_list) // 2:]\n for _, msg in new_list:\n msg.reset()\n\n running_list = new_list\n await asyncio.sleep(constants.SHU_TIME + 5)\n finally:\n for n in node_list:\n n.exit_node()\n await asyncio.gather(*[n.wait_stopped() for n in node_list])",
"def test_offline_nodes_disabled(self):\n with Nodes()as n:\n for node in n.nodes_offline:\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_enabled(),\n 'Node disabled')",
"def test_get_node_hardware(self):\n pass",
"def test_auto_transfer_correct_device(ray_start_4_cpus_2_gpus):\n import nvidia_smi\n\n nvidia_smi.nvmlInit()\n\n def get_gpu_used_mem(i):\n handle = nvidia_smi.nvmlDeviceGetHandleByIndex(i)\n info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)\n return info.used\n\n start_gpu_memory = get_gpu_used_mem(1)\n\n device = torch.device(\"cuda:1\")\n small_dataloader = [(torch.randn((1024 * 4, 1024 * 4)),) for _ in range(10)]\n wrapped_dataloader = ( # noqa: F841\n ray.train.torch.train_loop_utils._WrappedDataLoader(\n small_dataloader, device, True\n )\n )\n\n end_gpu_memory = get_gpu_used_mem(1)\n\n # Verify GPU memory usage increases on the right cuda device\n assert end_gpu_memory > start_gpu_memory",
"def TST_MMD_adaptive_bandwidth(Fea, N_per, N1, Fea_org, sigma, sigma0, alpha, device, dtype):\r\n mmd_vector = np.zeros(N_per)\r\n TEMP = MMDu(Fea, N1, Fea_org, sigma, sigma0, is_smooth=False)\r\n mmd_value = get_item(TEMP[0],is_cuda)\r\n Kxyxy = TEMP[2]\r\n count = 0\r\n nxy = Fea.shape[0]\r\n nx = N1\r\n for r in range(N_per):\r\n # print r\r\n ind = np.random.choice(nxy, nxy, replace=False)\r\n # divide into new X, Y\r\n indx = ind[:nx]\r\n # print(indx)\r\n indy = ind[nx:]\r\n Kx = Kxyxy[np.ix_(indx, indx)]\r\n # print(Kx)\r\n Ky = Kxyxy[np.ix_(indy, indy)]\r\n Kxy = Kxyxy[np.ix_(indx, indy)]\r\n TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)\r\n mmd_vector[r] = TEMP[0]\r\n if mmd_vector[r] > mmd_value:\r\n count = count + 1\r\n if count > np.ceil(N_per * alpha):\r\n h = 0\r\n threshold = \"NaN\"\r\n break\r\n else:\r\n h = 1\r\n if h == 1:\r\n S_mmd_vector = np.sort(mmd_vector)\r\n # print(np.int(np.ceil(N_per*alpha)))\r\n threshold = S_mmd_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n return h, threshold, mmd_value.item()",
"def quick_run(node_list, seeds):\n all_nodes = node_list.copy()\n for node in all_nodes:\n node.heal()\n for seed_num in seeds:\n all_nodes[seed_num].infect()\n return sum([1 if node.infected else 0 for node in all_nodes])",
"def _check_for_completion(self, node):\n dis=0\n for i in range(node.state.size):\n dis+=(node.state[i]-self.goal.state[i])**2\n\n dis=np.sqrt(dis)\n if(dis<=self.step_size):\n return True\n else: return False",
"def validate_kitti(model, args, eval_loader, group, iters=24):\n model.eval()\n epe_list = torch.zeros(2).cuda(device=args.gpu)\n out_list = torch.zeros(2).cuda(device=args.gpu)\n\n for val_id, batch in enumerate(tqdm(eval_loader)):\n image1, image2, flow_gt, valid_gt = batch\n\n image1 = Variable(image1, requires_grad=True)\n image1 = image1.cuda(args.gpu, non_blocking=True)\n\n image2 = Variable(image2, requires_grad=True)\n image2 = image2.cuda(args.gpu, non_blocking=True)\n\n flow_gt = Variable(flow_gt, requires_grad=True)\n flow_gt = flow_gt.cuda(args.gpu, non_blocking=True)\n flow_gt = flow_gt[0]\n\n valid_gt = Variable(valid_gt, requires_grad=True)\n valid_gt = valid_gt.cuda(args.gpu, non_blocking=True)\n valid_gt = valid_gt[0]\n\n padder = InputPadder(image1.shape, mode='kitti')\n image1, image2 = padder.pad(image1, image2)\n\n flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)\n flow = padder.unpad(flow_pr[0])\n\n epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()\n mag = torch.sum(flow_gt**2, dim=0).sqrt()\n\n epe = epe.view(-1)\n mag = mag.view(-1)\n val = valid_gt.view(-1) >= 0.5\n\n out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()\n\n epe_list[0] += epe[val].mean().item()\n epe_list[1] += 1\n\n out_list[0] += out[val].sum()\n out_list[1] += torch.sum(val)\n\n if args.distributed:\n dist.all_reduce(tensor=epe_list, op=dist.ReduceOp.SUM, group=group)\n dist.all_reduce(tensor=out_list, op=dist.ReduceOp.SUM, group=group)\n\n if args.gpu == 0:\n epe = epe_list[0] / epe_list[1]\n f1 = 100 * out_list[0] / out_list[1]\n\n print(\"Validation KITTI: %f, %f\" % (epe, f1))\n return {'kitti-epe': float(epe.detach().cpu().numpy()), 'kitti-f1': float(f1.detach().cpu().numpy())}\n else:\n return None",
"def is_cuda(xs):\n return list(map(lambda x: x.is_cuda, xs))",
"def check_nodes(self) -> bool:\n # check the input-output consistency\n for op_name in self.__ops:\n op = cast(Operator, self.__ops[op_name])\n inputs: Dict[str, Operator] = op.input_ops\n for i in inputs.values():\n if op not in i.output_op_list:\n return False\n\n return True",
"def mem_report(print_all: bool = False) -> None:\n\n def _mem_report(tensors: Iterable, mem_type: str) -> None:\n \"\"\"Print the selected tensors of type\n\n There are two major storage types in our major concern:\n - GPU: tensors transferred to CUDA devices\n - CPU: tensors remaining on the system memory (usually unimportant)\n\n Args:\n - tensors: the tensors of specified type\n - mem_type: 'CPU' or 'GPU' in current implementation \"\"\"\n print(f\"Storage on {mem_type}\")\n print(\"-\" * LEN)\n total_numel = 0\n total_mem = 0\n visited_data: List[Any] = []\n for tensor in tensors:\n if tensor.is_sparse:\n continue\n # a data_ptr indicates a memory block allocated\n data_ptr = tensor.storage().data_ptr()\n if data_ptr in visited_data:\n continue\n visited_data.append(data_ptr)\n\n numel = tensor.storage().size()\n total_numel += numel\n element_size = tensor.storage().element_size()\n mem = numel * element_size / 1024 / 1024 # 32bit=4Byte, MByte\n total_mem += mem\n element_type = type(tensor).__name__\n size = tuple(tensor.size())\n\n if print_all:\n print(f\"{element_type}\\t\\t{size}\\t\\t{mem}\")\n print(\"-\" * LEN)\n print(f\"Total Tensors: {total_numel} \\tUsed Memory Space: {total_mem}\")\n print(\"-\" * LEN)\n\n LEN = 65\n if print_all:\n print(\"=\" * LEN)\n print(\"Element type\\tSize\\t\\t\\tUsed MEM(MBytes)\")\n tensors = []\n for obj in gc.get_objects():\n try:\n if t.is_tensor(obj) or (hasattr(obj, \"data\") and t.is_tensor(obj.data)):\n tensors.append(obj)\n except Exception:\n pass\n cuda_tensors = [tensor for tensor in tensors if tensor.is_cuda]\n host_tensors = [tensor for tensor in tensors if not tensor.is_cuda]\n _mem_report(cuda_tensors, \"GPU\")\n _mem_report(host_tensors, \"CPU\")\n if print_all:\n print(\"=\" * LEN)",
"def check_para_tilling(in_tile, in_node, in_size, ncruns, cas_ncsize):\n # ~~ Default values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n ncnode = 1\n if in_node != 0:\n ncnode = max(1, in_node)\n # By default taking the input ncsize\n if in_size != -1:\n ncsize = in_size\n else:\n ncsize = cas_ncsize\n\n # ~~ Special case of nctile ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n nctile = 1\n if in_tile != 0:\n nctile = max(1, in_tile)\n elif ncnode > 1:\n if ncsize > 1:\n nctile = ncsize // ncnode\n elif ncruns > 1:\n nctile = ncruns // ncnode\n\n # ~~ Special case of batching ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if in_node == 0:\n # ~~> temporary measure before doing each run in parallel of one another\n ncnode = max(1, ncsize) // nctile\n if ncnode * nctile < max(1, ncsize):\n ncnode = ncnode + 1\n # ~~> valid for runs in parallel of one another\n #ncnode = int( max( 1,ncsize ) * ncruns / nctile )\n #if ncnode * nctile < max( 1,ncsize ) * ncruns: ncnode = ncnode + 1\n\n if ncruns == 1:\n # ~~ Standard cases ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # If the command line options.nctile and options.ncnode are fixed\n if in_tile != 0 and in_node != 0 and ncsize == 0:\n ncsize = ncnode * nctile\n # If options.ncsize is set, it will have priority over the others\n elif in_size != 0:\n # ncnode is an integer of nctile and ncsize is re-ajusted\n if in_tile != 0:\n ncnode = max(1, ncsize) // nctile\n while ncnode * nctile < max(1, ncsize):\n ncnode = ncnode + 1\n # nctile is an integer of ncnode and ncsize is re-ajusted\n if in_node != 0:\n nctile = max(1, ncsize) // ncnode\n while ncnode * nctile < max(1, ncsize):\n nctile = nctile + 1\n # local processor with 1 node and many cores\n if in_tile == 0 and in_node == 0:\n ncnode = 1\n nctile = max(1, ncsize)\n\n return nctile, ncnode, ncsize",
"def flush_compute_cache(config):\n if config.get('flush_compute',False):\n host_names = get_compute_node_host_names(config)\n logging.info('Flushing caches on %d hosts' % len(host_names))\n pjobs = [delayed(flush_linux_caches)(host_name) for host_name in host_names]\n Parallel(n_jobs=len(pjobs))(pjobs)"
]
| [
"0.6117969",
"0.5841875",
"0.53511333",
"0.5347084",
"0.53312343",
"0.52384406",
"0.5163226",
"0.50747496",
"0.5028944",
"0.5019661",
"0.49944606",
"0.49061847",
"0.48506638",
"0.4796332",
"0.4776862",
"0.47689015",
"0.4761088",
"0.47491127",
"0.47488803",
"0.47486717",
"0.47445482",
"0.47441706",
"0.47418985",
"0.47278255",
"0.4720778",
"0.47197446",
"0.4714789",
"0.47142786",
"0.47121134",
"0.47091606"
]
| 0.6362029 | 0 |
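A minimal standalone sketch (not part of the dataset row above) of the final pass/fail check performed by dma_main. The (compute_id, 'DMA', testfunc, result) tuple layout is an assumption taken from the _process_dma_result helper shown in the negatives list, and logging is replaced by print so the snippet runs on its own:

def overall_dma_status(agent_results):
    # agent_results entries are assumed to be (compute_id, suite, testfunc, passed).
    for compute_id, suite, testfunc, passed in agent_results:
        if not passed:
            # dma_main returns 1 as soon as any recorded test has failed.
            print('DMA test is Fail: {} failed on node {}'.format(testfunc, compute_id))
            return 1
    return 0

# Example: the Server test passed on node 0, the InfoFetch test failed on node 1.
results = [(0, 'DMA', 'Server', True), (1, 'DMA', 'InfoFetch', False)]
assert overall_dma_status(results) == 1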
Return, for the current row of the books DataFrame, the rating of its first (earliest) review. | def return_first_review_overall(x, review_books_df):
asin = x.name
return review_books_df.query('asin == @asin').sort_values(by=['unixReviewTime']).iloc[0].overall | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rating(self):\n average = self.review.all().aggregate(Avg('rating'))['rating__avg']\n if not average:\n return 0\n return average",
"def get_current_rate(self):\n pass",
"def get_rating(self):\n self.total = sum(int(review['stars']) for review in self.reviews.values())\n if self.total > 0:\n return round(self.total / self.reviews.count(), 1)\n else:\n return self.total",
"def _set_rate(self):\r\n interval = self.data.iloc[2, 0] - self.data.iloc[1, 0]\r\n self.rate = int(1 / interval)",
"def evaluate(self):\n self.df['Score'] = self.df[self.review_col].apply(self.analyzer)\n\n return self.df",
"def get_average_rating(self):\n count = 0\n total = 0\n num_books = len(self.books)\n if num_books > 0:\n for rating in self.books.values():\n if rating:\n count += 1\n total += rating\n average = total / count\n if count > 0:\n return average\n else:\n print(\"Books with ratings not found for user {user}\".format(user=self.name))",
"def highest_ratings_year(data: pd.DataFrame):\n # Convert time integer into datatime type and then to year only\n data['review_year'] = pd.to_datetime(data['review_time'], unit='s').dt.year\n # Find all rows with highest rating (5)\n highest_ratings = data[['review_overall', 'review_year']].loc[data.review_overall == 5]\n # Find year with highest count of 5 star reviews\n highest_year =highest_ratings.value_counts().reset_index().review_year.values[0]\n\n print(f\"The year with highest ratings is {highest_year}\")",
"def rate(self):\n return self.brate / FAC",
"def calculateDataRate(self):\n pass",
"def get_average_rating(self):\n count = 0\n total = 0\n ratings_length = len(self.ratings)\n if ratings_length > 0:\n for rating in self.ratings:\n count += 1\n total += rating\n average = total / count\n return average\n else:\n print(\"There does not seem to be any ratings for {book}\".format(book=self.title))",
"def rate(self):\n return self.__rate",
"def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes",
"def getRate(self) -> int:\n if (self._total_stake.get() + self._daily_reward.get()) == 0:\n rate = DENOMINATOR\n else:\n rate = (self._total_stake.get() + self._daily_reward.get()) * DENOMINATOR // self.sICX_score.totalSupply()\n return rate",
"def rate(self):\n return self._rate",
"def rate_last(self):\n diff = (self.time - self.lasts[0][0]).total_seconds()\n try:\n return (self.pos - self.lasts[0][1]) / FAC / diff\n except ZeroDivisionError:\n return 0.0",
"def rating( self, restaurant_id, user_id, k, reg ):\n\n\t\t# extract the reviews of the user and recalculate the baseline rating \n\t\tuser_reviews = self.df[ self.df['user_id'] == user_id ]\n\t\tmean_all = self.df['stars'].mean()\n\t\tmean_user = user_reviews['user_avg'].values[0]\n\t\tmean_item = self.df['business_avg'][ self.df['business_id'] == restaurant_id ].values[0]\n\t\tbaseline = mean_user + mean_item - mean_all\n\n\t\tscores_numerator = []\n\t\tscores_denominator = []\n\t\tnearest = self.knearest_amongst_user_rated( restaurant_id, user_id, k = 7, reg = 3.0 )\n\n\t\tfor biz_id, sim, _ in nearest:\n\t\t\treviews = user_reviews[ user_reviews['business_id'] == biz_id ]\n\t\t\treviews_avg = reviews['business_avg'].values[0]\n\t\t\treviews_stars = reviews['stars'].values[0]\t\t\t\n\t\t\treviews_baseline = mean_user + reviews_avg - mean_all\n\t\t\tscores_numerator.append( sim * ( reviews_stars - reviews_baseline ) )\n\t\t\tscores_denominator.append(sim)\n\n\t\tscores = baseline + sum(scores_numerator) / sum(scores_denominator)\n\t\treturn scores",
"def data_rate(self):\n return self._data_rate",
"def get_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/(self.votes+self.field.weight)",
"def get_rating(self):\n self.rating = imdb.get_title_ratings(self.ID)['rating']",
"def find_highest_diff(asin_2, books_df):\n val = books_df.query('asin == @asin_2').first_review_overall \n val = int(val.values[0])\n if val == 1:\n return True\n else:\n return False",
"def get_review(reviews, userId, BusinessId):\n reviews = reviews[(reviews['business_id'] == BusinessId) & (reviews['user_id'] == userId)]\n\n if reviews.empty:\n return np.nan\n elif len(reviews) > 1:\n return float(reviews['stars'].max())\n else:\n return float(reviews['stars'])",
"def get_earliest_retirement(self):\n return self.years_to_retirement",
"def getPlayerRating(df, name):\n return df.loc[df[\"name\"] == name, \"rating\"].iloc[0]",
"def get_current_rating(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? .*? (.*?) .*? .*? \\r\\n' \n rating = int(re.findall(pattern,summary).pop())\n return rating",
"def get_rating_as_of_date(\n self,\n date: Union[str, float],\n default_rating: float = DEFAULT_INITIAL_RATING\n ) -> float:\n history_df = DataFrame(self.rating_history, columns=[\"date\", \"rating\"])\n\n # only select one entry per distinct date\n history_df[\"r\"] = history_df.groupby([\"date\"]).rank(method=\"first\", ascending=False)\n history_df = history_df[history_df[\"r\"] == 1]\n\n # get the rating for the latest date\n history_df = history_df[history_df[\"date\"] <= date].sort_values(\"date\", ascending=False)\n if history_df.shape[0] == 0:\n return default_rating\n else:\n return history_df.reset_index().loc[0, \"rating\"]",
"def get_ratings(self):\n df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)\n df = IoManager.scale_ratings(df)\n df = IoManager.normalize_ratings_per_archetype(df)\n df = self.add_ratings_sum(df)\n # print(df[[\"name\", \"monogreen\", \"simic_ramp\", \"general\"]].tail(60))\n # print(df[[\"name\", \"general\"]].sort_values(ascending=False, by=\"general\").head(50))\n return df",
"def rate(self, rating, series, is_gs=False, counts=False):\n k = self.calculate_k(rating, counts)*1.1 if is_gs else self.calculate_k(rating, counts)\n rating.value = float(rating.value) + k * self.adjust(rating, series)\n rating.times += 1\n return rating",
"def get_rate(timestamps):\n return (timestamps[1, 1] - timestamps[0, 1]) / (timestamps[1, 0])",
"def compareRating(df, name1, name2):\n rating1 = df.loc[df[\"name\"] == name1, \"rating\"].iloc[0]\n rating2 = df.loc[df[\"name\"] == name2, \"rating\"].iloc[0]\n return (1 + 10 ** ((rating2 - rating1) / 400.0)) ** -1",
"def convertRate(row):\n if pd.isnull(row):\n return 1.0\n elif ':' in str(row):\n rows = row.split(':')\n return 1.0 - float(rows[1]) / float(rows[0])\n else:\n return float(row)"
]
| [
"0.6295078",
"0.617707",
"0.6110781",
"0.59789956",
"0.58231413",
"0.58185756",
"0.57670903",
"0.5752119",
"0.56879455",
"0.5686966",
"0.56659776",
"0.5629178",
"0.56277907",
"0.5627686",
"0.56208754",
"0.5620721",
"0.56025547",
"0.5594329",
"0.5588395",
"0.5568699",
"0.5558224",
"0.5545637",
"0.55191827",
"0.55038005",
"0.5467044",
"0.54659027",
"0.54412735",
"0.54408926",
"0.5430045",
"0.54160035"
]
| 0.6859296 | 0 |
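A hedged usage sketch for the row above, assuming a books DataFrame indexed by asin and a review_books_df with 'asin', 'unixReviewTime' and 'overall' columns; both frames and the helper name first_review_overall are synthetic illustrations, not part of the dataset:

import pandas as pd

review_books_df = pd.DataFrame({
    'asin': ['B001', 'B001', 'B002'],
    'unixReviewTime': [1300000000, 1200000000, 1250000000],
    'overall': [4, 5, 3],
})
books_df = pd.DataFrame({'title': ['Book One', 'Book Two']}, index=['B001', 'B002'])

def first_review_overall(x, review_books_df):
    # Same idea as return_first_review_overall above: rating of the earliest review.
    asin = x.name
    return (review_books_df.query('asin == @asin')
            .sort_values(by=['unixReviewTime'])
            .iloc[0].overall)

books_df['first_review_overall'] = books_df.apply(
    first_review_overall, axis=1, review_books_df=review_books_df)
print(books_df)  # B001 -> 5 (its earliest review), B002 -> 3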
Return, for the current row of the matching DataFrame, a list of dictionaries containing the first n ratings attributed to each book. | def find_n_reviews(x, n, review_books_df):
asin_1 = x['asin_1']
asin_2 = x['asin_2']
overall_reviews_1 = review_books_df.query('asin == @asin_1').sort_values(
'unixReviewTime').iloc[0:(n+1)].overall.tolist()
overall_reviews_2 = review_books_df.query('asin == @asin_2').sort_values(
'unixReviewTime').iloc[0:(n+1)].overall.tolist()
dic_1 = {'asin': asin_1}
for i, val in enumerate(overall_reviews_1):
dic_1[str(i)+"-th-review"] = val
dic_2 = {'asin': asin_2}
for i, val in enumerate(overall_reviews_2):
dic_2[str(i)+"-th-review"] = val
return [dic_1, dic_2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ratings(book_list, user, rating):\n num = len(book_list)\n records = {'user_id': [user] * num,\n 'rating': [rating] * num,\n 'item_id': book_list,\n }\n return gl.SFrame(records)",
"def get_reviews(df, col, stars):\n log.info('Number of reviews to extract: {}'.format(stars))\n log.info(\n 'Number of available reviews: {}'.format(df[col].value_counts()))\n if [x for x in df[col].value_counts() if x < min(stars.values())]:\n raise Exception(\"To many review chosen from dataset\")\n idxs = []\n for star, n_rev in stars.iteritems():\n idxs += random.sample(df[df[col] == star].index, n_rev)\n return idxs",
"def get_ratings(self):\n df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)\n df = IoManager.scale_ratings(df)\n df = IoManager.normalize_ratings_per_archetype(df)\n df = self.add_ratings_sum(df)\n # print(df[[\"name\", \"monogreen\", \"simic_ramp\", \"general\"]].tail(60))\n # print(df[[\"name\", \"general\"]].sort_values(ascending=False, by=\"general\").head(50))\n return df",
"def df_to_id_list(df, id_book):\n df['Year'] = df['Year'].astype(int).astype(str)\n matched = pd.merge(df, id_book,\n left_on=['Name', 'Year'], right_on=['primaryTitle', 'startYear'],\n how='inner')\n ids = matched['tconst'].astype(str).tolist()\n final_ratings = []\n names = df.Name.tolist()\n years = [int(year) for year in df.Year.tolist()]\n if 'Rating' in df.columns:\n stars = [int(rating) for rating in df.Rating.tolist()]\n info = list(zip(names, years, stars))\n final_ratings = matched['Rating'].astype(int).tolist()\n else:\n info = list(zip(names, years, list(range(len(years)))))\n missed = [x for x in info if x[0] not in matched['primaryTitle'].tolist()]\n for i, j, k in missed:\n i = re.sub('[^\\s0-9a-zA-Z\\s]+', '%', i)\n try:\n # TODO: fix this cursor so it actually references something.\n cursor_dog.execute(f\"\"\"\n SELECT movie_id, original_title, primary_title\n FROM movies\n WHERE primary_title ILIKE '{i}' AND start_year = {j}\n OR original_title ILIKE '{i}' AND start_year = {j}\n ORDER BY runtime_minutes DESC\n LIMIT 1\"\"\")\n id = cursor_dog.fetchone()[0]\n ids.append(id)\n final_ratings.append(k)\n except Exception as e:\n continue\n final_ratings = [x*2 for x in final_ratings]\n ratings_dict = dict(zip(ids, final_ratings))\n return tuple([[fill_id(id) for id in ids], ratings_dict])",
"def mean_more_n_reviews(x, n, review_books_df):\n asin_1 = x['asin_1']\n asin_2 = x['asin_2']\n \n dic_1 = {}\n dic_2 = {}\n \n if (len(review_books_df.query('asin == @asin_1')) > n and len(review_books_df.query('asin == @asin_2')) > 20):\n overall_reviews_1 = review_books_df.query('asin == @asin_1').sort_values(\n 'unixReviewTime').iloc[n:].overall.tolist()\n overall_reviews_2 = review_books_df.query('asin == @asin_2').sort_values(\n 'unixReviewTime').iloc[n:].overall.tolist()\n\n dic_1 = {'asin': asin_1, 'mean': np.mean(overall_reviews_1)}\n dic_2 = {'asin': asin_2, 'mean': np.mean(overall_reviews_2)}\n \n \n return [dic_1, dic_2]",
"def assign_rank_weight(self, data_frame):\n groupby_movies = data_frame.groupby(\"movieid\")\n movie_rank_weight_dict = {}\n for movieid, info_df in groupby_movies:\n max_rank = info_df.actor_movie_rank.max()\n for rank in info_df.actor_movie_rank.unique():\n movie_rank_weight_dict[(movieid, rank)] = (max_rank - rank + 1)/max_rank*100\n return movie_rank_weight_dict",
"def positive_rate(df):\n pos_rate_percentages = {}\n movies = df.columns.values\n nrows, ncols = df.shape[0], df.shape[1]\n for i in range(ncols):\n pos_rate = [df.iloc[j, i] for j in range(nrows) if df.iloc[j, i] >= 4]\n pos_rate_percentage = len(pos_rate)*1.0/df.iloc[:, i].count()\n pos_rate_percentages[movies[i]] = pos_rate_percentage\n return pos_rate_percentages",
"def _get_rank_values(self):\n \n info_gains = {}\n \n #caluclate info gain\n for col in self.cat_cols:\n info_gains[col] = self._get_info_gain(col)\n \n return info_gains",
"def get_movie_scores(df, min_vote_count=1000):\n ratings = {}\n\n for x in CREW_ATTRIBUTES:\n ratings[x] = get_avg_scores_for_attribute(df, x, min_vote_count)\n\n df['production_score'] = df.apply(calculate_final_production_score, ratings=ratings, axis=1)\n return df",
"def rarest_words(book):\n unique_dict = unique_words(book)\n\n count_list = []\n unsorted = [(v, k) for k, v in unique_dict.items()]\n\n smallest_num = min(unsorted)\n word_array = pd.DataFrame(list(unique_dict.items()), columns=['Word', 'Occurence'])\n\n for index, row in word_array.iterrows():\n if row['Occurence'] == smallest_num[0]:\n count_list.append(row['Word'])\n\n return count_list",
"def get_ratings():\n query = \"\"\"\n SELECT DISTINCT rating\n FROM film\n \"\"\"\n cursor.execute(query)\n result = cursor.fetchall()\n\n return pd.DataFrame(result, columns=['Rating'])",
"def dist_by_year(self):\n return ratings_by_year",
"def get_recommendations(df,song_title, similarity_score, num_recommends = 5):\r\n indices = pd.Series(df.index, index = df['track_name']).drop_duplicates()\r\n idx = indices[song_title]\r\n sim_scores = list(enumerate(similarity_score[idx]))\r\n sim_scores = sorted(sim_scores, key = lambda x: x[1],reverse = True)\r\n top_scores = sim_scores[1:num_recommends+1]\r\n song_indices = [i[0] for i in top_scores]\r\n return df[\"track_name\"].iloc[song_indices]",
"def get_score_book(self) -> List[Tuple[str, float]]:\n returned = []\n\n for item, size in self.score_book.items():\n my_tuple = item, size\n returned.append(my_tuple)\n\n return returned",
"def _reindex(self, ratings):\n user_list = list(ratings['user_id'].drop_duplicates())\n user2id = {w: i for i, w in enumerate(user_list)}\n\n item_list = list(ratings['item_id'].drop_duplicates())\n item2id = {w: i for i, w in enumerate(item_list)}\n\n ratings['user_id'] = ratings['user_id'].apply(lambda x: user2id[x])\n ratings['item_id'] = ratings['item_id'].apply(lambda x: item2id[x])\n ratings['rating'] = ratings['rating'].apply(lambda x: float(x > 0))\n return ratings",
"def _reindex(self, ratings):\n\t\tuser_list = list(ratings['user_id'].drop_duplicates())\n\t\tuser2id = {w: i for i, w in enumerate(user_list)}\n\n\t\titem_list = list(ratings['item_id'].drop_duplicates())\n\t\titem2id = {w: i for i, w in enumerate(item_list)}\n\n\t\tratings['user_id'] = ratings['user_id'].apply(lambda x: user2id[x])\n\t\tratings['item_id'] = ratings['item_id'].apply(lambda x: item2id[x])\n\t\tratings['rating'] = ratings['rating'].apply(lambda x: float(x > 0))\n\t\treturn ratings",
"def tally_awards_in_column(self, column):\n tally = TallyDict()\n for rec in self:\n ids = rec.get_award_ids (column)\n for id in ids:\n vals = tally[id]\n vals.append(rec['pid'])\n tally[id] = vals\n return tally",
"def get_current_ratings(self) -> DataFrame:\n df = self.player_df.copy()\n df[\"rating\"] = df[\"player\"].apply(lambda x: x.rating)\n df[\"n_games\"] = df[\"player\"].apply(lambda x: x.count_games())\n df = df.sort_values(\"player\", ascending=False).reset_index(drop=True)\n df[\"rank\"] = range(1, df.shape[0] + 1)\n df = df[[\"rank\", \"player_id\", \"n_games\", \"rating\"]]\n return df",
"def prepare_like_problem(df):\n like_theshold = 3.0\n filtered_df = df.loc[df.rating > like_theshold, :]\n filtered_df = filtered_df.reset_index(drop=True)\n filtered_df['like'] = 1\n return filtered_df[['userId', 'movieId', 'like', 'timestamp']]",
"def recommendations_to_dd(recommendations):\n res = defaultdict(lambda: 0.0)\n for rec in recommendations:\n res[rec.product] = rec.rating\n return res",
"def __getitem__(self, index):\n row = self._target_df.iloc[index]\n\n review_vector = \\\n self._vectorizer.vectorize(row.review)\n\n rating_index = \\\n self._vectorizer.rating_vocab.lookup_token(row.rating)\n\n return {'x_data': review_vector,\n 'y_target': rating_index}",
"def get_popularity(rest_data, item_dict):\n max_review_count = rest_data.review_count.max()\n min_review_count = rest_data.review_count.min()\n result = np.zeros((len(rest_data), 2))\n for i in range(len(rest_data)):\n result[i, 0] = item_dict[rest_data.business_id[i]]\n result[i, 1] = (((rest_data.review_count[i] - min_review_count)/(max_review_count - min_review_count))*4 + 1)\n result = result[result[:, 0].argsort()]\n return result",
"def recommend(title, cosine_sim, indices, df):\n\n recommended_papers = {}\n\n # getting the index of the movie that matches the title\n try:\n idx = indices[indices == title].index[0]\n except:\n # return dummy values if indices doesn't behave (?)\n return {}, 0.0\n\n # creating a Series with the similarity scores in descending order\n score_series = pd.Series(cosine_sim[idx]).sort_values(ascending = False)\n #print(type(score_series))\n #print(len(score_series))\n\n # getting the indexes of the 5 most similar papers\n top_5_indexes = list(score_series.iloc[1:5].index)\n average_score = mean(score_series)\n #print('avg:', average_score)\n #highest_score = float(score_series.iloc[1])\n #print(score_series.iloc[1:5].index)\n #print(score_series.iloc[1:5])\n\n # populating the list with the titles of the best 5 matching papers\n for i in top_5_indexes:\n recommended_papers[list(df.index)[i]] = df['Link'][i]\n\n return recommended_papers, average_score",
"def compute_stats_on_reviews_df(df):\n stats = []\n for col in df.columns:\n if col != 'asin':\n mean = np.mean(df[col])\n if np.std(df[col]) != 0:\n b = st.t.interval(0.95, len(df[col])-1, loc=mean, scale=st.sem(df[col]))\n else:\n b = (np.nan, np.nan)\n stats.append({'mean': mean, 'interval95': (b[1] - b[0])/2})\n return stats",
"def beer_reccomendations(data: pd.DataFrame):\n # Add written review polarity and subjectivity using TextBlob sentiment analysis\n data = utils.add_review_polarity_subjectivity(data)\n\n # Get best beeres by indexing beer ID with top review polarity and review overall\n best_beers = data['beer_beerId'].loc[ (data['review_polarity'] >= 0.85) & (data['review_overall']==5) ]\n\n print(f\"These three beer reccomendations have 5 star reviews and top positive scores based on written reviews: {best_beers[0:3]}\")",
"def prep_data(ratings_df, watched_df=None, watchlist_df=None,\n good_threshold=4, bad_threshold=3):\n id_book = pd.read_csv('title_basics_small.csv')\n try:\n # try to read Letterboxd user data\n # drop rows with nulls in the columns we use\n ratings_df = ratings_df.dropna(axis=0, subset=['Rating', 'Name', 'Year'])\n # split according to user rating\n good_df = ratings_df[ratings_df['Rating'] >= good_threshold]\n bad_df = ratings_df[ratings_df['Rating'] <= bad_threshold]\n neutral_df = ratings_df[(ratings_df['Rating'] > bad_threshold) & (ratings_df['Rating'] < good_threshold)]\n # convert dataframes to lists\n good_list, good_dict = df_to_id_list(good_df, id_book)\n bad_list, bad_dict = df_to_id_list(bad_df, id_book)\n neutral_list, neutral_dict = df_to_id_list(neutral_df, id_book)\n except KeyError:\n # Try to read IMDb user data\n # strip ids of \"tt\" prefix\n ratings_df['movie_id'] = ratings_df['Const'].apply(lambda x: str(x).lstrip(\"tt\"))\n # drop rows with nulls in the columns we use\n ratings_df = ratings_df.dropna(axis=0, subset=['Your Rating', 'Year'])\n # split according to user rating\n good_df = ratings_df[ratings_df['Your Rating'] >= good_threshold*2]\n bad_df = ratings_df[ratings_df['Your Rating'] <= bad_threshold*2]\n neutral_df = ratings_df[(ratings_df['Your Rating'] > bad_threshold*2) & (ratings_df['Your Rating'] < good_threshold*2)]\n # convert dataframes to lists\n good_list = good_df['movie_id'].to_list()\n bad_list = bad_df['movie_id'].to_list()\n neutral_list = neutral_df['movie_id'].to_list()\n # make ratings dictionaries\n good_dict = dict(zip(good_list, good_df['Your Rating'].tolist()))\n bad_dict = dict(zip(bad_list, bad_df['Your Rating'].tolist()))\n neutral_dict = dict(zip(neutral_list, neutral_df['Your Rating'].tolist()))\n except Exception as e:\n # can't read the dataframe as Letterboxd or IMDb user data\n print(\"This dataframe has columns:\", ratings_df.columns)\n raise Exception(e)\n\n ratings_dict = dict(list(good_dict.items()) + list(bad_dict.items()) + list(neutral_dict.items()))\n\n if (watched_df is not None) and (not watched_df.empty):\n # Construct list of watched movies that aren't rated \"good\" or \"bad\"\n # First, get a set of identified IDs.\n rated_names = set(good_df.Name.tolist() + bad_df.Name.tolist() + neutral_list)\n # drop nulls from watched dataframe\n full_history = watched_df.dropna(axis=0, subset=['Name', 'Year'])\n # get list of watched movies that haven't been rated\n hist_list = df_to_id_list(full_history[~full_history['Name'].isin(rated_names)], id_book)[0]\n # add back list of \"neutral\" movies (whose IDs we already found before)\n hist_list = hist_list + neutral_list\n else: hist_list = neutral_list\n\n if (watchlist_df is not None) and (not watchlist_df.empty):\n try:\n watchlist_df = watchlist_df.dropna(axis=0, subset=['Name', 'Year'])\n val_list = df_to_id_list(watchlist_df, id_book)[0]\n except KeyError:\n watchlist_df = watchlist_df.dropna(axis=0, subset=['Const', 'Year'])\n watchlist_df['movie_id'] = watchlist_df['Const'].str.lstrip(\"tt\")\n val_list = watchlist_df['movie_id'].tolist()\n else: val_list = []\n\n return (good_list, bad_list, hist_list, val_list, ratings_dict)",
"def fetch_per_category(n, path=os.path.join('data', 'yelp_academic_dataset_review.json.zip')):\n\n subsample = []\n counts = {}\n\n # Read zipped JSON\n with zipfile.ZipFile(path, 'r') as z:\n for filename in z.namelist():\n with z.open(filename) as f:\n\n # Iterate over the reviews\n for line in f:\n review = json.loads(line.decode('utf-8'))\n\n # Collect records and update the count\n if review['stars'] not in counts:\n subsample.append(review)\n counts[review['stars']] = 1\n elif counts[review['stars']] < n:\n subsample.append(json.loads(line.decode('utf-8')))\n counts[review['stars']] += 1\n\n # Break when n records are gathered for all star ratings\n if all(c == n for c in counts.values()) == n:\n break\n\n return subsample",
"def get_category_ratings(self):\n category_ratings = dict()\n for cat_rating in self.category_ratings.all():\n category_ratings[cat_rating.category.name] = cat_rating.rating\n return category_ratings",
"def bothTopRatedAndGrossing (movies):\n result = {}\n for (k, v) in movies.items():\n # note, only movies that have all fields are in both\n # (7 for cast + 2 for rating + 2 for gross)\n if len(v) == 11:\n result[k] = v\n return result",
"def rate_bucket(dataset, rate_low, rate_high):\r\n rated_movies=[]\r\n a=movies_header.index('vote_average')\r\n b=movies_header.index('title_movies')\r\n for i in dataset:\r\n if (rate_low <= float(i[a]) <=rate_high):\r\n rated_movies.append(i[b])\r\n \r\n #explore_data(rated_movies,0, 5)\r\n return rated_movies"
]
| [
"0.6305965",
"0.6175478",
"0.5826944",
"0.5774517",
"0.5687366",
"0.5541024",
"0.54844004",
"0.54758245",
"0.5422476",
"0.537953",
"0.533375",
"0.53205276",
"0.5310303",
"0.52701557",
"0.52414566",
"0.52219236",
"0.5207681",
"0.5187615",
"0.5184505",
"0.51826364",
"0.5180113",
"0.5175016",
"0.5162828",
"0.509812",
"0.5092266",
"0.5078018",
"0.50692725",
"0.50589687",
"0.50527346",
"0.5036186"
]
| 0.63651955 | 0 |
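A hedged usage sketch for the find_n_reviews row above, again with synthetic frames and a hypothetical helper name (first_ratings); it applies the same pattern over a matching DataFrame with 'asin_1'/'asin_2' columns and flattens the returned dictionary pairs into one row per asin:

import pandas as pd

review_books_df = pd.DataFrame({
    'asin': ['A1', 'A1', 'A1', 'A2', 'A2', 'A2'],
    'unixReviewTime': [10, 20, 30, 15, 25, 35],
    'overall': [5, 4, 3, 2, 5, 4],
})
matching_df = pd.DataFrame({'asin_1': ['A1'], 'asin_2': ['A2']})

def first_ratings(x, n, review_books_df):
    # Mirrors find_n_reviews above, collapsed into one loop over the pair.
    records = []
    for asin in (x['asin_1'], x['asin_2']):
        ratings = (review_books_df.query('asin == @asin')
                   .sort_values('unixReviewTime')
                   .iloc[0:(n + 1)].overall.tolist())
        record = {'asin': asin}
        for i, val in enumerate(ratings):
            record[str(i) + '-th-review'] = val
        records.append(record)
    return records

pairs = matching_df.apply(first_ratings, axis=1, n=1, review_books_df=review_books_df)
reviews_df = pd.DataFrame([rec for pair in pairs for rec in pair])
print(reviews_df)  # one row per asin, its earliest ratings spread across columns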
Return, for the current row of the matching DataFrame, a list of dictionaries containing the mean of all reviews from the nth onward for each book. | def mean_more_n_reviews(x, n, review_books_df):
asin_1 = x['asin_1']
asin_2 = x['asin_2']
dic_1 = {}
dic_2 = {}
if (len(review_books_df.query('asin == @asin_1')) > n and len(review_books_df.query('asin == @asin_2')) > 20):
overall_reviews_1 = review_books_df.query('asin == @asin_1').sort_values(
'unixReviewTime').iloc[n:].overall.tolist()
overall_reviews_2 = review_books_df.query('asin == @asin_2').sort_values(
'unixReviewTime').iloc[n:].overall.tolist()
dic_1 = {'asin': asin_1, 'mean': np.mean(overall_reviews_1)}
dic_2 = {'asin': asin_2, 'mean': np.mean(overall_reviews_2)}
return [dic_1, dic_2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mean_of_all_genres(df, merged):\n all_genres = get_all_genres_from_df(df)\n mean_genres = {}\n for genres in all_genres:\n mean_genres[genres] = df['rating'][df[genres] == 1].mean()\n\n\n change_nan(mean_genres) # change Nan value\n\n\n for genres in all_genres:\n merged.loc[merged.genre == genres, 'rating'] = merged.loc[merged.genre == genres, 'rating'].map(lambda x : x - mean_genres[genres])\n\n return mean_genres",
"def get_mean_for_user(df,genres, userID):\n #PROFIL UŻYTWKONIKA#\n\n\n mean_for_user = {}\n for genre in genres:\n mean_for_user[genre] = df[(df['userID'] == userID ) & (df['genre'] == genre)]['rating'].mean()\n change_nan(mean_for_user)\n return mean_for_user",
"def avg_rows(rows):\n if len(rows) == 1:\n return rows[0]\n\n agg_vals = {k: [rows[j][k] for j in range(len(rows))] for k in rows[0].keys()}\n return {k: (reduce(np.add, v) / len(v)) for k, v in agg_vals.items()}",
"def average_rating(self):\n reviews = self.gamereview_set.all()\n\n try:\n return mean([ review.rating for review in reviews ])\n\n except StatisticsError:\n return None",
"def get_avg_sentiment_scores_on_link_id_df(input_file_name):\n\n subreddit_name = input_file_name.split('_')[0]\n\n list_of_columns_to_be_graphed = ['vader_compound_score', 'vader_negative_score', 'vader_neutral_score',\n 'vader_positive_score', 'whole_comment_sentiment_flair']\n\n avg_scores = {'avg_vader_compound_score': 0, 'avg_vader_negative_score': 0, 'avg_vader_neutral_score': 0,\n 'avg_vader_positive_score': 0, 'avg_whole_comment_sentiment_flair': 0}\n\n # gets the dataframe\n df = get_df_from_csv(input_file_name)\n\n # creates date object column for matplotlib\n df['date'] = df['created_utc'].apply(lambda x: mdate.epoch2num(x))\n\n # sorts df according to created_utc\n df = df.sort_values(by=['date'])\n\n # get total number of comments\n num_comments = len(df)\n\n # print(df['link_id'].nunique())\n\n df_link_id_group = df.groupby('link_id').agg({'vader_compound_score': ['mean'], 'vader_negative_score': ['mean'],\n 'vader_neutral_score': ['mean'], 'vader_positive_score': ['mean'],\n 'whole_comment_sentiment_flair': ['mean'],\n 'date': ['min']}).copy()\n\n df_link_id_group.columns = ['avg_vader_compound_score', 'avg_vader_negative_score',\n 'avg_vader_neutral_score', 'avg_vader_positive_score',\n 'avg_whole_comment_sentiment_flair', 'date']\n\n return df_link_id_group",
"def compute_stats_on_reviews_df(df):\n stats = []\n for col in df.columns:\n if col != 'asin':\n mean = np.mean(df[col])\n if np.std(df[col]) != 0:\n b = st.t.interval(0.95, len(df[col])-1, loc=mean, scale=st.sem(df[col]))\n else:\n b = (np.nan, np.nan)\n stats.append({'mean': mean, 'interval95': (b[1] - b[0])/2})\n return stats",
"def get_average_of_sentiment_scores(input_file_name):\n\n subreddit_name = input_file_name.split('_')[0]\n\n list_of_columns_to_be_graphed = ['vader_compound_score', 'vader_negative_score', 'vader_neutral_score',\n 'vader_positive_score', 'whole_comment_sentiment_flair']\n\n avg_scores = {'avg_vader_compound_score': 0, 'avg_vader_negative_score': 0, 'avg_vader_neutral_score': 0,\n 'avg_vader_positive_score': 0, 'avg_whole_comment_sentiment_flair': 0}\n\n # gets the dataframe\n df = get_df_from_csv(input_file_name)\n\n # creates date object column for matplotlib\n df['date'] = df['created_utc'].apply(lambda x: mdate.epoch2num(x))\n\n # sorts df according to created_utc\n df = df.sort_values(by=['date'])\n\n # get total number of comments\n num_comments = len(df)\n\n # avg_vader_compound_score = df['vader_compound_score'].mean()\n # avg_vader_negative_score = df['vader_negative_score'].mean()\n # avg_vader_neutral_score = df['vader_neutral_score'].mean()\n # avg_vader_positive_score = df['vader_positive_score'].mean()\n # avg_whole_comment_sentiment_flair = df['whole_comment_sentiment_flair'].mean()\n\n for col in list_of_columns_to_be_graphed:\n # print('Average ' + col + ':', df[col].mean())\n avg_scores['avg_' + col] = df[col].mean()\n\n return avg_scores",
"def average(row):\n return round((row['Math']+row['English']+row['Science'])/3, 2)",
"def summarize_review(self, data: pd.DataFrame) -> pd.DataFrame:\n\n data[\"review_polarity\"] = data.groupby(\n [\"reviewnumber\", \"aspect\"], as_index=False\n )[\"polarity_strength\"].transform(lambda x: \",\".join(x))\n data = data.drop(\n columns=[\n \"sent_idx\",\n \"word_idx\",\n \"word_found\",\n \"polarity_strength\",\n \"sentiment_words\",\n \"intensifier_words\",\n ]\n ).drop_duplicates()\n\n def calculate_review_polarity(review_polarity):\n review_polarity = review_polarity.replace(\"[\", \"\")\n review_polarity = review_polarity.replace(\"]\", \"\")\n review_polarity = review_polarity.split(\",\")\n try:\n review_polarity = np.mean(\n [float(polarity) for polarity in review_polarity]\n )\n except ValueError:\n review_polarity = np.nan\n return review_polarity\n\n data[\"review_polarity\"] = data[\"review_polarity\"].apply(\n lambda x: calculate_review_polarity(x)\n )\n data = data.dropna()\n return data",
"def mean_rate(df):\n mean_of_rate = dict(df.mean())\n return mean_of_rate",
"def average_review_stars():\n # get all un-counted reviews\n reviews = Review.query.filter_by(marked=False).join(Restaurant)\\\n .with_entities(Review, Restaurant).all()\n logging.info(f\"Averaging review stars of {len(reviews)} retrieved reviews..\")\n for review, restaurant in reviews:\n # compute running mean of reviews\n restaurant.num_reviews += 1\n restaurant.avg_stars = 1/restaurant.num_reviews * \\\n (restaurant.avg_stars * (restaurant.num_reviews-1) + review.stars)\n review.marked = True\n # update rows \n db.session.commit()",
"def average_reviews_similarity(user1, user2):\n similarities = []\n for u1_reviews in user1.values():\n for u2_reviews in user2.values():\n for u1_review in u1_reviews:\n for u2_review in u2_reviews:\n review_similarity = similarity(u1_review['text'], u2_review['text'])\n similarities.append(review_similarity)\n average = sum(similarities, 0.0) / len(similarities)\n return average",
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def get_movie_scores(df, min_vote_count=1000):\n ratings = {}\n\n for x in CREW_ATTRIBUTES:\n ratings[x] = get_avg_scores_for_attribute(df, x, min_vote_count)\n\n df['production_score'] = df.apply(calculate_final_production_score, ratings=ratings, axis=1)\n return df",
"def get_global_mean(self, ratings):\n total_ratings = []\n for user, movie, rating in ratings:\n total_ratings.append(rating)\n return sum(total_ratings) / len(total_ratings)",
"def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat = {}\r\n resultat[\"year\"] = year\r\n resultat['total'] = float(mean_value)\r\n print(mean_value)\r\n return resultat",
"def average_precision(dic=None, gtp=1):\n\n # store average precision\n store_ap = {}\n print(f\"\\nThe number of ground true positives is {gtp} when computing the average precision\")\n\n for key, value in dic.items():\n\n # get the id's of the ranked images/captions\n list_ranking = [item.split(\".\")[0] for item in value[0]]\n\n # check if the correct_id'(s) is (are) in the \"list_ranking\"\n if key.split(\".\")[0] in list_ranking:\n ap = []\n correct = 0\n # check the place of the caption_id/image_id in the ranking\n # see article page 5 for exact reasoning:\n # https://towardsdatascience.com/breaking-down-mean-average-precision-map-ae462f623a52\n for i, k in enumerate(list_ranking):\n if k == key.split(\".\")[0]:\n correct += 1\n ap.append(correct / (i + 1))\n else:\n ap.append(0)\n\n store_ap[key] = (1 / gtp) * sum(ap)\n else:\n store_ap[key] = 0\n\n return pd.DataFrame.from_dict(store_ap, orient='index', columns=['average_precision'])",
"def average_ratings(self):\n return get_average_rate(\n model=Rating,\n article=self.pk\n )",
"def mean(df):\r\n\r\n\tdf_mean_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_mean_dict[col] = df[col].mean()\r\n\r\n\tdf_mean = pd.DataFrame(df_mean_dict, index=['Mean'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_mean",
"def get_average_MAE(true_pred_df): \n age_group = true_pred_df.groupby('y_true')\n \n mae_average = []\n for age, age_data in age_group:\n mae_average.append(np.mean(age_data.mae))\n \n return mae_average",
"def compute_average_user_ratings(user_ratings):\n ave_ratings = {}\n \n for user,value in user_ratings.items():\n sum = 0\n movie_num=0\n for movieId, rating in value.items():\n sum += float(rating)\n movie_num += 1\n average = sum / movie_num\n ave_ratings[user]=average\n return ave_ratings",
"def mean_of_group(gb):\n if type(gb.get_group(1)) is pd.DataFrame:\n d = {}\n for name, df in gb:\n mean = np.nanmean(df.values)\n d.update({name: mean})\n s = pd.Series(d)\n return s\n \n else:\n items= gb.get_group(1).items\n d = {key: {} for key in items}\n for name, p in gb:\n for i in items:\n mean = np.nanmean(p[i].values)\n d[i].update({name: mean})\n df = pd.DataFrame(d)\n return df",
"def mean_avg_precision(top_k_results, relevance):\n map_score = 0.0\n for j, scores in relevance.items():\n precision, _ = calculate_precision_recall(top_k_results[j - 1], scores)\n relevant = set()\n for x in scores:\n relevant.add(x[0])\n \n precision_score, cnt = 0.0, 0\n for i in range(len(top_k_results[j - 1])):\n if top_k_results[j - 1][i] in relevant:\n precision_score += precision[i]\n cnt += 1\n \n map_score += precision_score if cnt == 0 else precision_score / cnt\n \n map_score /= len(relevance)\n \n return map_score",
"def evaluate(self):\n self.df['Score'] = self.df[self.review_col].apply(self.analyzer)\n\n return self.df",
"def average(self):\n if self._average is None:\n self._average = sum([df.df for df in self])/len(self)\n return self._average",
"def get_mean(self):\n average = self.df[self.col_name].mean()\n return average",
"def get_product_means(df):\n try:\n mean_dataframe = df.groupby(['asin'])['overall'].mean()\n print mean_dataframe[:10]\n write_df_tocsv(mean_dataframe, 'product_means.csv')\n return mean_dataframe\n except Exception as e:\n print \"Error getting product means\"\n print str(e)\n pass",
"def get_average_rating(self):\n queryset = ArticleRating.objects.filter(article_id=self.get_object())\n return queryset.aggregate(Avg('rate')).get(\"rate__avg\")",
"def average_click_through_rate_by_group(df):\n Searches_PerDay_By_Group = searched_per_day_by_group(df)\n CT_PerDay_By_Group = df.groupby([pd.TimeGrouper('D'),\n 'group']).apply(lambda row: \n len(row[row['action'] == 'visitPage']))\n\n \n Daily_Averages_By_Group = CT_PerDay_By_Group / Searches_PerDay_By_Group\n\n # groupby the second entry in the multi-tuple index\n return Daily_Averages_By_Group.groupby(level=[1]).mean()",
"def _average_list(self, row_list):\n\n if not row_list:\n return row_list\n else:\n N = len(row_list)\n d = len(row_list[0])\n avg = [0 for _ in range(d)]\n for i in range(d):\n avg[i] = (sum([row_list[j][i] for j in range(1, N)],\n start=row_list[0][i]) * 1.0) / N\n return avg"
]
| [
"0.6661418",
"0.6526508",
"0.63646364",
"0.6344809",
"0.62887746",
"0.62240213",
"0.6211887",
"0.620929",
"0.61768067",
"0.6172782",
"0.60722435",
"0.58878464",
"0.58816546",
"0.58502626",
"0.58459353",
"0.57977176",
"0.57909554",
"0.5758562",
"0.57548964",
"0.5753598",
"0.5749582",
"0.57287276",
"0.5716838",
"0.5696725",
"0.56934774",
"0.567842",
"0.5655183",
"0.56463414",
"0.5632956",
"0.56321055"
]
| 0.6990966 | 0 |
return the letter H or L from a rating value; in this particular case, return H when val is between 4 and 5 (inclusive) and L otherwise | def return_category_from_value_HL(val):
val = int(val.values[0])
if val >= 4:
return 'H'
else:
return 'L' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def return_category_from_value_HML(val):\n val = int(val.values[0])\n if val == 5:\n return 'H'\n elif val == 4:\n return 'M'\n else:\n return 'L'",
"def get_rating(mpg):\n if mpg < 14:\n return 1\n elif mpg < 15:\n return 2\n elif mpg < 17:\n return 3\n elif mpg < 20:\n return 4\n elif mpg < 24:\n return 5\n elif mpg < 27:\n return 6\n elif mpg < 31:\n return 7\n elif mpg < 37:\n return 8\n elif mpg < 45:\n return 9\n return 10",
"def k(rating):\n if rating < 100: return 40\n if rating < 200: return 20\n return 10",
"def get_letter(value, IC_type):\n\tif value > 10 or value < 0:\n\t\t# Invalid value (must be between 0 to 10)\n\t\treturn None\n\telif IC_type == 'S' or IC_type == 'T':\n\t\tindex_of_value = code_ST.index(value)\n\t\treturn code_ST[index_of_value + 1] # Letter is always after number.\n\telif IC_type == 'F' or IC_type == 'G':\n\t\tindex_of_value = code_FG.index(value)\n\t\treturn code_FG[index_of_value + 1]\n\telse:\n\t\t# IC_type is invalid\n\t\treturn None",
"def score_to_rating_string(score):\n if score < 1:\n return 'Terrible'\n elif score < 2:\n return 'Bad'\n elif score < 3:\n return 'OK'\n elif score < 4:\n return 'Good'\n elif score < 5:\n return 'Excellent'\n else:\n return 'Super-excellent'",
"def quality_rating(PR):\n \n if PR <= 85:\n quality = \"poor\"\n elif PR < 90:\n quality = \"mediocre\"\n elif PR < 95:\n quality = \"good\"\n else:\n quality = \"great\"\n\n return quality",
"def as_four_letter_type(self):\n four_letter_type = ''\n\n if self.i_score < self.e_score:\n four_letter_type += 'E'\n elif self.i_score == self.e_score:\n four_letter_type += 'X'\n else:\n four_letter_type += 'I'\n\n if self.s_score < self.n_score:\n four_letter_type += 'N'\n elif self.s_score == self.n_score:\n four_letter_type += 'X'\n else:\n four_letter_type += 'S'\n\n if self.t_score < self.f_score:\n four_letter_type += 'F'\n elif self.t_score == self.f_score:\n four_letter_type += 'X'\n else:\n four_letter_type += 'T'\n\n if self.p_score < self.j_score:\n four_letter_type += 'J'\n elif self.p_score == self.j_score:\n four_letter_type += 'X'\n else:\n four_letter_type += 'P'\n\n return four_letter_type",
"def desimal_til_bokstav(snitt):\r\n if snitt >= 4.5:\r\n return 'A'\r\n elif 3.5 <= snitt < 4.5:\r\n return 'B'\r\n elif 2.5 <= snitt < 3.5:\r\n return 'C'\r\n elif 1.5 <= snitt < 2.5:\r\n return 'D'\r\n elif 0.5 <= snitt < 1.5:\r\n return 'E'\r\n else:\r\n return 'F'",
"def res_1Letter(RES):\n RES_LetterCode = ['HOH', 'ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n R_LetterCode = ['O', 'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']\t\n return R_LetterCode[RES_LetterCode.index(RES)]",
"def get_title(rating):\n title = \"\"\n if rating < 1200:\n title = [\"Newbie\", \"grey-text\"]\n elif rating < 1400:\n title = [\"Pupil\", \"light-green-text\"]\n elif rating < 1600:\n title = [\"Specialist\", \"cyan-text\"]\n elif rating < 1900:\n title = [\"Expert\", \"indigo-text\"]\n elif rating < 2100:\n title = [\"Candidate Master\", \"purple-text\"]\n elif rating < 2300:\n title = [\"Master\", \"amber-text\"]\n elif rating < 2400:\n title = [\"International Master\", \"orange-text\"]\n elif rating < 2600:\n title = [\"Grandmaster\", \"red-text\"]\n elif rating < 3000:\n title = [\"International Grandmaster\", \"red-text\"]\n else:\n title = [\"Legendary Grandmaster\", \"red-text\"]\n return title",
"def icd9_descriptions(row):\n\n if 1 <= row['icd9_code'] <= 139:\n val = 'Parasitic_Disease'\n elif 140 <= row['icd9_code'] <= 239:\n val = 'Neoplasm'\n elif 240 <= row['icd9_code'] <= 279:\n val = 'Endocrine'\n elif 280 <= row['icd9_code'] <= 289:\n val = \"Blood\"\n elif 290 <= row['icd9_code'] <= 319:\n val = \"Mental_Disorder\"\n elif 320 <= row['icd9_code'] <= 389:\n val = \"Nervous_System\"\n elif 390 <= row['icd9_code'] <= 459:\n val = \"Circulatory_System\"\n elif 460 <= row['icd9_code'] <= 519:\n val = \"Respiratory_System\"\n elif 520 <= row['icd9_code'] <= 579:\n val = \"Digestive_System\"\n elif 580 <= row['icd9_code'] <= 629:\n val = \"Genitourinary_System\"\n elif 630 <= row['icd9_code'] <= 679:\n val = \"Pregnancy\"\n elif 680 <= row['icd9_code'] <= 709:\n val = \"Skin\"\n elif 710 <= row['icd9_code'] <= 739:\n val = \"Musculoskeletal\"\n elif 740 <= row['icd9_code'] <= 759:\n val = \"Congenital_Anomalies\"\n elif 760 <= row['icd9_code'] <= 779:\n val = \"Perinatal\"\n elif 780 <= row['icd9_code'] <= 799:\n val = \"Ill-Defined\"\n elif 800 <= row['icd9_code'] <= 999:\n val = \"Injury/Poison\"\n elif row['icd9_code'] < .4:\n val = \"Supplemental_factors\"\n elif .4 <= row['icd9_code'] < .7:\n val = \"External_Cause_Inj_Poison\"\n elif .7 <= row['icd9_code'] < .9:\n val = \"Morphology_of_Neoplasms\"\n else:\n val = row['icd9_code']\n\n return val",
"def get_title(rating):\n\ttitle = \"\"\n\tif rating < 1200:\n\t\ttitle = [\"Newbie\", \"grey-text\"]\n\telif rating < 1400:\n\t\ttitle = [\"Pupil\", \"light-green-text\"]\n\telif rating < 1600:\n\t\ttitle = [\"Specialist\", \"cyan-text\"]\n\telif rating < 1900:\n\t\ttitle = [\"Expert\", \"indigo-text\"]\n\telif rating < 2100:\n\t\ttitle = [\"Candidate Master\", \"purple-text\"]\n\telif rating < 2300:\n\t\ttitle = [\"Master\", \"amber-text\"]\n\telif rating < 2400:\n\t\ttitle = [\"International Master\", \"orange-text\"]\n\telif rating < 2600:\n\t\ttitle = [\"Grandmaster\", \"red-text\"]\n\telif rating < 3000:\n\t\ttitle = [\"International Grandmaster\", \"red-text\"]\n\telse:\n\t\ttitle = [\"Legendary Grandmaster\", \"red-text\"]\n\treturn title",
"def level(score):\n user_level = \"\"\n if score < 20:\n user_level = \"elementary\"\n elif score < 30:\n user_level = \"intermediate\"\n elif score < 35:\n user_level = \"upper intermediate\"\n else:\n user_level = \"advanced\"\n return user_level",
"def letProb( c ):\n if c == ' ': return 0.1904\n if c == 'e' or c == 'E': return 0.1017\n if c == 't' or c == 'T': return 0.0737\n if c == 'a' or c == 'A': return 0.0661\n if c == 'o' or c == 'O': return 0.0610\n if c == 'i' or c == 'I': return 0.0562\n if c == 'n' or c == 'N': return 0.0557\n if c == 'h' or c == 'H': return 0.0542\n if c == 's' or c == 'S': return 0.0508\n if c == 'r' or c == 'R': return 0.0458\n if c == 'd' or c == 'D': return 0.0369\n if c == 'l' or c == 'L': return 0.0325\n if c == 'u' or c == 'U': return 0.0228\n if c == 'm' or c == 'M': return 0.0205\n if c == 'c' or c == 'C': return 0.0192\n if c == 'w' or c == 'W': return 0.0190\n if c == 'f' or c == 'F': return 0.0175\n if c == 'y' or c == 'Y': return 0.0165\n if c == 'g' or c == 'G': return 0.0161\n if c == 'p' or c == 'P': return 0.0131\n if c == 'b' or c == 'B': return 0.0115\n if c == 'v' or c == 'V': return 0.0088\n if c == 'k' or c == 'K': return 0.0066\n if c == 'x' or c == 'X': return 0.0014\n if c == 'j' or c == 'J': return 0.0008\n if c == 'q' or c == 'Q': return 0.0008\n if c == 'z' or c == 'Z': return 0.0005\n return 1.0",
"def letProb( c ):\n if c == ' ': return 0.1904\n if c == 'e' or c == 'E': return 0.1017\n if c == 't' or c == 'T': return 0.0737\n if c == 'a' or c == 'A': return 0.0661\n if c == 'o' or c == 'O': return 0.0610\n if c == 'i' or c == 'I': return 0.0562\n if c == 'n' or c == 'N': return 0.0557\n if c == 'h' or c == 'H': return 0.0542\n if c == 's' or c == 'S': return 0.0508\n if c == 'r' or c == 'R': return 0.0458\n if c == 'd' or c == 'D': return 0.0369\n if c == 'l' or c == 'L': return 0.0325\n if c == 'u' or c == 'U': return 0.0228\n if c == 'm' or c == 'M': return 0.0205\n if c == 'c' or c == 'C': return 0.0192\n if c == 'w' or c == 'W': return 0.0190\n if c == 'f' or c == 'F': return 0.0175\n if c == 'y' or c == 'Y': return 0.0165\n if c == 'g' or c == 'G': return 0.0161\n if c == 'p' or c == 'P': return 0.0131\n if c == 'b' or c == 'B': return 0.0115\n if c == 'v' or c == 'V': return 0.0088\n if c == 'k' or c == 'K': return 0.0066\n if c == 'x' or c == 'X': return 0.0014\n if c == 'j' or c == 'J': return 0.0008\n if c == 'q' or c == 'Q': return 0.0008\n if c == 'z' or c == 'Z': return 0.0005\n return 1.0",
"def quality(value: str) -> str:\n if \"HDTV\" in value:\n return \"HDTV\"\n else:\n return \"SD\"",
"def calc_rating(opacity_range, max_s_max_n, continuum_sd):\n rating_codes = 'ABCDEF'\n rating = 0\n\n if opacity_range > 1.5:\n rating += 1\n if max_s_max_n < 3:\n rating += 1\n if continuum_sd*3 > 1:\n rating += 1\n\n return rating_codes[rating]",
"def choose_guard(self):\n\n\t\tg = randint(1, 3)\n\n\t\tif g == 1:\n\t\t\treturn 'h'\n\t\tif g == 2:\n\t\t\treturn 't'\n\t\tif g == 3:\n\t\t\treturn 'l'",
"def letProb(c):\n if c == ' ': return 0.1904\n if c == 'e' or c == 'E': return 0.1017\n if c == 't' or c == 'T': return 0.0737\n if c == 'a' or c == 'A': return 0.0661\n if c == 'o' or c == 'O': return 0.0610\n if c == 'i' or c == 'I': return 0.0562\n if c == 'n' or c == 'N': return 0.0557\n if c == 'h' or c == 'H': return 0.0542\n if c == 's' or c == 'S': return 0.0508\n if c == 'r' or c == 'R': return 0.0458\n if c == 'd' or c == 'D': return 0.0369\n if c == 'l' or c == 'L': return 0.0325\n if c == 'u' or c == 'U': return 0.0228\n if c == 'm' or c == 'M': return 0.0205\n if c == 'c' or c == 'C': return 0.0192\n if c == 'w' or c == 'W': return 0.0190\n if c == 'f' or c == 'F': return 0.0175\n if c == 'y' or c == 'Y': return 0.0165\n if c == 'g' or c == 'G': return 0.0161\n if c == 'p' or c == 'P': return 0.0131\n if c == 'b' or c == 'B': return 0.0115\n if c == 'v' or c == 'V': return 0.0088\n if c == 'k' or c == 'K': return 0.0066\n if c == 'x' or c == 'X': return 0.0014\n if c == 'j' or c == 'J': return 0.0008\n if c == 'q' or c == 'Q': return 0.0008\n if c == 'z' or c == 'Z': return 0.0005\n return 1.0",
"def letter_prob(c):\n if c == ' ': return 0.1904\n if c == 'e' or c == 'E': return 0.1017\n if c == 't' or c == 'T': return 0.0737\n if c == 'a' or c == 'A': return 0.0661\n if c == 'o' or c == 'O': return 0.0610\n if c == 'i' or c == 'I': return 0.0562\n if c == 'n' or c == 'N': return 0.0557\n if c == 'h' or c == 'H': return 0.0542\n if c == 's' or c == 'S': return 0.0508\n if c == 'r' or c == 'R': return 0.0458\n if c == 'd' or c == 'D': return 0.0369\n if c == 'l' or c == 'L': return 0.0325\n if c == 'u' or c == 'U': return 0.0228\n if c == 'm' or c == 'M': return 0.0205\n if c == 'c' or c == 'C': return 0.0192\n if c == 'w' or c == 'W': return 0.0190\n if c == 'f' or c == 'F': return 0.0175\n if c == 'y' or c == 'Y': return 0.0165\n if c == 'g' or c == 'G': return 0.0161\n if c == 'p' or c == 'P': return 0.0131\n if c == 'b' or c == 'B': return 0.0115\n if c == 'v' or c == 'V': return 0.0088\n if c == 'k' or c == 'K': return 0.0066\n if c == 'x' or c == 'X': return 0.0014\n if c == 'j' or c == 'J': return 0.0008\n if c == 'q' or c == 'Q': return 0.0008\n if c == 'z' or c == 'Z': return 0.0005\n return 1.0",
"def get_strength_text(currentstrength):\n for i in range(0, 5): \n strengthrange = (79, 59, 39, 19, 0)\n if currentstrength in range(strengthrange[i], strengthrange[i] + 20):\n strength = STRENGTH_TEXT[i]\n if currentstrength > 99:\n strength = STRENGTH_TEXT[0]\n\n return strength",
"def calc_grade(self, average):\n if 95 <= average:\n return 'S'\n elif 90 <= average:\n return 'A'\n elif 80 <= average:\n return 'B'\n elif 70 <= average:\n return 'C'\n elif 60 <= average:\n return 'D'\n else:\n return 'F'",
"def rssi_strength_rating(value, unit):\n\n if unit != \"dBm\":\n raise ValueError(\"Unsupported unit '{:}'\".format(unit))\n\n rating = 0\n if value > -65:\n rating = 4\n elif -65 >= value > -75:\n rating = 3\n elif -75 >= value > -85:\n rating = 2\n elif value <= -85:\n rating = 1\n\n return rating",
"def get_value(self):\n if self.name in ['1','2','3','4','5','6','7','8', '9', '10']:\n return int(self.name)\n if self.name in ['J','Q','K']:\n return 10\n if self.name == 'A':\n return 1",
"def define_card_value(char):\n if char == '2':\n return Value.TWO\n elif char == '3':\n return Value.THREE\n elif char == '4':\n return Value.FOUR\n elif char == '5':\n return Value.FIVE\n elif char == '6':\n return Value.SIX\n elif char == '7':\n return Value.SEVEN\n elif char == '8':\n return Value.EIGHT\n elif char == '9':\n return Value.NINE\n elif char == 'T':\n return Value.TEN\n elif char == 'J':\n return Value.JACK\n elif char == 'Q':\n return Value.QUEEN\n elif char == 'K':\n return Value.KING\n elif char == 'A':\n return Value.ACE\n else:\n return Value.UNDEFINED",
"def rsrp_strength_rating(value, unit):\n\n if unit != \"dBm\":\n raise ValueError(\"Unsupported unit '{:}'\".format(unit))\n\n rating = 0\n if value > -80:\n rating = 4\n elif -80 >= value > -90:\n rating = 3\n elif -90 >= value > -101:\n rating = 2\n elif value <= -101:\n rating = 1\n\n return rating",
"def get_mpg_class(mpg):\n\n if(mpg >= 45):\n return 10\n elif(mpg >= 37 and mpg < 45):\n return 9\n elif(mpg >= 31 and mpg < 37):\n return 8\n elif(mpg >= 27 and mpg < 31):\n return 7\n elif(mpg >= 24 and mpg < 27):\n return 6\n elif(mpg >= 20 and mpg < 24):\n return 5\n elif(mpg >= 17 and mpg < 20):\n return 4\n elif(mpg >= 15 and mpg < 17):\n return 3\n elif(mpg >= 14 and mpg < 15):\n return 2\n else:\n return 1",
"def dR_tagger(dR):\n\tif dR == 0.04:\n return 5\n elif dR == 0.06:\n return 9\n elif dR == 0.08:\n return 13\n elif dR == 0.1:\n return 17\n elif dR == 0.16:\n return 21\n else:\n print \"invalid dR-input\"\n return False",
"def _sig_stars(val):\n star = \"\"\n if 0 <= val < 0.001:\n star = \"***\"\n elif 0.001 <= val < 0.01:\n star = \"**\"\n elif 0.01 <= val < 0.05:\n star = \"*\"\n elif 0.05 <= val < 0.1:\n star = \".\"\n return star",
"def loc2val(x):\n if x=='Not Sched':\n return 0\n elif x=='Tel':\n return 1\n elif x=='Loc A':\n return 2\n elif x=='Loc B':\n return 3\n elif x=='Loc C':\n return 4\n elif x=='Vaca':\n return 5\n else:\n return 6"
]
| [
"0.640147",
"0.5997784",
"0.5991234",
"0.59513927",
"0.58464277",
"0.57512605",
"0.57475764",
"0.56715345",
"0.5653036",
"0.56257325",
"0.5574845",
"0.55649275",
"0.5541545",
"0.55374426",
"0.55374426",
"0.5517098",
"0.5503665",
"0.5493932",
"0.5469637",
"0.53682107",
"0.5358705",
"0.53540254",
"0.53490746",
"0.5341356",
"0.5333166",
"0.5308122",
"0.5293414",
"0.5287496",
"0.527125",
"0.52582675"
]
| 0.67449373 | 0 |
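A brief usage sketch for the return_category_from_value_HL entry above (illustrative only, not part of the dataset): the function indexes val.values[0], so it expects a one-element pandas Series and is typically applied row-wise. The DataFrame and the column name 'overall' below are invented for the example.

import pandas as pd

def return_category_from_value_HL(val):
    # equivalent to the entry's document: H for ratings of 4 or 5, else L
    return 'H' if int(val.values[0]) >= 4 else 'L'

ratings = pd.DataFrame({'overall': [5, 4, 3, 1]})               # hypothetical column name
labels = ratings.apply(return_category_from_value_HL, axis=1)   # each row arrives as a 1-element Series
print(labels.tolist())                                          # ['H', 'H', 'L', 'L']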
return a list of dictionaries containing the mean and the 95% confidence interval of each column of the specified df except the asin col | def compute_stats_on_reviews_df(df):
stats = []
for col in df.columns:
if col != 'asin':
mean = np.mean(df[col])
if np.std(df[col]) != 0:
b = st.t.interval(0.95, len(df[col])-1, loc=mean, scale=st.sem(df[col]))
else:
b = (np.nan, np.nan)
stats.append({'mean': mean, 'interval95': (b[1] - b[0])/2})
return stats | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_confidence_intervals(df, column, ci_level=0.99):\n\n # group all the data at each date\n d = {}\n for name, group in df.groupby(['date']):\n d[name] = group\n\n # for each timepoint, calculate the CI\n for df in d.values():\n df['ci'] = calculate_ci(df[column], ci_level=ci_level)\n\n # merge results\n df = list(d.values())[0]\n for df_ in list(d.values())[1:]:\n df = df.append(df_)\n\n return df",
"def mean_rate(df):\n mean_of_rate = dict(df.mean())\n return mean_of_rate",
"def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)",
"def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)",
"def confidence_intervals(data):\r\n\r\n x_bar = np.nanmean(data) # Mean value\r\n s = np.nanstd(data) # Standard deviation\r\n n = len(data) # Sample size\r\n\r\n lo_conf = x_bar - (1.96 * (s / np.sqrt(n))) # Lower bound of confidence interval\r\n hi_conf = x_bar + (1.96 * (s / np.sqrt(n))) # Upper bound of confidence interval\r\n\r\n conf_range = hi_conf - lo_conf # Size of the 95% confidence interval\r\n\r\n return lo_conf, hi_conf, conf_range",
"def mean(df):\r\n\r\n\tdf_mean_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_mean_dict[col] = df[col].mean()\r\n\r\n\tdf_mean = pd.DataFrame(df_mean_dict, index=['Mean'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_mean",
"def cre_confidence1(df):\r\n func = lambda x: 1 - np.abs(x.mean())\r\n return df.groupby('creline')['ffb_c'].transform(func)",
"def get_mean_difference(self, data):\n # Create a temporary blank list.\n temp = []\n\n # Get the number of columns in the DataFrame.\n col = data.shape[1]\n\n # Iterate the number of columns and only select the column having\n # the data for means. Since there is only two groups, the subtraction\n # will be hardcoded. There are two possible scenarios where the first\n # mean is larger than the second mean or vise versa. When the difference\n # is acquired, add it to the temporary list.\n for x in range(col):\n if x % 2 == 0:\n if data.loc[0][x] >= data.loc[1][x]:\n diff = data.loc[0][x] - data.loc[1][x]\n temp.append(diff)\n elif data.loc[0][x] < data.loc[1][x]: \n diff = data.loc[1][x] - data.loc[0][x]\n temp.append(diff)\n\n # Convert the list to a Series.\n means = pd.Series(temp)\n\n return means",
"def mean_partial_dependence(model, X, y, col, values, method='predict'):\n results = []\n for value in values:\n X_copy = X.copy()\n X_copy[col] = value\n f = getattr(model, method) \n if method == 'predict_proba':\n y_mean = np.mean(f(X_copy), axis=0)\n else:\n y_mean = np.mean(f(X_copy))\n results.append(y_mean)\n return np.array(results)",
"def get_mean_dict(self):\n mean = {}\n for c in self.cellLines:\n mean[c] = {}\n for l in self.ligands:\n mean[c][l] = self.aucs[c][l]['mean']\n return mean",
"def detect_outliers(df):\n outlier_indices = {}\n # iterate over features(columns)\n for col in df.columns:\n # 1st quartile (25%)\n Q1 = np.percentile(df[col].dropna(), 25)\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col].dropna(), 75)\n # Interquartile range (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index.to_list()\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices[col]=outlier_list_col\n if outlier_list_col:\n Box_plots(df[col],col)\n return outlier_indices",
"def compute_lof_score(stats, columns, col_name):\n\n if stats[col_name]['data_type'] != DATA_TYPES.NUMERIC:\n return {}\n\n np_col_data = np.array(columns[col_name]).reshape(-1, 1)\n lof = LocalOutlierFactor(contamination='auto')\n outlier_scores = lof.fit_predict(np_col_data)\n outlier_indexes = [i for i in range(len(columns[col_name])) if outlier_scores[i] < -0.8]\n\n return {\n 'lof_outliers': outlier_indexes\n ,'lof_based_outlier_score': round(10 * (1 - len(outlier_indexes)/len(columns[col_name])))\n ,'percentage_of_log_based_outliers': (len(outlier_indexes)/len(columns[col_name])) * 100\n ,'lof_based_outlier_score_description':\"\"\"\n The higher this score, the more outliers your dataset has. This is based on distance from the center of 20 clusters as constructed via KNN.\n \"\"\"\n }",
"def col_means(\n x: DataFrame,\n na_rm: bool = False,\n # dims: int = 1,\n # weights = None,\n # freq = None,\n # n = None\n) -> Iterable[NumericType]:\n return x.agg(mean, na_rm=na_rm)",
"def conf_interval_two_means(datae,dataf,conf):\n \n # Dataset E\n data_e = 1.0*np.array(datae)\n n_e = data_e.shape[0]*data_e.shape[1]\n mean_e = np.array(data_e).mean()\n var_e = np.array(data_e).var(ddof=1)\n df_e = n_e-1\n \n # Dataset F\n data_f = 1.0*np.array(dataf)\n n_f = dataf.shape[0]*dataf.shape[1]\n mean_f = np.array(data_f).mean()\n var_f = np.array(data_f).var(ddof=1)\n df_f = n_f-1\n \n # Sp,t calculated for lower/upper bounds \n Sp = np.sqrt((((df_e*var_e) + (df_f*var_f))/(df_e+df_f)))\n t = abs(scs.t.ppf(((1-conf)/2), (df_e+df_f)))\n lower = (mean_e-mean_f)-(Sp*t*np.sqrt(1/n_e+1/n_f))\n upper = (mean_e-mean_f)+(Sp*t*np.sqrt(1/n_e+1/n_f))\n \n return lower,upper",
"def overfit_features(df):\r\n overfit = []\r\n for col in df.columns:\r\n counts = df[col].value_counts().iloc[0]\r\n if counts / len(df)*100 > 99.94:\r\n overfit.append(col)\r\n return overfit",
"def column_means(self):\n return list(self._scala.columnMeans())",
"def get_mean(self):\n average = self.df[self.col_name].mean()\n return average",
"def find_confidence(self, t, df):\n t_table = self.t_table\n nearest_df = round(find_nearest(t_table.index, df), 0)\n nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)\n for col in list(t_table):\n if nearest_t == round(t_table[col][nearest_df], 6):\n # Subtract from one to get confidence, divide by two to get\n # single section on positive side of distribution.\n confidence = (1.0 - float(col)) / 2.0\n return confidence",
"def lowess_per_col(df, columns, bootstrap_iters=config['bootstrap_iters'], conf_interval=0.95, lowess_kw=None, clip_to_zero=True):\n\n # add missing days in index\n df = df.astype(pd.Float64Dtype()).resample('D').mean().sort_index()\n\n df_ret = pd.DataFrame(index=df.index)\n\n if lowess_kw is None:\n lowess_kw = {}\n\n print('Smoothing using lowess and generating 95% CI by bootstrap resampling')\n\n for col in tqdm(columns, unit='column'):\n idx_start = df[col].first_valid_index()\n idx_end = df[col].last_valid_index()\n\n df_sel = df[col].loc[idx_start:idx_end].astype(float)\n df_sel = df_sel.interpolate('linear', limit=14)\n\n # consider all datapoints at 3 weeks around it\n frac = np.float64(1) / ((idx_end - idx_start) / np.timedelta64(3, 'W'))\n\n if frac > 1:\n # this means there is < 3W of data\n # we should probably ignore the data if this is the case\n # but for now set frac to 1 (use all samples)\n frac = 1\n\n local_run_lowess_kw = {**lowess_kw}\n if 'frac' not in local_run_lowess_kw:\n local_run_lowess_kw['frac'] = frac\n\n\n smoothed = _lowess_on_df(df_sel, local_run_lowess_kw)\n\n resample_lambda = _quantile_resampling\n\n n_jobs = config['n_jobs']\n remainder = bootstrap_iters % n_jobs\n extra_jobs_mod = n_jobs // remainder\n itersize = bootstrap_iters // n_jobs\n iters = [itersize + (x % extra_jobs_mod == 0) for x in range(n_jobs)]\n\n # Perform bootstrap resampling of the data\n # and evaluate the smoothing at points\n with tqdm_joblib(tqdm(total=n_jobs, unit=' bootstrap resampling workers finished', leave=False)) as progress_bar:\n retvals = Parallel(n_jobs=n_jobs)(\n delayed(_lowess_worker_with_func_resampler)(iters[i], df_sel, resample_lambda, local_run_lowess_kw) for i in range(n_jobs)\n )\n\n df_results = _merge_lowess_worker_results(df_sel.index, retvals, n_jobs)\n\n colnames = {\n 'bottom_col': f'{col}_lowess_{conf_interval * 100:0.0f}_perc_ci_bottom',\n 'top_col': f'{col}_lowess_{conf_interval * 100:0.0f}_perc_ci_top',\n }\n\n df_results = _bootstrap_ci_from_std(df_results.index, df_results.std(axis=1).values, smoothed, alpha=conf_interval, **colnames)\n\n df_results[f'{col}_lowess'] = smoothed\n\n df_ret = df_ret.join(df_results)\n\n if clip_to_zero:\n # clip negative values to 0\n df_ret[df_ret < 0] = 0\n\n return df_ret.sort_index()",
"def get_means(self):\n if self.cv_method == 'fixed':\n perf = np.mean(self.evaluations, axis=0)\n perf = np.nanmean(perf, axis=-1)\n elif self.cv_method == 'crossvalidation':\n perf = np.mean(self.evaluations, axis=0)\n perf = np.nanmean(perf, axis=-1)\n else:\n perf = self.evaluations\n while len(perf.shape) > 2:\n perf = np.nanmean(perf, axis=-1)\n perf = perf[~np.isnan(perf[:, 0])]\n perf = np.mean(perf, axis=0)\n return perf",
"def weighted_average(model_df,cols):\n #wa = model_df[cols].apply(lambda x: (x[0]*x[1]).sum()/x[0].sum())\n wa = (model_df[cols[0]]*model_df[cols[1]]).sum()/model_df[cols[0]].sum()\n return wa",
"def filterfeatures(df):\n\tfilter_arr = []\n\tfor f in df.columns:\n\t\tif not '.l' in f and not '.r' in f and not '.std' in f and f != 'weight' and f != 'class':\n\t\t\t# filter_arr.append(f.rstrip('.mean'))\n\t\t\tfilter_arr.append(f)\n\treturn filter_arr",
"def cal_thresh(pred_prob,labels):\n mu_stds = []\n for i in range(19):\n pos_mu, pos_std = fit(pred_prob[labels==i, i])\n mu_stds.append([pos_mu, pos_std])\n return mu_stds",
"def confidence_intervals(self, level = 95):\n margin = (100 - level) / 2 # interval is middle level% of vals, so this is margin to either side of it\n try:\n len(self.binom_control)\n len(self.binom_treatment)\n\n except:\n self.binom_distribution()\n\n control = self.binom_control\n treatment = self.binom_treatment\n\n control_upper = np.percentile(a = control, q = level + margin)\n control_lower = np.percentile(a = control, q = margin)\n self.interval_control = {'lower': control_lower, 'upper':control_upper, 'level':level}\n\n treatment_upper = np.percentile(a = treatment, q = level + margin)\n treatment_lower = np.percentile(a = treatment, q = margin)\n self.interval_treatment = {'lower': treatment_lower, 'upper':treatment_upper, 'level':level}\n\n return self.interval_control, self.interval_treatment",
"def get_means_per_interval(counts_per_interval, sites_at_milestone):\n means_per_interval = {}\n for date in counts_per_interval:\n count = counts_per_interval[date]\n sites_at_first_fp = sites_at_milestone['first_fp'][date]\n if sites_at_first_fp == 0:\n mean = 0\n else:\n mean = round(count/sites_at_first_fp)\n means_per_interval[date] = mean\n return means_per_interval",
"def noe_calcs(df):\n noe_status = ['off','on']\n off_mean, on_mean = [df[i].mean(1) for i in noe_status]\n off_sdev, on_sdev = [df[i].std(1) for i in noe_status]\n noe_val = on_mean/off_mean\n noe_err = np.sqrt( (noe_val**2) * ( (on_sdev/on_mean)**2 + \\\n (off_sdev/off_mean)**2 ) )\n return pd.concat([noe_val,noe_err], axis=1, keys=['noe','err'])",
"def get_outliers(a_dataframe):\n outliers_list = []\n for category in a_dataframe.dtypes.keys():\n try:\n column = a_dataframe.loc[:, category]\n mean = np.mean(column) # check if category is numeric\n except TypeError:\n pass\n else:\n # print_hist(column, category)\n st_dev = np.std(column)\n limit_hi = mean + 2 * st_dev\n limit_lo = mean - 2 * st_dev\n flag_bad = (column < limit_lo) | (column > limit_hi)\n if category != \"fnlwgt\": # skip 'fnlwgt' var. 'cos I'll delete it\n outliers_list.append(flag_bad)\n num_outliers = sum(flag_bad)\n print_stats(category, column,\n limit_hi, limit_lo,\n num_outliers\n )\n\n return outliers_list",
"def compute_average_separability_score(self) -> Dict:\n avg_sep_score = {}\n for class_pair_key, class_pair_val in self.separability_scores.items():\n avg_sep_score[class_pair_key] = np.mean(np.array([val for _, val in class_pair_val.items()]))\n avg_sep_score['agg_with_risk'] = sum(\n np.array([val for _, val in avg_sep_score.items()]) *\n RISK\n ) \n avg_sep_score['agg'] = sum([val for key, val in avg_sep_score.items() if type(key)==int]) \n return avg_sep_score",
"def test_conf_interval_normal_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.14, -4.88, -3.24, -2.98), (\n \"quantiles are incorrect\")",
"def scalarNormalizer(df):\r\n arr=dict()\r\n for col in CONT_FEATURES_COL_TO_USE:\r\n mean, std =df[col].mean(), df[col].std()\r\n df[col]=df[col].apply(lambda x: (x-mean)/std)\r\n arr[col] = [mean, std]\r\n json.dump(arr, open('normalize.json', 'w'))\r\n return df"
]
| [
"0.59657013",
"0.59257346",
"0.57639277",
"0.57639277",
"0.57465905",
"0.5622841",
"0.5620874",
"0.55415976",
"0.55392057",
"0.5523666",
"0.5477661",
"0.54676455",
"0.5441022",
"0.5425129",
"0.5410097",
"0.5393089",
"0.5381105",
"0.5324274",
"0.5313938",
"0.5302308",
"0.5295051",
"0.5279587",
"0.5275307",
"0.526042",
"0.52592224",
"0.52435434",
"0.52432275",
"0.5232306",
"0.5217647",
"0.52025956"
]
| 0.6726282 | 0 |
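A small self-contained sketch of the mean / 95% t-interval computation that compute_stats_on_reviews_df performs per column (illustrative only; the toy DataFrame and its column names are invented, and pandas/scipy are assumed to be available):

import numpy as np
import pandas as pd
import scipy.stats as st

# toy review table standing in for the real reviews DataFrame
df = pd.DataFrame({'overall': [4.0, 4.5, 5.0, 3.5, 4.0],
                   'helpful_ratio': [0.2, 0.4, 0.3, 0.5, 0.1]})

stats = []
for col in df.columns:
    mean = np.mean(df[col])
    if np.std(df[col]) != 0:
        lo, hi = st.t.interval(0.95, len(df[col]) - 1, loc=mean, scale=st.sem(df[col]))
    else:
        lo, hi = np.nan, np.nan
    # store the interval half-width, as the dataset's function does
    stats.append({'column': col, 'mean': mean, 'interval95': (hi - lo) / 2})

print(stats)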
return the error value from an interval (the half-width value needed for an error-bar plot) | def error_from_interval(interval):
return (interval[1] - interval[0]) / 2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error(Y, X):\n return (Y - X) ** 2",
"def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1",
"def _df_err(self):\n return self.n - self.k - 1",
"def yerr(self, i):\n return self.errors[1][i]",
"def linpol_error(self):\n return self._linpol_error",
"def get_error(self):\n p = self._get_sub_text('error')\n if not p:\n return None\n else:\n try:\n return float(p)\n except ValueError:\n return None",
"def error(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][1]",
"def error(ff2, error2):\n\n\tupper_bound_squared = ff2 + error2\n\tupper_bound = upper_bound_squared ** 0.5\n\tff = ff2 ** 0.5\n\treturn upper_bound - ff",
"def error(line, data): # error function\n # Metric: Sum of squared Y-axis differences\n err = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n return err",
"def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)",
"def _getErrorFunction(self):\n\n\t\treturn (self._setpoint - self._current)",
"def area_under_curve_and_failure_rate(errors, step_error, max_error,\n min_error=0.):\n x_axis = list(np.arange(min_error, max_error + step_error, step_error))\n ced = np.array(compute_cumulative_error(errors, x_axis))\n return simps(ced, x=x_axis) / max_error, 1. - ced[-1]",
"def xerr(self, i):\n return self.errors[0][i]",
"def _ms_err(self):\n return self._ss_err / self._df_err",
"def get_error(self, params):\n return self.endog - self.predict(params)",
"def error_function(x):\n T = [9.60497373987051638749E0,\n 9.00260197203842689217E1,\n 2.23200534594684319226E3,\n 7.00332514112805075473E3,\n 5.55923013010394962768E4]\n U = [3.35617141647503099647E1,\n 5.21357949780152679795E2,\n 4.59432382970980127987E3,\n 2.26290000613890934246E4,\n 4.92673942608635921086E4]\n\n if np.abs(x) > 1.0:\n return 1.0 - error_function_complemented(x)\n else:\n z = x * x\n y = x * pol_evl(z, T, 4) / p1_evl(z, U, 5)\n return y",
"def plot_error(k_vals, error):\n\n plt.plot(k_vals,error)\n plt.xlabel('k-value')\n plt.ylabel('Cost')\n plt.show()",
"def calc_error(self, data):\n errors = np.zeros(data.shape)\n\n # Values below minimum lidar distance are ascribed the minimum error\n errors[data <= self.min_error_dist] = self.min_error\n\n # Values above minimum have erros calculated by proportion of their value\n errors[data > self.min_error_dist] = data[data > self.min_error_dist] * self.error_prop\n return errors",
"def mean_error_rate(y_true, y_interval):\n _check_interval_array(y_interval)\n\n wrong_intervals = ((y_true < y_interval[:, 0]) | (y_true > y_interval[:, 1])).sum()\n\n return wrong_intervals / y_true.shape[0]",
"def _error(self, Y, T):\n err = np.mean((Y - T)**2)\n return err",
"def MeanSqError(self):\r\n\t\treturn self.mse",
"def get_minerror(self):\n\n return self.sigma_w - self.rsq_y",
"def get_err(snr):\n\n quad_terms = np.array(\n [3.11e-3, 1.10e-5, 6.95e-6, 5.05e-6, 4.65e-6, 4.10e-6])\n lin_terms = np.array(\n [-0.869, -2.07e-3, -1.40e-3, -1.03e-3, -1.13e-3, -7.29e-4])\n consts = np.array([104, 0.200, 0.117, 0.114, 0.156, 0.0624])\n err = quad_terms[:,None] * snr**2 + lin_terms[:,None] * snr + consts[:,None]\n\n # find the minimum of the quadratic function\n min_snr = -lin_terms / (2*quad_terms)\n min_err = quad_terms * min_snr**2 + lin_terms * min_snr + consts\n mask = (snr[:,None] > min_snr).T\n for i in range(0,len(min_err)):\n err[i][mask[i]] = min_err[i]\n\n return err",
"def ss_err(self):\n return np.squeeze(self._ss_err)",
"def _std_err(self):\n return np.sqrt(np.sum(np.square(self._resids), axis=-2) / self._df_err)",
"def diff_of_errors(self):\n self.e_of_e = self.azimuth_error - self.altitude_error\n return self.e_of_e",
"def err_func(x,rv,valore,specn,lcrop,models='da2014'):\n tmp = tmp_func(x[0], x[1], rv, specn, lcrop, models)\n if tmp != 1: return abs(tmp[3]-(valore+1.)) #this is quantity that gets minimized \n else: return 1E30",
"def error(number):\n \n if number > 1 or number <= -1 :\n raise TypeError,\\\n \"\\n<The interval of convergence should be -1 < value <= 1 \\n\"",
"def get_error(self, err_type):\n self.tmp = self.location_errors[0].errors[err_type] * \\\n self.location_errors[0].errors[\"N\"]\n N = self.location_errors[0].errors[\"N\"]\n\n for lerr in self.location_errors[1:]:\n self.tmp = np.add(self.tmp, lerr.errors[err_type] *\n lerr.errors[\"N\"])\n N += lerr.errors[\"N\"]\n\n # print(self.tmp, N, self.tmp/ N)\n return self.tmp / N",
"def error(number):\n \n if number >= 1 or number <= -1 :\n raise TypeError,\\\n \"\\n<The interval of convergence should be -1 < value < 1 \\n\""
]
| [
"0.62011176",
"0.6166605",
"0.61010337",
"0.60961163",
"0.6058812",
"0.6053413",
"0.60513455",
"0.6042944",
"0.60402334",
"0.6030181",
"0.60052824",
"0.5983752",
"0.5945941",
"0.59452814",
"0.5932404",
"0.584184",
"0.58264226",
"0.5817471",
"0.58165526",
"0.57957214",
"0.5735057",
"0.570229",
"0.5697948",
"0.56948966",
"0.56757665",
"0.5665067",
"0.56537896",
"0.564861",
"0.5639994",
"0.56353444"
]
| 0.8264894 | 0 |
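A short sketch showing how the half-width returned by error_from_interval is typically passed to plt.errorbar as a symmetric error bar (illustrative only; the sample ratings are invented, and scipy/matplotlib are assumed to be available):

import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt

def error_from_interval(interval):
    return (interval[1] - interval[0]) / 2

sample = np.array([4.1, 4.3, 4.0, 4.4, 4.2])   # invented ratings
mean = sample.mean()
interval = st.t.interval(0.95, len(sample) - 1, loc=mean, scale=st.sem(sample))

# symmetric error bar: the interval half-width goes straight into yerr
plt.errorbar(1, mean, yerr=error_from_interval(interval),
             linestyle='none', marker='^', capsize=5)
plt.show()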
plot the error-bar plot with each long-term rating's mean. | def plot_lastreviews_means_and_errors(H_in_HL_mean, H_in_HL_error, L_in_HL_mean, L_in_HL_error,
H_in_HH_mean, H_in_HH_error, H_in_HM_mean, H_in_HM_error,
M_in_HM_mean, M_in_HM_error):
# plot the result in a nice plot
plt.figure(figsize=(12, 9))
# create the fig. and axes.
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
# define the color to use
color_1 = rgb_to_matplot_lib(strong_green)
color_2 = rgb_to_matplot_lib(light_green)
color_3 = rgb_to_matplot_lib(strong_red)
color_4 = rgb_to_matplot_lib(light_green)
color_5 = rgb_to_matplot_lib(orange)
# axis
ax.set_ylabel('Rating', fontsize = 14)
ax.tick_params(axis='both', labelsize=14)
# plot small dash lines to follow the grading
for y in np.arange(4.0, 4.6, 0.1):
ax.plot(range(0, 45), [y] * len(range(0, 45)), "--", lw=0.5, color="black", alpha=0.3)
# set titles
ax.set_title('10+ reviews average rating for each case in each group', fontsize = 14)
plt.ylim([1,5.1])
plt.xlim([0,5.1])
    plt.errorbar(1, H_in_HH_mean, H_in_HH_error, linestyle='none', capsize=5, marker="^", color=color_1)
    plt.errorbar(2, H_in_HL_mean, H_in_HL_error, linestyle='none', capsize=5, marker="^", color=color_2)
    plt.errorbar(3, L_in_HL_mean, L_in_HL_error, linestyle='none', capsize=5, marker="^", color=color_3)
    plt.errorbar(4, H_in_HM_mean, H_in_HM_error, linestyle='none', capsize=5, marker="^", color=color_4)
    plt.errorbar(5, M_in_HM_mean, M_in_HM_error, linestyle='none', capsize=5, marker="^", color=color_5)
plt.text(0.8, 4.01, "({:04.3f})".format(H_in_HH_mean), fontsize=14, color=color_1)
plt.text(1.8, 4.01, "({:04.3f})".format(H_in_HL_mean), fontsize=14, color=color_2)
plt.text(2.8, 4.01, "({:04.3f})".format(L_in_HL_mean), fontsize=14, color=color_3)
plt.text(3.8, 4.01, "({:04.3f})".format(H_in_HM_mean), fontsize=14, color=color_4)
plt.text(4.8, 4.01, "({:04.3f})".format(M_in_HM_mean), fontsize=14, color=color_5)
# set ticks label
ax.set_xticks(range(1,6))
ax.set_xticklabels(('H in HH', 'H in HL', 'L in HL', 'H in HM', 'M in HM'))
#set ticks color
colors = [color_1, color_2, color_3, color_4, color_5]
for xtick, color in zip(ax.get_xticklabels(), colors):
xtick.set_color(color)
plt.ylim([4,4.6])
plt.xlim([0.5,5.5])
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_errors(self):\n\n plt.title(\"Prediction Error\")\n plt.plot(self.errors)\n plt.ylabel(\"MSE (Mean Squared Error)\")\n plt.xlabel(\"Iteration\")\n plt.show()",
"def plot_lastreviews_means_and_errors_scaled(H_in_HH_mean, H_in_HH_error, M_in_MM_mean, M_in_MM_error, L_in_LL_mean, L_in_LL_error,\n H_in_HL_mean, H_in_HL_error, L_in_HL_mean, L_in_HL_error, H_in_HM_mean, H_in_HM_error,\n M_in_HM_mean, M_in_HM_error, M_in_ML_mean, M_in_ML_error, L_in_ML_mean, L_in_ML_error):\n plt.figure(figsize=(12, 9)) \n\n # create the fig. and axes.\n ax = plt.subplot(111)\n ax.spines[\"top\"].set_visible(False) \n ax.spines[\"right\"].set_visible(False)\n\n # define the color to use\n light_green = (152, 223, 138)\n strong_green = (44, 160, 44)\n light_red = (255, 152, 150)\n orange = (255, 187, 120)\n strong_red = (214, 39, 40)\n\n strong_green = rgb_to_matplot_lib(strong_green)\n light_green = rgb_to_matplot_lib(light_green)\n strong_red = rgb_to_matplot_lib(strong_red)\n light_red = rgb_to_matplot_lib(light_red)\n orange = rgb_to_matplot_lib(orange)\n\n # axis \n ax.set_ylabel('Rating', fontsize = 14)\n ax.tick_params(axis='both', labelsize=14)\n\n # plot small dash lines to follow the grading \n for y in np.arange(3.5, 4.6, 0.1): \n ax.plot(range(0, 45), [y] * len(range(0, 45)), \"--\", lw=0.5, color=\"black\", alpha=0.3)\n\n\n # set titles\n ax.set_title('10+ reviews average rating for each case in each group', fontsize = 14)\n\n plt.errorbar(1, H_in_HH_mean, H_in_HH_error, lineStyle= None, capsize=5, marker=\"^\", color=strong_green)\n plt.errorbar(2, M_in_MM_mean, M_in_MM_error, lineStyle= None, capsize=5, marker=\"^\", color=orange)\n plt.errorbar(3, L_in_LL_mean, L_in_LL_error, lineStyle= None, capsize=5, marker=\"^\", color=strong_red)\n plt.errorbar(4, H_in_HL_mean, H_in_HL_error, lineStyle= None, capsize=5, marker=\"^\", color=light_green)\n plt.errorbar(5, L_in_HL_mean, L_in_HL_error, lineStyle= None, capsize=5, marker=\"^\", color=light_red)\n plt.errorbar(6, H_in_HM_mean, H_in_HM_error, lineStyle= None, capsize=5, marker=\"^\", color=light_green)\n plt.errorbar(7, M_in_HM_mean, M_in_HM_error, lineStyle= None, capsize=5, marker=\"^\", color=orange)\n plt.errorbar(8, M_in_ML_mean, M_in_ML_error, lineStyle= None, capsize=5, marker=\"^\", color=orange)\n plt.errorbar(9, L_in_ML_mean, L_in_ML_error, lineStyle= None, capsize=5, marker=\"^\", color=light_red) \n\n plt.text(0.7, 3.41, \"({:04.3f})\".format(H_in_HH_mean), fontsize=14, color=strong_green)\n plt.text(1.7, 3.41, \"({:04.3f})\".format(M_in_MM_mean), fontsize=14, color=orange)\n plt.text(2.7, 3.41, \"({:04.3f})\".format(L_in_LL_mean), fontsize=14, color=strong_red)\n plt.text(3.7, 3.41, \"({:04.3f})\".format(H_in_HL_mean), fontsize=14, color=light_green)\n plt.text(4.7, 3.41, \"({:04.3f})\".format(L_in_HL_mean), fontsize=14, color=light_red)\n plt.text(5.7, 3.41, \"({:04.3f})\".format(H_in_HM_mean), fontsize=14, color=light_green)\n plt.text(6.7, 3.41, \"({:04.3f})\".format(M_in_HM_mean), fontsize=14, color=orange)\n plt.text(7.7, 3.41, \"({:04.3f})\".format(M_in_ML_mean), fontsize=14, color=orange)\n plt.text(8.7, 3.41, \"({:04.3f})\".format(L_in_ML_mean), fontsize=14, color=light_red)\n\n # set ticks label\n ax.set_xticks(range(1,10))\n ax.set_xticklabels(('H in HH', 'M in MM', 'L in LL', 'H in HL', 'L in HL', 'H in HM', 'M in HM', 'M in ML', 'L in ML'))\n\n #set ticks color\n colors = [strong_green, orange, strong_red, light_green, light_red, light_green, orange, orange, light_red]\n for xtick, color in zip(ax.get_xticklabels(), colors):\n xtick.set_color(color)\n\n plt.ylim([3.4,4.6])\n plt.xlim([0.5,9.5])\n plt.show()",
"def plot_errors(dat, title='Data', avg='mean', err='sem'):\n\n n_groups = len(dat)\n\n fig = plt.figure(figsize=[4, 5])\n ax = plt.gca()\n\n if avg == 'mean': avg_func = np.nanmean\n if avg == 'median': avg_func = np.nanmedian\n\n if err == 'sem': err_func = sem\n\n plt.errorbar(np.arange(1, n_groups+1), avg_func(dat, 1), yerr=err_func(dat, 1), xerr=None, fmt='.',\n markersize=22, capsize=10, elinewidth=2, capthick=2)\n\n ax.set_xlim([0.5, n_groups+0.5])\n\n # Titles & Labels\n ax.set_title(title, fontsize=16)\n ax.set_xlabel('Noise Levels')\n ax.set_ylabel('Error')\n\n # Set the top and right side frame & ticks off\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n # Set linewidth of remaining spines\n ax.spines['left'].set_linewidth(2)\n ax.spines['bottom'].set_linewidth(2)",
"def plot_ave(results_list):\n x_range = range(len(results_list[0]))\n err_x, err_y, std_list = [], [], []\n\n for i in x_range:\n if i % 10 == 0:\n #get average for each generation\n column = [] \n for result in results_list:\n column.append(result[i])\n average = np.average(column)\n \n std_dev = np.std(column)\n err_x.append(i)\n err_y.append(average)\n std_list.append(std_dev)\n\n pylab.errorbar(err_x, err_y, yerr=std_list)\n pylab.show()",
"def error():\n\n # Make data set using errors\n dataset_a = DataSet(oscillating,error_y=oscillating_error,plot='error_bar',label='Data and error')\n dataset_a.set_error(interval=5,width=1,cap=2)\n dataset_b = DataSet(oscillating,plot='error_shade',error_y=oscillating_error,order=0,colour='lightgrey',label='Error')\n dataset_c = DataSet(oscillating,plot='line',order=1,colour='firebrick',label='Data')\n\n # Make line graph with error bars\n plot_bar = Plot()\n plot_bar.set_legend(legend=True)\n plot_bar.add_dataset(dataset_a)\n plot_bar.plot()\n plot_bar.save(name='./figures/2d_error_bar',fmt='png')\n plot_bar.display()\n\n # Make line graph with shaded errors\n plot_shade = Plot()\n plot_shade.set_legend(legend=True,location='upper left')\n plot_shade.add_dataset(dataset_b)\n plot_shade.add_dataset(dataset_c)\n plot_shade.plot()\n plot_shade.save(name='./figures/2d_error_shade',fmt='png')\n plot_shade.display()",
"def create_error_bar(self, ax):\n ax.scatter(self.x, self.y, label=self.label)\n ax.errorbar(self.x, self.y, yerr=self.asymmetric_y_error)\n\n ax.set_title('Error bar of all the book mean values over each iteration')\n\n # the mean line\n mean_line = ax.axhline(y=self.y_mean, color='r', linestyle='-', alpha=0.3, label='mean value')\n\n # the mean lines for the two clusters\n mean_line_c1 = ax.axhline(y=self.y_mean_c1, color='g', linestyle='--', alpha=0.3, label='mean value c1')\n mean_line_c2 = ax.axhline(y=self.y_mean_c2, color='b', linestyle='--', alpha=0.3, label='mean value c2')\n\n legend = ax.legend(loc='upper right')",
"def plot_error(self, maxstep=20):\n plt.ion()\n plt.xlabel(\"step\")\n plt.ylabel(\"Ave Logloss (bits)\")\n train_errors = []\n if self.dataset.test:\n test_errors = []\n for i in range(maxstep):\n self.learn(1)\n train_errors.append( sum(self.logloss(tple) for tple in self.dataset.train)\n /len(self.dataset.train))\n if self.dataset.test:\n test_errors.append( sum(self.logloss(tple) for tple in self.dataset.test)\n /len(self.dataset.test))\n plt.plot(range(1,maxstep+1),train_errors,\n label=str(self.num_classes)+\" classes. Training set\")\n if self.dataset.test:\n plt.plot(range(1,maxstep+1),test_errors,\n label=str(self.num_classes)+\" classes. Test set\")\n plt.legend()\n plt.draw()",
"def _plot_errors(self):\n for task_id, loss_type in self.task_ids.iteritems():\n x = np.arange(len(self.training_errors[task_id]))\n fig, ax = plt.subplots(1, 1)\n ax.set_xlabel('Number of epochs of training')\n if loss_type is LossTypes.mse:\n ax.set_ylabel('RMSE Error')\n elif loss_type is LossTypes.cross_entropy:\n ax.set_xlabel('(1 - accuracy)')\n plt.plot(x, self.training_errors[task_id], 'r', label='training')\n plt.plot(x, self.validation_errors[task_id], 'b', label='validation')\n plt.legend(loc=\"best\", framealpha=0.3)\n fig.savefig(\"error-curve-task-{}.png\".format(task_id))\n plt.close('all')",
"def plot_error(k_vals, error):\n\n plt.plot(k_vals,error)\n plt.xlabel('k-value')\n plt.ylabel('Cost')\n plt.show()",
"def display_errors(errors_index,img_errors,pred_errors, obs_errors):\n n = 0\n nrows = 3\n ncols = 3\n fig, ax = plt.subplots(nrows,ncols,sharex=True,sharey=True)\n for row in range(nrows):\n for col in range(ncols):\n error = errors_index[n]\n ax[row,col].imshow((img_errors[error]).reshape((28,28)))\n ax[row,col].set_title(\"Predicted label :{}\\nTrue label :{}\".format(pred_errors[error],obs_errors[error]))\n n += 1",
"def plotErr(self):\n if self.xp and self.wp:\n # plot the spectra\n w=self.ws.value(np.array(self.xp))\n self.errcurve,=self.erraxes.plot(self.xp,self.wp-w,linewidth=0.5,linestyle='',marker='o',color='b')\n if self.dxp and self.dwp:\n # plot the spectra\n dw=self.ws.value(np.array(self.dxp))\n self.delerrcurve,=self.erraxes.plot(self.dxp,self.dwp-dw,linewidth=0.5,linestyle='',marker='x',color='b')",
"def plot_insta_err(self, ax=None):\n if ax is None:\n ax = plt.gca()\n ax.set_yscale('log')\n ax.plot(list(range(1, self.max_dets)), self.errors[0], label='independent')\n ax.plot(list(range(1, self.max_dets)), self.errors[1], label='correlated')\n ax.set_ylabel(r'Stochastic error in $E_\\mathrm{corr}$ / ha')\n ax.set_xlabel('Number of determinants in estimator')\n ax.axhline(np.sqrt(self.proje_var[0]), linestyle='--', color='black', label='reference')\n ax.legend()\n return ax",
"def plot_mean_and_max():\n plt.rc('font', size=16)\n data = np.array(\n [\n [0.0000, 0.000, 0.002],\n [0.0001, 0.000, 0.000],\n [0.0005, 0.001, 0.008],\n [0.0008, 0.001, 0.003],\n [0.0012, 0.002, 0.005],\n [0.0100, 0.016, 0.041],\n [0.0150, 0.025, 0.068],\n [0.0200, 0.039, 0.096],\n [0.0300, 0.053, 0.140],\n [0.0400, 0.080, 0.242],\n [0.0500, 0.098, 0.202]\n ])\n rb, mean, maxd = data[:, 0], data[:, 1], data[:, 2]\n plt.plot(rb, mean, marker='.', markerfacecolor='steelblue',\\\n markeredgecolor='steelblue', linestyle=' ', markersize=15)\n plt.plot(rb, maxd, marker='.', markerfacecolor='orchid',\\\n markeredgecolor='orchid', linestyle=' ', markersize=15)\n plt.grid()\n plt.xlabel(r'$r_B$')\n plt.ylabel(r'$\\delta\\beta$ (deg)')\n # plt.ylim([0., 0.15])\n plt.tight_layout()\n # plt.semilogx()\n # plt.semilogy()\n\n plt.show()",
"def plotPredictedError():\n\tglobal normalized\n\n\twarmthPred = []\n\twarmthObserved = []\n\tcompPred = []\n\tcompObserved = []\n\tSStotalWarmth = 0\n\tSSresWarmth = 0\n\tSStotalComp = 0\n\tSSresComp = 0\n\tkeys = parser.getMappings(normalized)[0].keys()\n\tfor key in keys:\n\n\t\tif \"_\" in key:\n\t\t\twarmthAxis, compAxis = getPlotData(key)\n\t\t\twarmthPred.append(warmthAxis[3])\n\t\t\twarmthObserved.append(warmthAxis[2])\n\t\t\tcompPred.append(compAxis[3])\n\t\t\tcompObserved.append(compAxis[2])\n\n\tmeanObservedWarmth = np.mean(warmthObserved)\n\tmeanObservedComp = np.mean(compObserved)\n\tfor i in range(0, len(warmthObserved)):\n\t\tSStotalWarmth += (warmthObserved[i] - meanObservedWarmth)**2\n\t\tSSresWarmth += (warmthObserved[i] - warmthPred[i])**2\n\t\tSStotalComp += (compObserved[i] - meanObservedComp)**2\n\t\tSSresComp += (compObserved[i] - compPred[i])**2\n\n\n\tplt.axis([0, 100, 0, 100])\n\tfig = plt.figure(1)\n\tax = fig.add_subplot(111)\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(warmthObserved, warmthPred)\n\tprint(r_value**2)\n\ttext = ax.text(60, 20, \"R^2 value: \" + str(r_value**2) , \\\n fontsize = 12, color = 'black')\n\tplt.title(\"Observed vs Predicted Warmth\")\n\tplt.ylabel(\"Predicted Value\")\n\tplt.xlabel(\"Observed Value\")\n\tplt.scatter(warmthObserved, warmthPred)\n\tplt.plot([0,100], [0,100])\n\tplt.show()\n\n\tfig = plt.figure(1)\n\tax = fig.add_subplot(111)\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(compObserved, compPred)\n\tprint(r_value**2)\n\ttext = ax.text(60, 20, \"R^2 value: \" + str(r_value**2) , \\\n fontsize = 12, color = 'black')\n\tplt.axis([0, 100, 0, 100])\n\tplt.title(\"Observed vs Predicted Competence\")\n\tplt.ylabel(\"Predicted Value\")\n\tplt.xlabel(\"Observed Value\")\n\tplt.scatter(compObserved, compPred)\n\tplt.plot([0,100], [0,100])\n\tplt.show()",
"def plotErrors(losses, model_title ='Shallow Network, SGD, Batch Size = 10'):\n fig, axes = plt.subplots()\n\n x = np.arange(len(losses))\n\n axes.plot(x, losses)\n axes.set_ylabel(\"Loss (cross entropy)\")\n axes.set_xlabel(\"Number of iterations\")\n axes.set_title(model_title) \n\n plt.show() \n\n return None",
"def make_tuning_plot_rmse(df, error_col_name=\"rmse\",\n error_title = \"Top 10% RMSE\",\n cutoff = 0.10):\n\n df = df.copy()\n\n # Get the regularizer and reset coeff\n coeff = [float(i.split(\"evidence_new_reg_\")[1]) if \"evidence\" in i else i for i in df['method_name']]\n df[\"method_name\"] = coeff\n df[\"Data\"] = convert_dataset_names(df[\"dataset\"])\n df[\"Method\"] = df[\"method_name\"]\n\n # Get appropriate datasets\n trials = 'trial_number'\n methods = 'Method'\n\n # Make area plot\n uniq_methods = set(df[\"Method\"].values)\n method_order = sorted(uniq_methods,\n key=lambda x : x if isinstance(x, float) else -1)\n method_df = []\n datasets = set()\n for data, sub_df in df.groupby(\"Data\"):\n # Add datasets\n datasets.add(data)\n rmse_sub = sub_df[error_col_name]\n methods_sub = sub_df[\"Method\"]\n trials_sub= sub_df['trial_number']\n for method_idx, method in enumerate(method_order):\n # Now summarize these lines\n bool_select = (methods_sub == method)\n\n rmse_method = rmse_sub[bool_select]\n trials_temp = trials_sub[bool_select]\n areas = []\n # create area!\n for trial, rmse_trial in zip(trials_sub, rmse_method):\n num_tested = len(rmse_trial)\n cutoff_index = int(cutoff * num_tested) - 1\n rmse_val = rmse_trial[-cutoff_index]\n to_append = {error_title: rmse_val,\n \"Regularizer Coeff, $\\lambda$\": method,\n \"method_name\": method,\n \"Data\": data,\n \"Trial\" : trial}\n method_df.append(to_append)\n method_df = pd.DataFrame(method_df)\n\n # Filter out dropout\n method_df = method_df[[i != \"dropout\" for i in\n method_df['method_name']]].reset_index()\n\n # Normalize by dataset\n for dataset in datasets:\n # Make a divison vector of ones and change it to a different value only\n # for the correct dataset of interest to set max rmse to 1\n division_factor = np.ones(len(method_df))\n indices = (method_df[\"Data\"] == dataset)\n\n # Normalize with respect to the ensemble so that this is 1\n max_val = method_df[indices].query(\"method_name == 'ensemble'\").mean()[error_title]\n\n # Take the maximum of the AVERAGE so it's normalized to 1\n division_factor[indices] = max_val\n method_df[error_title] = method_df[error_title] / division_factor\n\n method_df_evidence = method_df[[isinstance(i, float) for i in\n method_df['method_name']]].reset_index()\n method_df_ensemble = method_df[[\"ensemble\" in str(i) for i in\n method_df['method_name']]].reset_index()\n\n data_colors = {\n dataset : sns.color_palette()[index]\n for index, dataset in enumerate(datasets)\n }\n\n min_x = np.min(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n max_x= np.max(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n\n sns.lineplot(x=\"Regularizer Coeff, $\\lambda$\", y=error_title,\n hue=\"Data\", alpha=0.8, data=method_df_evidence,\n palette = data_colors)\n\n for data, subdf in method_df_ensemble.groupby(\"Data\"):\n\n color = data_colors[data]\n area = subdf[error_title].mean()\n std = subdf[error_title].std()\n plt.hlines(area, min_x, max_x, linestyle=\"--\", color=color, alpha=0.8)\n\n # Add ensemble baseline\n ensemble_line = plt.plot([], [], color='black', linestyle=\"--\",\n label=\"Ensemble\")\n # Now make ensemble plots\n plt.legend(bbox_to_anchor=(1.1, 1.05))",
"def draw_bonus_error(error):\n f, ax = plt.subplots()\n vertices = np.arange(10, 50)\n ax.plot(vertices, error[10:], 'b', label='Error')\n plt.xlabel('Rounds')\n plt.ylabel('Misclassification Error')\n plt.title('Misclassification Error: l = 10, m = 20, n = 40')\n plt.legend(loc='upper left')\n plt.grid(True)\n plt.show()",
"def avg_response(df, x, y_obs, y_est, save=False, show=True):\n\n fig, ax1 = plt.subplots(figsize=(15,15))\n\n ax2 = ax1.twinx()\n\n x_name = x\n if df[x].dtype == \"int\":\n x = df[x].astype(\"category\")\n elif df[x].dtype == \"float\":\n x = pd.cut(df[x], bins=10)\n\n metrics = {\"mean\":\"mean\", \"std err\":\"sem\", \"count\":\"count\"}\n df_grouped = df.groupby([x])[y_obs, y_est].agg(metrics)\n \n x_vals = range(len(df_grouped))\n y_vals = df_grouped[\"mean\"][y_est]\n ax1.errorbar(x_vals, y_vals,yerr=df_grouped[\"std err\"][y_est], fmt='-',\n marker='o',color=\"R\", mec='black', ms=10, mew=2, linewidth=4, \n capsize=10, elinewidth=2)\n\n y_vals = df_grouped[\"mean\"][y_obs]\n ax1.plot(x_vals, y_vals, '-', label=y_obs, marker='o',\n color = \"G\",mec='black', ms=10, mew=2, linewidth=4)\n\n y_vals = df_grouped[\"count\"][y_obs]\n ax2.bar(x_vals,y_vals, color='DarkSlateGray', alpha = 0.25)\n\n ax1.set_xlim(x_vals[0]-0.2,x_vals[-1]+1)\n x_levels = list(y_vals.index)\n plt.xticks(x_vals, x_levels)\n ax1.set_xticklabels(x_levels, rotation=45)\n ax1.grid(False)\n ax2.grid(False)\n font_size = 20\n ax1.set_xlabel(x_name, fontsize=font_size)\n ax1.set_ylabel(y_obs, fontsize=font_size)\n ax2.set_ylabel(\"count\", fontsize=font_size)\n plt.title(\"Average {y} for groups of {x}\".format(x=x_name, y=y_obs), \n fontsize=font_size+5)\n ax1.legend([y_obs, y_est], fontsize=font_size-2)\n if save:\n fig.savefig(\"/home/edward/work/repos/prometheus/python/plots/avg_response/{}.png\".\n format(x_name), bbox_inches='tight')\n if show:\n plt.show()",
"def plot_graph(error_rates, avg_hits):\n plt.xlabel(\"Error rates (σ)\")\n plt.ylabel(\"Average pins hit\")\n plt.plot(error_rates, avg_hits)\n plt.show()",
"def plot_acc_vs_nsn(df, settings):\n plt.clf()\n fig = plt.figure()\n fig, ax1 = plt.subplots()\n ax1.grid(True)\n ax1.set_axisbelow(True)\n\n models_list = [\"randomforest\", \"vanilla\"]\n redshift_list = df[\"redshift\"].unique()\n\n label_dic = {\"randomforest\": \"Random Forest\", \"vanilla\": \"Baseline RNN\"}\n\n group_cols = [\"model_name_noseed\", \"model_type\", \"redshift\", \"data_fraction\"]\n keep_cols = group_cols + [\"all_accuracy\"]\n\n # Cast to float for groupby operation (all_accuracy is type `O`)\n df.all_accuracy = df.all_accuracy.astype(float)\n\n df_errorbars = (\n df[keep_cols]\n .groupby(group_cols)\n .mean()\n .rename(columns={\"all_accuracy\": \"all_accuracy_mean\"})\n .reset_index()\n )\n df_errorbars[\"all_accuracy_std\"] = (\n df[keep_cols]\n .groupby(group_cols)\n .std()\n .rename(columns={\"all_accuracy\": \"all_accuracy_std\"})\n .reset_index()[\"all_accuracy_std\"]\n )\n\n for i, basemodel in enumerate(models_list):\n for z in redshift_list:\n df_sel = df_errorbars[\n (df_errorbars[\"model_type\"] == basemodel)\n & (df_errorbars[\"redshift\"] == z)\n ]\n # Plot these independently to avoid polluting legend\n ax1.errorbar(\n df_sel[\"data_fraction\"],\n df_sel[\"all_accuracy_mean\"],\n yerr=df_sel[\"all_accuracy_std\"],\n c=CONTRAST_COLORS[i],\n fmt=\"none\",\n zorder=3 if basemodel == \"vanilla\" else 1,\n )\n ax1.plot(\n df_sel[\"data_fraction\"],\n df_sel[\"all_accuracy_mean\"],\n label=label_dic[basemodel],\n marker=MARKER_DIC[basemodel],\n c=CONTRAST_COLORS[i],\n fillstyle=FILL_DIC[z],\n lw=0,\n markersize=10,\n markeredgewidth=1.5,\n )\n legend_elements = [\n Line2D(\n [0],\n [0],\n marker=\"s\",\n lw=0,\n color=\"indigo\",\n label=\"Baseline RNN\",\n markerfacecolor=\"w\",\n markersize=12,\n ),\n Line2D(\n [0],\n [0],\n marker=\"o\",\n lw=0,\n color=\"darkorange\",\n label=\"Random Forest\",\n markerfacecolor=\"w\",\n markersize=12,\n ),\n ]\n\n ax1.legend(handles=legend_elements, loc=4)\n ax1.set_ylabel(\"accuracy\", fontsize=18)\n ax1.set_ylim(91, 100)\n ax1.set_xlim(0.025)\n ax1.set_xlabel(\"# SNe for training\", fontsize=18)\n\n # exchange axis and reformat\n ax2 = ax1.twiny()\n ax1Xs = [round(i, 1) for i in ax1.get_xticks()]\n ax2Xs = []\n for X in ax1Xs:\n # BEWARE: only valid with SALTfitted sample\n ax2Xs.append(\"{:0.1e}\".format(int(X * 881_969 * 0.8)))\n\n ax1.set_xticklabels(ax2Xs)\n ax2.set_xticks(ax1Xs)\n ax2.set_xbound(ax1.get_xbound())\n ax2.set_xticklabels(ax1Xs)\n\n title = ax1.set_title(\"data fraction\", fontsize=18)\n title.set_y(1.1)\n plt.tight_layout()\n\n fig.subplots_adjust(top=0.85)\n fig.savefig(f\"{settings.figures_dir}/accuracy_vs_nSN.png\")\n plt.close()\n plt.clf()",
"def plot_error(class_incorreto):\n epochs = np.arange(1, num_iter + 1)\n plt.plot(epochs, class_incorreto)\n plt.xlabel('Iterações')\n plt.ylabel('Classificados incorretamente')\n plt.show()",
"def plot_mean_std(data,ax,label=None,show_error=True):\n x = np.arange(1,100)\n mean = np.array([np.mean(data_n) for data_n in data])\n if show_error: std = np.array([np.std(data_n) for data_n in data])\n ax.plot(x,mean,label=label)\n if show_error: ax.fill_between(x,mean-std,mean+std,alpha=0.3)",
"def plot_errors(loss_train, loss_val, jet):\n plt.plot(list(range(len(loss_train))), loss_train, 'g', label='Training loss')\n plt.plot(list(range(len(loss_val))), loss_val, 'b', label='Validation loss')\n plt.title('Training and Validation loss for jet: {jet}'.format(jet=jet))\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.show()",
"def plot_train_test_errors(train_errors, test_errors, lambda_str , K , path, rng):\n plt.plot(range(rng), train_errors, marker='o', label='Training Data');\n plt.plot(range(rng), test_errors, marker='v', label='Test Data');\n plt.title('ALS-WR Learning Curve, lambda = %s, K = %d'%(lambda_str, K))\n plt.xlabel('Number of Epochs');\n plt.ylabel('RMSE');\n plt.legend()\n plt.grid()\n plt.savefig(\"../results/test_train_rmse_\"+path)\n plt.show()",
"def plot(self):\n fig = plt.figure()\n ax = plt.gca()\n ax.errorbar(self.sample_bins, self.test_scores.mean(axis=1),\n yerr=self.test_scores.std(axis=1), fmt='r.-',\n label='test')\n ax.errorbar(self.sample_bins, self.train_scores.mean(axis=1),\n yerr=self.train_scores.std(axis=1), fmt='b.-',\n label='train')\n ax.legend(loc='upper right')\n return ax",
"def plot_model_error(self, var, obslabels=None):\n\n fig = plt.figure()\n gs = gridspec.GridSpec(1, 2, wspace=0.05, hspace=0.05, bottom=0.2, width_ratios=[3, 1])\n ax = fig.add_subplot(gs[0])\n\n # 1 vs. 2\n self._draw_error_scatter(1, 2, var, color='red', marker='o', ax=ax, obslabels=obslabels)\n\n # 1 vs. 3\n self._draw_error_scatter(1, 3, var, color='green', marker='*', ax=ax, obslabels=obslabels)\n\n # 1 vs. 4\n self._draw_error_scatter(1, 4, var, color='blue', marker='^', ax=ax, obslabels=obslabels)\n\n # 2 vs. 3\n self._draw_error_scatter(2, 3, var, color='grey', marker='x', ax=ax, obslabels=obslabels)\n\n # 2 vs 4\n self._draw_error_scatter(2, 4, var, color='m', marker='+', ax=ax, obslabels=obslabels)\n\n # 3 vs 4\n self._draw_error_scatter(3, 4, var, color='c', marker='h', ax=ax, obslabels=obslabels)\n\n if ax is not None:\n ax.legend(prop={'size': 8}, ncol=1, fancybox=True, loc='upper left')\n ax.set_xlabel('$\\epsilon$ (observation X)')\n ax.set_ylabel('$\\epsilon$ (observation Y)')\n\n xmi, xma = ax.get_xlim()\n ymi, yma = ax.get_ylim()\n\n ax.set_ylim(min(xmi, ymi), max(xma, yma))\n ax.set_xlim(min(xmi, ymi), max(xma, yma))\n ax.grid()\n ax.set_title('Comparison of model errors: ' + var.upper())\n ax.plot(ax.get_xlim(), ax.get_xlim(), 'k--') # 1:1 line\n return fig",
"def plot_MCcycles_vs_err(mc_cycles, error):\n plt.figure(figsize=(15, 10))\n\n plt.semilogx(mc_cycles, error, 'bo-') # or loglog? semilog, only one axis is logarithmic\n\n # zip joins x and y coordinates in pairs\n for x,y in zip(mc_cycles,error):\n\n label = f'{y:10.2e}'\n\n plt.annotate(label, # this is the text\n (x,y), # this is the point to label\n textcoords=\"offset points\", # how to position the text\n xytext=(0,-10), # distance from text to points (x,y)\n color='black',\n weight='bold',\n size='smaller',\n rotation='0', # plot seems weird w/angle other than 0 or 360..?\n va='top', # [ 'center' | 'top' | 'bottom' | 'baseline' ]\n ha='right') # [ 'left' | 'right' | 'center']\n\n\n plt.title('Error of the Mean Abs. Magnetization',fontsize=15)\n plt.xlabel('Number of Monte-Carlo Cycles',fontsize=15)\n plt.ylabel('error',fontsize=15)\n plt.xticks(fontsize=13);plt.yticks(fontsize=13)\n plt.tight_layout()\n plt.savefig(f'results/plots/4c/ErrorMeanMagnetizationAbs')\n plt.show()",
"def plot_msd(msd, h_exp):\n fig, ax = plt.subplots(1, 2, figsize = (10, 10))\n av_msd = np.mean(msd, axis = 0)\n\n for p in np.arange(0, msd.shape[0], step = 1):\n for t in np.arange(0, msd.shape[1], step = 1): \n ax[0].plot(t, msd[p, t], 'bx')\n ax[1].plot(t, av_msd[t], 'ro')\n ax[0].set_xlabel('Time lag (number of steps)')\n ax[0].set_ylabel('MSD (pix^2)')\n ax[0].set_title('Individual TAMSDs: H = ' + str(h_exp))\n ax[1].set_xlabel('Time lag (number of steps)')\n ax[1].set_ylabel('MSD (pix^2)')\n ax[1].set_title('Averaged TAMSDs: H = ' + str(h_exp)) \n ax[0].set_xlim([0, np.max(t)])\n ax[1].set_xlim([0, np.max(t)])\n ax[0].set_ylim([0, np.max(msd)]) \n ax[1].set_ylim([0, np.max(av_msd)])",
"def do_sample_error_check(cat, subdir):\n #dNdX/Omega_DLA\n cat.plot_dndx_sample_errors(z_max=5,nsample=13)\n plt.legend(loc=0)\n plt.ylim(0,0.16)\n save_figure(path.join(subdir,\"dndx_gp_resample\"))\n plt.clf()\n cat.plot_omega_sample_errors(z_max=5,nsample=13)\n plt.legend(loc=0)\n plt.ylim(0,2.5)\n save_figure(path.join(subdir,\"omega_gp_resample\"))\n plt.clf()",
"def display_averaging(self):\r\n\r\n cwd = os.getcwd()\r\n path = cwd + \"/results\"\r\n df1 = pd.read_csv(path + \"/average_U.csv\") # black line\r\n df2 = pd.read_csv(path + \"/average_N.csv\") # green line\r\n chem = 25 # from 0 to 35\r\n\r\n s1 = df1.iloc[chem]\r\n s1.plot()\r\n\r\n plt.show()"
]
| [
"0.7123026",
"0.6633457",
"0.65329194",
"0.6492488",
"0.64087635",
"0.6374497",
"0.63542676",
"0.62802917",
"0.61992013",
"0.6183493",
"0.613223",
"0.6128087",
"0.61226404",
"0.6120966",
"0.6115421",
"0.6091699",
"0.608599",
"0.6078244",
"0.6043395",
"0.60348445",
"0.603081",
"0.60207564",
"0.6002209",
"0.5958652",
"0.5919395",
"0.5889419",
"0.58836836",
"0.58822",
"0.5871084",
"0.5843922"
]
| 0.72428143 | 0 |
Function to authenticate the Spotify API with a client credentials flow manager. | def authenticate_spotify_api(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET):
auth_manager = SpotifyClientCredentials(client_id = SPOTIPY_CLIENT_ID,
client_secret=SPOTIPY_CLIENT_SECRET)
return spotipy.Spotify(auth_manager=auth_manager) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authenticate(redirect_uri, client_cred_manager, username, scope,client_id,client_secret):\r\n\r\n sp = spotipy.Spotify(client_credentials_manager = client_cred_manager)\r\n token = util.prompt_for_user_token(username, scope, client_id, client_secret, redirect_uri)\r\n if token:\r\n sp = spotipy.Spotify(auth=token)\r\n else:\r\n print(\"Can't get token for\", username)\r\n return sp",
"def authorize(self):\n\t\ttry:\n\t\t\tauth_url = 'https://accounts.spotify.com/api/token'\n\t\t\theaders={}\n\t\t\tdata={}\n\n\t\t\tdata_string = f\"{self.client_id}:{self.client_secret}\"\n\n\t\t\tdata_bytes = data_string.encode(\"ascii\")\n\t\t\tbase_bytes = base64.b64encode(data_bytes)\n\t\t\tbase_message = base_bytes.decode(\"ascii\")\n\n\t\t\theaders['Authorization'] = f\"Basic {base_message}\"\n\n\t\t\tdata = parse.urlencode({\"grant_type\": \"client_credentials\"})\n\t\t\tdata = data.encode('ascii')\n\n\t\t\treq = request.Request(auth_url,data=data, headers=headers)\n\t\t\tlogging.info(\"Successfully called Spotify token API!\")\n\t\texcept:\n\t\t\tlogging.error(\"Failed to create authorization request!\")\n\t\t\treturn False\n\t\t\t\n\t\tif req is not None:\n\t\t\ttry:\n\t\t\t\tresponse = request.urlopen(req).read().decode()\n\t\t\texcept error.URLError as e:\n\t\t\t\tresponse = e.read().decode(\"utf8\", 'ignore')\n\t\t\t\tlogging.error(response)\n\t\t\t\treturn False\n\t\t\n\t\ttry:\n\t\t\t_json = json.loads(response)\n\t\t\tself.token = _json[\"access_token\"]\n\t\t\tlogging.info(\"Successfully received token from Spotify!\")\n\t\texcept:\n\t\t\tlogging.error(\"Could not fetch token from response!\")\n\t\t\treturn False\n\t\t\t\n\t\treturn True",
"def Connect(self,scope):\n\n \"\"\"\n Calling util.prompt_for_user_token will open Spotify’s application authorization\n page in your browser (and require you to log in if you are not already logged in\n to spotify.com), unless a locally cached access token exist from a previous authorization/authentication.\n \"\"\"\n try:\n token = util.prompt_for_user_token(\n self.username,\n scope,\n self.client_id,\n self.secret_id,\n self.redirect_uri)\n except ImportError:\n self._isConnected = False\n print(\" onnecting to Spotify failed\") \n\n\n if token:\n sp = spotipy.Spotify(auth=token)\n self._isConnected = True\n return sp\n else:\n print(\"Can't get token for\", self.username)\n self._isConnected = False",
"def authenticate():\n\n # We are uploading and then downloading so we want Musicmanager\n api = Musicmanager()\n\n # Attempt to authenticate and log in\n logged_in = api.login()\n\n # If login() returns false, you have not performed oauth yet, or did not\n # write your credentials to your disk. Using oauth allows authentication\n # without providing plaintext credentials to the application\n if not logged_in:\n print('No oauth credentials found, please authenticate your account')\n\n # Performs oauth and stores generated credentials to Appdirs \n # 'user_data_dir' by default. oauth only needs to be performed once per \n # machine if the credentials are stored, which is the default behavior.\n authenticated = api.perform_oauth(open_browser=True)\n else:\n print('Successfully logged in.\\n')\n\n return api",
"def init_auth_client(self):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n token = util.prompt_for_user_token(\n cfg['username'],\n scope=cfg['scope'],\n client_id=cfg['spotipy_client_id'],\n client_secret=cfg['spotipy_client_secret'],\n redirect_uri=cfg['spotipy_redirect_uri'])\n sp = spotipy.Spotify(auth=token)\n return sp, cfg['username']",
"def auth(self):\n token = spotipy.util.prompt_for_user_token(self.username,\n self.scope,\n client_id = self.client_id,\n client_secret = self.client_secret,\n redirect_uri= self.redirect_uri)\n if token:\n self.spotify = spotipy.Spotify(auth=token)\n else:\n print(colored.stylize(\"\"\"\\n[*] \"\"\", colored.fg(\"light_red\")) + 'Cant get token for: %s\\n' % (self.username))\n exit()",
"def authorize():\n encoded_auth = base64.b64encode(\n (os.environ[\"SPOTIFY_CLIENT_ID\"] + ':' + os.environ[\"SPOTIFY_CLIENT_SECRET\"]).encode())\n headers = {\n 'Authorization': 'Basic {}'.format(encoded_auth.decode(\"utf-8\"))\n }\n\n response = requests.post(os.environ['SPOTIFY_AUTH_URL'], data={'grant_type': 'client_credentials'},\n headers=headers).text\n return json.loads(response)",
"def authorize_credentials():\n credentials = STORAGE.get()\n # If the credentials doesn't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(CREDENTIAL_JSON, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials",
"def authenticate():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'google-drive-credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n return creds",
"def authorize():\n scopes = 'playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative user-read-email user-read-private'\n\n spotify_authorize_url = 'https://accounts.spotify.com/authorize?'\n params = {\n 'response_type': 'code', \n 'client_id': SPOTIFY_CLIENT_ID,\n 'redirect_uri': 'http://0.0.0.0:5000/callback',\n 'scope': scopes, \n 'show_dialog': True\n }\n\n query_params = urllib.parse.urlencode(params)\n response = make_response(redirect(spotify_authorize_url + query_params))\n return response",
"def authenticate():\n with open(APP_KEYS_FILE) as f:\n app_keys = json.load(f)\n storage = Storage(USER_OAUTH_DATA_FILE)\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(\n OAuth2WebServerFlow(\n client_id=app_keys['APP_CLIENT_ID'],\n client_secret=app_keys['APP_CLIENT_SECRET'],\n scope=['https://www.googleapis.com/auth/reminders'],\n user_agent='google reminders cli tool'),\n storage,\n )\n auth_http = credentials.authorize(httplib2.Http())\n return auth_http",
"def get_spotify_authtoken(client_id, client_secret, scope, refresh_token=None,\n redirect_uri=\"https://example.com/callback\"):\n\n # If refresh token has been passed in, try to use it to generate a\n # new auth_token.\n\n if refresh_token:\n # Setup Base64 Client Secret to Send\n secret = f\"{client_id}:{client_secret}\"\n b64_secret = base64.b64encode(bytes(secret, \"utf-8\")).decode(\"utf-8\")\n\n body = {\"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token}\n auth_url = \"https://accounts.spotify.com/api/token\"\n auth_header = {\"Authorization\": f\"Basic {b64_secret}\"}\n\n res = requests.post(auth_url, data=body, headers=auth_header)\n\n auth_token = res.json()[\"access_token\"]\n try:\n refresh_token = res.json()[\"refresh_token\"]\n except Exception:\n refresh_token = None\n\n # If no refresh token is available, generate a new auth_token by\n # prompting the user to login and authorise the application.\n\n else:\n auth_url = f\"https://accounts.spotify.com/authorize?client_id={client_id}&response_type=code&redirect_uri={redirect_uri}&scope={scope}\"\n\n # Setup Browser\n opts = Options()\n opts.add_argument('--no-sandbox')\n browser = Chrome(\"./chromedriver/chromedriver\", options=opts)\n\n # Go to auth page, sign-in and wait for code to be returned\n browser.get(auth_url)\n WebDriverWait(browser, 60).until(EC.url_contains(redirect_uri))\n\n # Pull auth code from redirect_uri & close browser\n code = browser.current_url.split(\"code=\")[1].split(\"#\")[0]\n browser.close()\n\n # Step 2: Auth Token\n\n body = {\"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": redirect_uri,\n \"client_id\": client_id,\n \"client_secret\": client_secret}\n auth_url = \"https://accounts.spotify.com/api/token\"\n res = requests.post(auth_url, data=body)\n auth_token = res.json()[\"access_token\"]\n try:\n refresh_token = res.json()[\"refresh_token\"]\n except Exception:\n refresh_token = None\n\n return (auth_token, refresh_token)",
"def _do_authenticate(self, http_client):\n ks_kwargs = {\n 'username': self.opts.get('username'),\n 'password': self.opts.get('password'),\n 'tenant_name': self.opts.get('tenant_name'),\n 'auth_url': self.opts.get('auth_url'),\n }\n self._http_client = http_client\n self._ksclient = ksclient.Client(**ks_kwargs)",
"def authenticate_client(self):\n\n success = False\n prefix = self.build_endpoint_prefix()\n endpoint = prefix + \"/oauth/access_token?grant_type=client_credentials\"\n combined = str(self.key_id) + ':' + str(self.secret)\n message_bytes = combined.encode('utf-8')\n base64_bytes = base64.b64encode(message_bytes)\n base64_message = base64_bytes.decode('utf-8')\n headers = {\"Authorization\": str(\"Basic \" + base64_message)}\n max_tries = 5\n for _ in range(max_tries):\n token, scope = self.get_auth_token(endpoint, headers)\n if token == \"BAD\":\n # Add message for IP restrictions\n exc_msg = \"Invalid credentials- can not obtain session token.\"\n raise CloudPassageAuthentication(exc_msg)\n if token is not None:\n self.auth_token = token\n self.auth_scope = scope\n success = True\n break\n else:\n time.sleep(1)\n return success",
"def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return auth",
"def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return auth",
"def get_spotify_token(self):\n scope = \"playlist-modify-public playlist-modify-private user-read-email user-library-modify playlist-read-private\"\n token = spotipy.util.prompt_for_user_token(\n username=self.username,\n scope=scope,\n client_id=secrets.client_id,\n client_secret=secrets.client_secret,\n redirect_uri=secrets.redirect_uri\n )\n sp = spotipy.Spotify(auth=token)\n return sp",
"def get_spotify(s_creds, usernum):\n # Authorize Spotify\n\n token = spotipy.util.prompt_for_user_token(\n s_creds[\"usernames\"][usernum],\n s_creds[\"scopes\"],\n s_creds[\"client_id\"],\n s_creds[\"client_secret\"],\n s_creds[\"redirect_uri\"],\n )\n\n return spotipy.Spotify(auth=token)",
"def get_token():\n\theaders = {\n\t\t'Authorization': 'Basic ' + (base64.b64encode((client_id + ':' + client_secret).encode(\"utf-8\"))).decode(\"utf-8\")}\n\toptions = {\n\t\t'grant_type': 'client_credentials',\n\t\t'json': True,\n\t}\n\n\tresponse = requests.post(\n\t\t'https://accounts.spotify.com/api/token',\n\t\theaders=headers,\n\t\tdata=options\n\t)\n\tif response.status_code == 200:\n\t\tcontent = json.loads(response.content.decode('utf-8'))\n\t\taccess_token = content.get('access_token', None)\n\t\treturn access_token\n\telse:\n\t\treturn None",
"def open(self):\n auth = {'user': self._username, 'password': self._password}\n status, data = self.post('credentials', body=auth)\n if status == 201:\n # 201 (created) => Session succefully created\n self._headers.update({'X-HP3PAR-WSAPI-SessionKey': data['key']})\n self._key = data['key']\n elif status == 403:\n # 403 (forbidden) => Wrong user or password\n raise AuthError('Cannot connect to StoreServ. '\n 'Authentification error: %s', data['desc'])",
"def auth(client):\n return AuthActions(client)",
"def _authenticate(self):\n # Check if token expired\n if self._token_expire and self._token_expire < _time():\n self._token = ''\n\n # Get OAuth2 token\n if not self._token:\n # Get user credentials\n credentials = json_read(get_accelize_cred())\n client_id = credentials['client_id']\n client_secret = credentials['client_secret']\n\n # Endpoint override in credentials file\n self._endpoint = credentials.get('endpoint', self._ENDPOINT)\n\n # Try to get CLI cached token\n try:\n self._token, self._token_expire = get_cli_cache(client_id)\n\n # Try to get token from web service\n except TypeError:\n response = self._request(\n 'post', f'{self._endpoint}/o/token/',\n data={\"grant_type\": \"client_credentials\"},\n auth=(client_id, client_secret),\n timeout=self._TIMEOUT)\n\n if response.status_code >= 300:\n raise _AuthenticationException(\n 'Unable to authenticate client ID starting by '\n f'\"{client_id[:10]}\": '\n f'{self._get_error_message(response)}')\n\n access = response.json()\n self._token = access['access_token']\n self._token_expire = int(_time()) + access['expires_in'] - 1\n\n # Cache token value for future CLI usage\n set_cli_cache(client_id, [self._token, self._token_expire],\n self._token_expire)",
"def authenticate(self):\n\n headers = {\n 'Authorization': 'Bearer ' + self.access_token,\n 'ClientId': self.client_id,\n }\n self.headers.update(headers)",
"def _authenticate(self):\n url = self.endpoint + \"/tokens\"\n h = httplib2.Http()\n response, rawcontent = h.request(\n url, \n method=\"POST\",\n headers={ \"Content-Type\":\"application/json\" },\n body=json.dumps(self.credentials()))\n content = json.loads(rawcontent)\n self.token = content['access']['token']['id']\n #TODO: this needs to convert the ISO8601 string to a timestamp\n self.expiration = content['access']['token']['expires']\n self.catalog = content['access']['serviceCatalog']",
"def authenticate(credentials):",
"def authenticate():\n auth = OAuthHandler(config.TW_API_KEY, config.TW_API_SECRET)\n auth.set_access_token(config.TW_ACC_TOKEN, config.TW_ACC_SECRET)\n\n return auth",
"def client():\n return Client(**common_data.AUTH_ARGS)",
"def _authenticate(self):\n if self.creds().consumer_key() is None or \\\n self.creds().app_secret() is None:\n self.logger.error(\"You need a consumer key and app secret, yo\")\n else:\n self._access_token = self._request_access_token()",
"def authenticate(self):\n auth = tw.OAuthHandler(self.consumer_key, self.consumer_secret)\n auth.set_access_token(self.access_token, self.access_secret)\n return tw.API(auth)",
"def authenticate(self, username, password, consumerKey, consumerSecret):\r\n pass"
]
| [
"0.74530387",
"0.6852122",
"0.6674616",
"0.66679966",
"0.66116095",
"0.65835315",
"0.644641",
"0.64427465",
"0.61845005",
"0.61197406",
"0.60306585",
"0.6000339",
"0.593765",
"0.593331",
"0.58841735",
"0.58841735",
"0.58746624",
"0.5839965",
"0.581103",
"0.5800786",
"0.5789935",
"0.578191",
"0.5752413",
"0.57316655",
"0.5728276",
"0.5705102",
"0.56995046",
"0.56749946",
"0.5670753",
"0.5669097"
]
| 0.77174217 | 0 |
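A minimal usage sketch for the client-credentials helper in the row above; it assumes spotipy is installed, and the credential strings and track ID are hypothetical placeholders rather than values taken from the source.

import spotipy
from spotipy.oauth2 import SpotifyClientCredentials

def authenticate_spotify_api(client_id, client_secret):
    # Client-credentials flow: app-level token, no user login or redirect URI needed
    auth_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
    return spotipy.Spotify(auth_manager=auth_manager)

sp = authenticate_spotify_api("SPOTIPY_CLIENT_ID", "SPOTIPY_CLIENT_SECRET")  # placeholder keys
track = sp.track("11dFghVXANMlKmJXsNCbNl")  # example public track ID
print(track["name"], "-", track["artists"][0]["name"])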
Function to initialize the lyrics_extractor class and to authenticate the Google Custom Search Engine. | def authenticate_extract_lyrics(GCS_API_KEY, GCS_ENGINE_ID):
# Initialize lyrics_extractor class
return SongLyrics(GCS_API_KEY, GCS_ENGINE_ID) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, name='google'):\n self.engine_info = filter(lambda x: 'NAME' in x.keys() and x['NAME'] is name, SMARTSEARCH_AVAILABLE_ENGINES)[0]\n self.connection = build('customsearch', 'v1', developerKey=self.engine_info['GOOGLE_SITE_SEARCH_API_KEY'])",
"def __init__(self):\n # keys and tokens from the Twitter Dev Console\n key = provide_keys('males')\n\n consumer_key = key['consumer_key']\n consumer_secret = key['consumer_secret']\n access_token = key['access_token_key']\n access_token_secret = key['access_token_secret']\n\n # attempt authentication\n\n # create OAuthHandler object\n self.auth = OAuthHandler(consumer_key, consumer_secret)\n\n # set access token and secret\n self.auth.set_access_token(access_token, access_token_secret)\n\n try:\n # create tweepy API object to fetch tweets\n self.api = tweepy.API(self.auth)\n\n except:\n print(\"Error: Authentication Failed\")\n sys.exit(-1)",
"def __init__(self, email = None, password = None):\n\t\t\n\t\tif email is None:\n\t\t\temail = raw_input(\"Please enter your Google Account username: \")\n\t\tif password is None:\n\t\t\timport getpass\n\t\t\tpassword = getpass.getpass(\"Please enter your Google Account password: \")\n\n\t\t# Set up our opener\n\t\tself.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())\n\t\turllib2.install_opener(self.opener)\n\n\t\t# Define URLs\n\t\tself.login_page_url = 'https://accounts.google.com/ServiceLogin?service=grandcentral'\n\t\tself.authenticate_url = 'https://accounts.google.com/ServiceLoginAuth?service=grandcentral'\n\t\tself.gv_home_page_url = 'https://www.google.com/voice/#inbox'\n\t\tself.contacts_url = 'https://www.google.com/voice/c/u/{0}/ui/ContactManager'\n\n\t\t# Load sign in page\n\t\tlogin_page_contents = self.opener.open(self.login_page_url).read()\n\n\t\t# Find GALX value\n\t\tgalx_match_obj = re.search(r'name=\"GALX\"\\s*type=\"hidden\"\\n\\s*value=\"([^\"]+)\"', login_page_contents, re.IGNORECASE)\n\n\t\tgalx_value = galx_match_obj.group(1) if galx_match_obj.group(1) is not None else ''\n\n\t\t# Set up login credentials\n\t\tlogin_params = urllib.urlencode({\n\t\t\t'Email' : email,\n\t\t\t'Passwd' : password,\n\t\t\t'continue' : 'https://www.google.com/voice/account/signin',\n\t\t\t'GALX': galx_value\n\t\t})\n\n\t\t# Login\n\t\tself.opener.open(self.authenticate_url, login_params)\n\n\t\t# Open GV home page\n\t\tgv_home_page_contents = self.opener.open(self.gv_home_page_url).read()\n\n\t\t# Fine _rnr_se value\n\t\tkey = re.search('name=\"_rnr_se\".*?value=\"(.*?)\"', gv_home_page_contents)\n\n\t\tif not key:\n\t\t\tself.logged_in = False\n\t\telse:\n\t\t\tself.logged_in = True\n\t\t\tself.key = key.group(1)\n\t\t\t\n\t\t\tusername = email.split('@')[0]\n\t\t\tcontacts_content = self.opener.open(self.contacts_url.format(username)).read()\n\t\t\ttok_match_obj = re.search(r\"var\\s+tok\\s*=\\s*'([^']+)'\", contacts_content, re.IGNORECASE)\n\t\t\t\n\t\t\tself.contact_tok = tok_match_obj.group(1) if tok_match_obj.group(1) is not None else ''",
"def __init__(self, username, password):\n self.login_params = {\n 'continue': 'http://www.google.com/trends',\n 'PersistentCookie': 'yes',\n 'Email': username,\n 'Passwd': password}\n # provide fake user agent to look like a desktop browser\n self.fake_ua = UserAgent()\n self.headers = [\n ('Referrer', 'https://www.google.com/accounts/ServiceLoginBoxAuth'),\n ('Content-type', 'application/x-www-form-urlencoded'),\n ('User-Agent', self.fake_ua.chrome),\n ('Accept', 'text/plain')]\n self.url_ServiceLoginBoxAuth = 'https://accounts.google.com/ServiceLoginBoxAuth'\n self.url_Export = 'http://www.google.com/trends/trendsReport'\n self.url_CookieCheck = 'https://www.google.com/accounts/CheckCookie?chtml=LoginDoneHtml'\n self.url_PrefCookie = 'http://www.google.com'\n self._connect()",
"def __init__(self):\n self.sp, self.user = self.init_auth_client()\n self.logger = logging.getLogger(__name__)",
"def __init__(self):\n self.gdc = GdocsCrawler()",
"def __init__(self, credentials):\n http = credentials.authorize(httplib2.Http())\n self.service = googleapiclient.discovery.build(\"drive\", \"v2\", http=http)",
"def __init__(self, **kwargs):\n self._username = kwargs.get('username', current_app.config.get('WORDAI_API_EMAIL', None))\n self._password = kwargs.get('password', current_app.config.get('WORDAI_API_PASSWORD', None))\n self._hash = kwargs.get('hash', current_app.config.get('WORDAI_API_KEY', None))",
"def __init__(self):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n self.api = tweepy.API(auth)",
"def __init__(self, client_access_token, artist_name):\n self.client_access_token = client_access_token\n self.artist_name = artist_name\n self.base_url = 'https://api.genius.com/'\n self.headers = {'Authorization': 'Bearer ' + self.client_access_token}\n self.artist_songs = None",
"async def initialize(self):\r\n self.access_token = await async_get_value(SPOTIFY_ACCESS_TOKEN)\r\n self.refresh_token = await async_get_value(SPOTIFY_REFRESH_TOKEN)\r\n self.should_poll = await async_get_value(SPOTIFY_SHOULD_POLL)\r\n request_code = self.get_currently_playing().status_code\r\n if request_code == requests.codes.ok or request_code == requests.codes.no_content:\r\n self.start_polling_and_refresh()\r\n return\r\n\r\n # Go through the oauth flow.\r\n self.auth_thread = StoppableThread(target=self.check_and_test_auth)\r\n self.auth_thread.start()\r\n return",
"def __init__(self, connector):\n self.con = connector\n self.words = self.con.getWordsAPI()\n self.serverSettings = {}\n self.custom = {}\n self.ignored = {}\n self.whitelist = {}\n self.requestLog = []\n self.retrieveGuildsInfo()\n self.retrieveCustomWords()\n self.retrieveIgnoredWords()\n self.retrieveWhitelist()",
"def setup(self):\n # Load application default credentials if they're available.\n self.credentials = self._load_application_default_credentials()\n\n # Otherwise, load credentials from the provided client secrets file.\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the Credentials tab on the Google Developers Console.\n self.client_secrets = os.path.join(os.path.dirname(__file__),\n self.client_secrets)\n\n credential_store_file = os.path.join(os.path.dirname(__file__),\n self.credential_store_file)\n\n storage = oauthFile.Storage(credential_store_file)\n\n if self.credentials is None or self.credentials.invalid:\n self.credentials = self._load_user_credentials(storage)\n\n # Authorize HTTP object with the prepared credentials.\n http = self.credentials.authorize(http=httplib2.Http())\n\n # Construct and return a service object via the discovery service.\n self.service = discovery.build(self.api_name, self.api_version, http=http)\n return self.service",
"def __init__(self, username):\n self.spotify = spotipy.Spotify(simple_auth_token(username))",
"def __init__(self,\n auth: str = None,\n user: str = None,\n requests_session=True,\n proxies: dict = None,\n language: str = 'en',\n location: str = ''):\n self.auth = auth\n self.input_dict = None\n self.is_oauth_auth = False\n\n if isinstance(requests_session, requests.Session):\n self._session = requests_session\n else:\n if requests_session: # Build a new session.\n self._session = requests.Session()\n self._session.request = partial(self._session.request, timeout=30)\n else: # Use the Requests API module as a \"session\".\n self._session = requests.api\n\n self.proxies = proxies\n self.cookies = {'CONSENT': 'YES+1'}\n if self.auth is not None:\n input_json = load_headers_file(self.auth)\n self.input_dict = CaseInsensitiveDict(input_json)\n self.input_dict['filepath'] = self.auth\n self.is_oauth_auth = is_oauth(self.input_dict)\n\n self.headers = prepare_headers(self._session, proxies, self.input_dict)\n\n if 'x-goog-visitor-id' not in self.headers:\n self.headers.update(get_visitor_id(self._send_get_request))\n\n # prepare context\n self.context = initialize_context()\n\n if location:\n if location not in SUPPORTED_LOCATIONS:\n raise Exception(\"Location not supported. Check the FAQ for supported locations.\")\n self.context['context']['client']['gl'] = location\n\n if language not in SUPPORTED_LANGUAGES:\n raise Exception(\"Language not supported. Supported languages are \"\n + (', '.join(SUPPORTED_LANGUAGES)) + \".\")\n self.context['context']['client']['hl'] = language\n self.language = language\n try:\n locale.setlocale(locale.LC_ALL, self.language)\n except locale.Error:\n with suppress(locale.Error):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n locale_dir = os.path.abspath(os.path.dirname(__file__)) + os.sep + 'locales'\n self.lang = gettext.translation('base', localedir=locale_dir, languages=[language])\n self.parser = Parser(self.lang)\n\n if user:\n self.context['context']['user']['onBehalfOfUser'] = user\n\n auth_header = self.headers.get(\"authorization\")\n self.is_browser_auth = auth_header and \"SAPISIDHASH\" in auth_header\n if self.is_browser_auth:\n try:\n cookie = self.headers.get('cookie')\n self.sapisid = sapisid_from_cookie(cookie)\n except KeyError:\n raise Exception(\"Your cookie is missing the required value __Secure-3PAPISID\")",
"def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])",
"def __init__(self, zk_client):\n self._settings = SearchServiceSettings(zk_client)\n self.solr = SolrAPI(zk_client, SOLR_ZK_ROOT, self._settings)",
"def __init__(self):\r\n\t\t\r\n\t\tself.redis = redis.Redis()\r\n\t\tself.info_to_get = ['text', 'created_at', 'user']\r\n\t\tself.search_results = {}\r\n\t\tself.raw_data_directory_name = \"raw_mining_data\"\r\n\t\tself.filtered_data_directory_name = \"filtered_mining_data\"\r\n\t\tenglish_file = pjoin( sys.path[0], \"sentiment_word_files\", \"Nielsen2010Responsible_english.csv\")\r\n\t\tself.analyzeEnglish = dict(map(lambda (w,e): (w, int(e)), \\\r\n\t\t\t\t\t\t\t\t\t[ line.strip().lower().split('\\t') for line in open(english_file) ]))\r\n\t\tself.tweets_count = 0",
"def __init__(self):\n self.service = Client(key=GEO_LOCATION_API_KEY)",
"def __init__(self, store=None):\n\n if not store:\n store = sys.argv[0]\n\n self.current_page = None\n self.channel_id = ''\n self.playlist_id = ''\n\n self.current_results_file = os.path.join(store, \"old_results.json\")\n if os.path.exists(self.current_results_file):\n with open(self.current_results_file, \"r\") as f:\n results = json.load(f)\n self.current_page = results[\"current_page\"]\n self.playlist_id = results[\"playlist_id\"]\n\n flow = client.flow_from_clientsecrets(CLIENT_SECRETS_FILE, message=MISSING_MESSAGE,\n scope=SCOPE)\n\n storage = Storage(os.path.join(store, \"-oauth2.json\"))\n\n credentials = storage.get()\n\n if not credentials or credentials.invalid:\n flags = argparser.parse_args()\n credentials = run_flow(flow, storage, flags)\n\n self.youtube = build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http()))\n self.get_ids_request = None",
"def setup_class(cls):\n initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST)",
"def __init__(self, username, password): \n self.login_params = {\n 'GA3T': _random_id(11), # unique identifiers for session\n 'GALX': _random_id(11), # unique identifiers for session\n \"continue\": 'http://www.google.com/trends',\n 'nui': '1',\n 'hl': 'en-US',\n 'rmShown': '1',\n \"PersistentCookie\": \"yes\",\n \"Email\": username,\n \"Passwd\": password,\n 'service': 'trends' \n }\n self.headers = [(\"Content-type\", \"application/x-www-form-urlencoded\"),\n ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'),\n (\"Accept\", \"text/plain\")]\n self.url_ServiceLoginBoxAuth = 'https://www.google.com/accounts/ServiceLoginBoxAuth'\n self.url_Export = 'http://www.google.com/trends/viz'\n self.header_dictionary = {}\n self._connect()",
"def __init__(self, query, title, link, subtext, searchterms, scripts):\n self.search_query = query\n self.title = title\n self.link = link\n self.subtext = subtext\n self.searchterms = searchterms\n self.link_scripts = scripts",
"def __init__(self):\n\n # TODO: Add login and data grab logic",
"def __init__(self):\n self.site = pywikibot.Site(u'commons', u'commons')\n self.generator = self.getGenerator()",
"def __init__(self):\n\n\t\tself.account_sid = os.environ['TWILIO_ACCOUNT_SID']\n\t\tself.auth_token = os.environ['TWILIO_AUTH_TOKEN']\n\t\tself.twilio_phone_number = os.environ['TWILIO_PHONE_NUMBER']\n\t\tself.client = Client(self.account_sid, self.auth_token)\n\n\t\tself.call_domain = 'http://twimlets.com/echo?Twiml='",
"def __init__(self, recipe_url):\n self.recipe_url = recipe_url\n self.translator = TranslationClient()",
"def __init__(self, username = None, password = None):\n self.username = config['AUTH']['USERNAME']\n self.password = config['AUTH']['PASSWORD']\n self.login = config['URL']['LOGIN']\n self.nav_url = config['URL']['NAV']\n self.tag_url = config['URL']['TAGS']\n self.direct_url = config['URL']['DM']\n self.driver = webdriver.Chrome(config['ENVIRONMENT']['CHROMEDRIVER'])\n self.stay_logged = False\n self.api = InstagramAPI(self.username, self.password)",
"def __init__(self, twitter_consumer_key, twitter_consumer_secret,\n twitter_access_key, twitter_access_secret,\n search_terms, search_on='news',\n bitly_access_token='',\n news_api_key=''):\n\n # Access Keys and Secrets for Twitter API obtained at: https://developer.twitter.com/\n auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)\n auth.set_access_token(twitter_access_key, twitter_access_secret)\n\n # Store API object for access to Twitter REST API\n self.__api = tweepy.API(auth)\n\n # Term(s) to search news feeds or Twitter on\n self.search_terms = search_terms\n\n # Method TwitterBot will use to search on. Current options are 'news' or 'twitter'\n self.search_on = search_on\n\n # Access token for optional Bitly API: https://dev.bitly.com/\n self.__bitly_access_token = bitly_access_token\n\n # Access token for optional News API: https://newsapi.org/\n self.__news_api_key = news_api_key\n\n # Will store list of items scraped from news or Twitter\n self.list = []",
"def setUp(self):\n self.twitter = Twitter(CUR_DIR + \"/test_crossfit.tweets\", CUR_DIR + \"/test_stop_words.txt\")\n self.twitter.load_tweets_and_build_index()\n\n self.searcher = Searcher(self.twitter.tweets, self.twitter.stop_words)"
]
| [
"0.6703985",
"0.62430346",
"0.60308164",
"0.6027884",
"0.5980706",
"0.59597474",
"0.59042263",
"0.5898735",
"0.5815421",
"0.5810548",
"0.58052367",
"0.57967937",
"0.5794903",
"0.5770148",
"0.5767109",
"0.5728078",
"0.5684845",
"0.5671809",
"0.5668664",
"0.56507987",
"0.5641719",
"0.56317705",
"0.5617409",
"0.5594812",
"0.55944943",
"0.55936235",
"0.5552783",
"0.55403507",
"0.55397195",
"0.5539502"
]
| 0.7254119 | 0 |
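A short sketch of how the SongLyrics helper above is typically used; the two keys are hypothetical placeholders, and it assumes the lyrics_extractor package plus a Google Custom Search JSON API key and engine ID are available.

from lyrics_extractor import SongLyrics

extract_lyrics = SongLyrics("GCS_API_KEY", "GCS_ENGINE_ID")  # hypothetical credentials
result = extract_lyrics.get_lyrics("Shape of You")  # returns a dict with the matched title and lyrics
print(result["title"])
print(result["lyrics"][:200])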
Function to get the album image URLs from tracks with the Spotify API, given a list of track IDs. | def get_img_urls(track_ids, sp):
# Get a list with track information using a list of track id's
tracks = sp.tracks(track_ids)
# Initialize list to append image urls to
img_urls = []
for i in range(len(tracks['tracks'])):
images = tracks['tracks'][i]['album']['images']
seq = [x['height'] for x in images]
img = next(item for item in images if item['height'] == min(seq))
img_url = img['url']
img_urls.append(img_url)
return img_urls | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tracks_from_albums(sp, album_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for album_uri in album_uri_list:\n album_tracks = sp.album_tracks(album_uri, limit=50, offset=0)[\"items\"]\n count_tracks_in_album = len(album_tracks)\n album_release_date = sp.album(album_uri)[\"release_date\"]\n\n # This part is probably very slow and should be improved by accessing the API less often\n for track_number in range(count_tracks_in_album):\n track_name = album_tracks[track_number][\"name\"]\n track_uri = album_tracks[track_number][\"uri\"]\n \n track_list.append([track_name, track_uri, album_release_date])\n\n # Create df from list of tracks for all albums\n track_df = pd.DataFrame(data=track_list[1:], columns=track_list[0])\n \n print(\"Log: Finished pulling all tracks from albums.\")\n return track_df",
"def spotify_tracklist():\n sp = credentials()\n chart = chartdata()\n trackid_list = []\n #find a way to get track IDS\n for track in chart:\n searchQuery = track[0]\n searchResults = sp.search(q=searchQuery, limit=1, type='track', market=\"US\")\n trackid_list.append(searchResults['tracks']['items'][0]['uri'])\n return trackid_list",
"def fetchAlbumIds(artist_id):\n url = 'https://api.spotify.com/v1/artists/' + artist_id + '/albums?market=US&album_type=album'\n req = requests.get(url)\n\n data = req.json()\n\n #checking for bad return value\n if not req.ok:\n print \"error : \" + data['error']['message']\n return \"error : \" + data['error']['message']\n\n albums = []\n for item in data['items']:\n \talbums.append(item['id'])\n\n return albums",
"def search_for_tracks(album_id):\n \n track_results = spotifyObject.album_tracks(album_id)\n track_results = track_results['items']\n ids = [track['id'] for track in track_results]\n\n return ids",
"def get_tracks_from(album_uri):\n track_uris = []\n album = spotify.album_tracks(album_id=album_uri)\n\n for track in album['items']:\n track_uris.append(track['uri'])\n\n return track_uris",
"def get_albums_from_artists(sp, artist_uri_list):\n\n # Create header for output df\n albums_list = [[\"name\", \"album_uri\", \"album_release_date\", \"artist_uri\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n # Loop through list of artist uris\n for artist_uri in artist_uri_list:\n # Get album from artist\n albums = sp.artist_albums(artist_uri)\n \n # Append each album to list\n for album in albums[\"items\"]:\n album_name = album[\"name\"]\n album_uri = album[\"uri\"]\n album_release_date = album[\"release_date\"]\n albums_list.append([album_name, album_uri, album_release_date, artist_uri])\n\n # Create df from list of albums for all artist\n albums_df = pd.DataFrame(data=albums_list[1:], columns=albums_list[0])\n\n print(\"Log: Finished pulling all albums from artist.\")\n return albums_df",
"def albums(self, albums, **kwargs):\n album_list = map(self._get_album_id, albums)\n return self._get(API.ALBUMS.value, ids=\",\".join(album_list), **kwargs)",
"def get_all_tracks_from_artists(sp, artist_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\", \"artist_name\"]]\n track_df = pd.DataFrame(columns=track_list[0])\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for artist_uri in artist_uri_list:\n # Get artist name and albums\n artist_name = sp.artist(artist_uri)[\"name\"]\n albums = get_albums_from_artists(sp, [artist_uri])\n\n # Get tracks from artist albums\n tracks_artist_df = get_tracks_from_albums(sp, albums[\"album_uri\"].to_list())\n tracks_artist_df[\"artist_name\"] = artist_name\n\n # Append new songs to dataframe\n track_df = track_df.append(tracks_artist_df)\n \n print(\"Log: Finished pulling all tracks from artist.\")\n return track_df",
"def parse_tracks(self, tracks_json: list):\n tracks = []\n for track in tracks_json:\n track_parsed = {\n 'id': track['id'],\n 'name': track['name'],\n }\n track_parsed['description'] = self.parse_description(track)\n track_parsed['image'] = track['album']['images'][0]['url']\n tracks.append(track_parsed)\n\n return tracks",
"def get_album_tracks(self):\n track_list = self.soup.findAll('div', class_='chart_row')\n number_of_tracks = 0\n titles = []\n urls = []\n track_numbers = []\n \n for track in track_list:\n track_title = re.sub(' Lyrics', '', \" \".join(track.h3.text.split()))\n lyrics_url = track.a['href']\n track_number = track.span.span.text.strip()\n \n if track_number == '':\n # Sometimes there are additional urls that are not a song's lyrics. Skip these.\n continue\n else:\n track_number = int(track_number)\n \n number_of_tracks += 1\n titles.append(track_title)\n urls.append(lyrics_url)\n track_numbers.append(track_number)\n \n if self.song_order:\n # Check that order values are okay.\n for number in self.song_order:\n if number > number_of_tracks:\n raise SongOrderValueError(f'Track number given ({number}) exceeds number of tracks ({number_of_tracks})')\n \n for title, url, number in zip(titles, urls, track_numbers):\n if self.song_order:\n if number not in self.song_order:\n print(f'Skipping song: {number:02d} {title}')\n continue\n \n lyrics = self.get_single_lyrics(url)\n self.album.add_song(Song(title=title, track_number=number, lyrics=lyrics))\n\n self.album.number_of_tracks = number_of_tracks",
"def playlist_track_ids(playlist_id, authorizer, verbose=False):\n spotify_endpoint = f'https://api.spotify.com/v1/playlists/{playlist_id}/tracks'\n params = {'fields':'items(track(id)),next,total'} # only get id's of tracks, and total number of tracks in playlist\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n\n tracks = None\n index = 0\n \n # stops when no more pages left\n while spotify_endpoint:\n response = requests.get(spotify_endpoint, params=params, headers=headers)\n\n if response.status_code == 200:\n data = response.json()\n \n # allocate array for tracks\n if tracks is None:\n tracks = [''] * data['total']\n \n # add tracks to array\n for track in data['items']:\n i = track['track']['id']\n tracks[index] = i\n index += 1\n\n # move forward in paging\n spotify_endpoint = data['next']\n elif response.status_code == 429:\n limit = int(response.headers['Retry-After'])\n print('Hit rate limit, waiting for {} seconds to continue'.format(limit))\n time.sleep(limit)\n elif response.status_code == 401:\n print('Access token expired, refreshing...')\n authorizer.refresh()\n else:\n print('Error %d' % response.status_code)\n if verbose:\n print(json.loads(response.text))\n return None\n\n return [t for t in tracks if t is not None] # filter out null tracks",
"def read_album_tracks(id, artist_name, album_name):\n list_a = [x.name for x in dmla.list_tracks(id)]\n list_c = [x['title'] for x in dmlc.list_tracks_for_album(artist_name, album_name)\n if x['track'] != -1]\n return list_a, list_c",
"def get_tracks(self):\n artist = self.get_request_arg(\"artist\")\n album = self.get_request_arg(\"album\")\n if not (album and artist):\n return self.resp_from_data(\n {\"message\": \"Please specify a valid artist and album\"}, 403)\n else:\n tracks = self.ctrl.library.get_tracks(artist, album)\n return self.resp_from_data(tracks)",
"def get_albums(entity_url: str) -> list:\n entity_url = entity_url.rstrip(\"/\")\n response = requests.get(entity_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n albums = []\n for link in soup.find_all('a'):\n url = link.get('href')\n if url is not None and \"/album/\" in url:\n if url.startswith(\"http\"):\n albums.append(url)\n else:\n albums.append(f\"{entity_url}{url}\")\n return albums",
"def get_albums(self):\n artist = self.get_request_arg(\"artist\")\n if artist:\n lib = self.ctrl.library\n lst = sorted(self.ctrl.library.get_albums(artist))\n albums = [{\"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)} for album in lst]\n if lst:\n return self.resp_from_data(albums)\n return self.resp_from_data(\n {\"message\": f\"No album found for artist={artist}\"}, 400)",
"def tracked_albums():\n print('Your Google Photos Albums ([X] = tracked):')\n albums = get_albums(service)\n for i, a in enumerate(albums):\n check = 'X' if a.id in library.get_album_ids() else ' '\n print('[{}] {}. {}'.format(check, i+1, a.title))\n return albums",
"def fetch_song_data(self, song_ids):\n\t\ttracks_base_url = \"https://api.spotify.com/v1/tracks\"\n\t\theaders = {}\n\t\ttrack_ids = ','.join(song_ids)\n\t\tquery_params = \"/?ids=\"+track_ids\n\t\ttracks_url = tracks_base_url + query_params\n\t\ttracks={}\n\t\theaders['Authorization'] = f\"Bearer {self.token}\"\n\n\t\ttry:\n\t\t\treq = request.Request(url=tracks_url,data=None, headers=headers)\n\t\t\tresponse = request.urlopen(req).read().decode()\n\t\t\ttracks = json.loads(response)\n\t\t\tlogging.info(\"Successfully fetched songs from Spotify!\")\n\t\texcept error.URLError as e:\n\t\t\tresponse = e.read().decode(\"utf8\", 'ignore')\n\t\t\tlogging.error(response)\n\t\treturn tracks",
"def tracks(self, tracks, market=\"from_token\", **kwargs):\n track_list = map(self._get_track_id, tracks)\n return self._get(\n API.TRACKS.value, ids=\",\".join(track_list), market=market, **kwargs\n )",
"def test_get_songs_by_album_id(self, track_elms, service_config, request):\n album_id = uuid.UUID(avalon.compat.to_uuid_input('37cac253-2bca-4a3a-be9f-2ac655e04ad8'))\n service_config.track_store.get_by_album.return_value = track_elms\n request.args['album_id'] = six.text_type(album_id)\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_album.assert_called_with(album_id)",
"def cmd_album_images(client, args):\n album_images = client.get_album_images(args.album_id)\n data = [item.__dict__ for item in album_images]\n generate_output({'album_images': data}, args.output_file)",
"def get_track_info(track_id):\n items = spotify.track(track_id)\n name = items[\"name\"]\n artists_names = \", \".join([\n items[\"artists\"][x][\"name\"]\n for x in range(len(items[\"artists\"]))\n ])\n album_artists = \", \".join([\n items[\"album\"][\"artists\"][x][\"name\"]\n for x in range(len(items[\"album\"][\"artists\"]))\n ])\n album_type = items[\"album\"][\"album_type\"]\n album_name = items[\"album\"][\"name\"]\n album_release = items[\"album\"][\"release_date\"]\n album_track_number = items[\"track_number\"]\n track_duration = items[\"duration_ms\"]\n images_link = items[\"album\"][\"images\"]\n max_image_res = 0\n max_icon_size = 0\n image_link = \"\"\n icon_link = \"\"\n for image in images_link:\n if image[\"height\"] * image[\"width\"] > max_image_res:\n image_link = image[\"url\"]\n max_image_res = image[\"height\"] * image[\"width\"]\n if image[\"height\"] < 400:\n if image[\"height\"] > max_icon_size:\n max_icon_size = image[\"height\"]\n icon_link = image[\"url\"]\n track = {\"name\": name,\n \"Artist(s)\": artists_names,\n \"Album Artist(s)\": album_artists,\n \"Album Type\": album_type,\n \"Album Name\": album_name,\n \"Album Release\": album_release,\n \"Track Number\": album_track_number,\n \"Track Duration (ms)\": track_duration,\n \"Image Link\": image_link,\n \"Icon Link\": icon_link\n }\n\n for artist in artists_names.split(', '):\n \"\"\"\n Checks for lyrics with song name and artist names\n combination until one is found.\n \"\"\"\n try:\n lyrics = lyricwikia.get_lyrics(artist, name)\n track['lyrics'] = lyrics\n break\n except lyricwikia.LyricsNotFound:\n pass\n\n return track",
"def test_get_s_track_artists(self):\r\n sp_track = spotify.SpAlbumTrack(munch.munchify({'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/25BObiRSDCMwVrBGIVaLIf'}, 'href': 'https://api.spotify.com/v1/artists/25BObiRSDCMwVrBGIVaLIf', 'id': '25BObiRSDCMwVrBGIVaLIf', 'name': 'James K', 'type': 'artist', 'uri': 'spotify:artist:25BObiRSDCMwVrBGIVaLIf'}, {'external_urls': {'spotify': 'https://open.spotify.com/artist/1g80vffuPrdapR6S4WyxN3'}, 'href': 'https://api.spotify.com/v1/artists/1g80vffuPrdapR6S4WyxN3', 'id': '1g80vffuPrdapR6S4WyxN3', 'name': 'Eve Essex', 'type': 'artist', 'uri': 'spotify:artist:1g80vffuPrdapR6S4WyxN3'}], 'available_markets': ['AD', 'AE', 'AR', 'AT', 'AU', 'BE', 'BG', 'BH', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'DZ', 'EC', 'EE', 'EG', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN',\r\n 'HU', 'ID', 'IE', 'IL', 'IN', 'IS', 'IT', 'JO', 'JP', 'KW', 'LB', 'LI', 'LT', 'LU', 'LV', 'MA', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'OM', 'PA', 'PE', 'PH', 'PL', 'PS', 'PT', 'PY', 'QA', 'RO', 'SA', 'SE', 'SG', 'SK', 'SV', 'TH', 'TN', 'TR', 'TW', 'US', 'UY', 'VN', 'ZA'], 'disc_number': 1, 'duration_ms': 260446, 'explicit': False, 'external_urls': {'spotify': 'https://open.spotify.com/track/1mh4GpKKrmlaUkVzoNqhRt'}, 'href': 'https://api.spotify.com/v1/tracks/1mh4GpKKrmlaUkVzoNqhRt', 'id': '1mh4GpKKrmlaUkVzoNqhRt', 'is_local': False, 'name': 'Stretch Deep - feat. Eve Essex', 'preview_url': 'https://p.scdn.co/mp3-preview/ebb7e70b97a5d29e05044a1f920d1fc594f92b26?cid=ea3ef49a097b42d682d3c7bc98832d65', 'track_number': 12, 'type': 'track', 'uri': 'spotify:track:1mh4GpKKrmlaUkVzoNqhRt'}))\r\n expected = ['Eve Essex', 'James K']\r\n actual = gmspotify.get_sp_track_artists(sp_track)\r\n self.assertEqual(actual, expected)",
"def pic (self, list) : \n result = []\n for pmod in list :\n result.append (pmod.photo_uri)\n return result",
"def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)",
"def get_albums_from(artist_uri):\n album_uris = []\n results = spotify.artist_albums(artist_uri, album_type='album')\n albums = results['items']\n # get URIs for each album\n for album in albums:\n album_uris.append(album['uri'])\n\n return album_uris",
"def get_track_ids_of_playlist(self, playlist_id):\n def get_playlist_data(url):\n req = requests.get(url, headers=self.__header_bearer())\n return req.json() if req.status_code == 200 else False\n\n track_uris = []\n\n endpoint = f'/playlists/{playlist_id}/tracks'\n url = f'{self.api_base_url}{endpoint}'\n\n playlist_data = get_playlist_data(url)\n while True:\n if not playlist_data:\n break\n\n for track in playlist_data['items']:\n track_uris.append(track['track']['uri'])\n\n if not playlist_data['next']:\n break\n else:\n time.sleep(0.5)\n playlist_data = get_playlist_data(playlist_data['next'])\n return track_uris",
"def album_list(self):\n\n artist_id = self.addon_args[\"artist_id\"][0]\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_artist(artist_id):\n self.add_album(album)\n\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_UNSORTED)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ALBUM)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ARTIST)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)\n\n xbmcplugin.endOfDirectory(self.addon_handle)",
"def search_multiple_tracks(search_query, sp):\r\n \r\n # List to store the track ids\r\n track_ids = []\r\n # List to store the track names and artists\r\n tracks = []\r\n\r\n #Search for 10 results in the Spotify API given a search querry\r\n results = sp.search(q = search_query ,limit=10)\r\n results = results['tracks']['items']\r\n\r\n # Extract the track id's, names and artists for all the search results\r\n for i in range(len(results)):\r\n\r\n # Get track id, artist and name\r\n track_id = results[i]['id']\r\n artist = results[i]['artists'][0]['name']\r\n track_name = results[i]['name']\r\n\r\n # Get a string with the artist and track name\r\n track = artist + ' - ' + track_name\r\n\r\n # Append the track id's and track name/artist to the list\r\n track_ids.append(track_id)\r\n tracks.append(track)\r\n\r\n # Make a dictionary of the track id and track name/artist list.\r\n return dict(zip(tracks,track_ids))",
"def album_tracks(self, album_id, limit=50, offset=0, **kwargs):\n _id = self._get_album_id(album_id)\n # pylint: disable=no-member\n return self._get(\n API.ALBUM_TRACKS.value.format(id=_id), limit=limit, offset=offset, **kwargs\n )",
"def getSongsSpotify(song_name,access_token):\n song_name = song_name.strip()\n query = \"https://api.spotify.com/v1/search?q={}&type=track&limit=20&offset=0\".format(song_name)\n response = requests.get(\n query,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # \n \n songs_no = response_json[\"tracks\"][\"total\"]\n if songs_no == 0 :\n return {\"songs_no\" : songs_no}\n songs = response_json[\"tracks\"][\"items\"]\n if(len(songs)<5):\n uri = [songs[0][\"uri\"]]\n names = [songs[0][\"name\"]]\n artists = [songs[0][\"artists\"][0][\"name\"]]\n imageUrl = [songs[0][\"album\"][\"images\"][-1][\"url\"]]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n else:\n uri = [ songs[i][\"uri\"] for i in range(0,5)]\n names = [songs[i][\"name\"] for i in range(0,5)]\n artists = [songs[i][\"artists\"][0][\"name\"] for i in range(0,5)]\n imageUrl = [songs[i][\"album\"][\"images\"][-1][\"url\"] for i in range(0,5)]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n return response_obj"
]
| [
"0.69385874",
"0.68852514",
"0.6544861",
"0.64002246",
"0.63731915",
"0.6324169",
"0.63051",
"0.6283222",
"0.61360055",
"0.6056502",
"0.6027224",
"0.59959334",
"0.5973237",
"0.59613895",
"0.5957166",
"0.5903311",
"0.5832611",
"0.58309025",
"0.58115005",
"0.5808388",
"0.58061486",
"0.5802211",
"0.5757172",
"0.57514656",
"0.5724534",
"0.57087237",
"0.56965154",
"0.56800497",
"0.5665905",
"0.56600535"
]
| 0.8094781 | 0 |
Function to make a radar chart of the audio features of a song. | def radar_chart(song, dataset):
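# Assumes `song` is a single-row DataFrame of Spotify-style audio features
# and `dataset` is the full track table used for min-max scaling of tempo.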
# Reset the index of the song dataframe
song = song.reset_index(drop = True)
# Normalize the audio features of the song using the audio features of the database.
song['tempo_norm'] = (song['tempo'] - dataset['tempo'].min())/(dataset['tempo'].max()- dataset['tempo'].min())
song['loudness_norm'] = 10 ** (song['loudness'] / 20)
# Only keep the audio features of the song
song = song[['danceability','energy','speechiness','acousticness','instrumentalness','liveness','valence','tempo_norm', 'loudness_norm']]
song = song.rename({'tempo_norm': 'tempo normalized', 'loudness_norm': 'loudness normalized'}, axis=1)
song = song.T
song = song.reset_index()
# Create radar chart
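# After the transpose, the single value column is named 0 (the row index of `song`),
# which is what px.line_polar receives as the radial coordinate below.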
fig = px.line_polar(song, r = 0, theta = 'index', line_close = True)
fig.update_traces(fill = 'toself')
return fig | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def radar_map(song_id):\n c = [\"acousticness\", \"danceability\", \"energy\", \"valence\"] # Columns to Show\n N = len(c)\n values=df[df[\"track_id\"] == song_id].iloc[0][c].tolist()\n values += values[:1]\n print(values)\n angles = [n / float(N) * 2 * 3.141 for n in range(N)]\n angles += angles[:1]\n print(angles)\n ax = plt.subplot(111, polar=True)\n plt.xticks(angles[:-1], c, color='grey', size=8)\n plt.yticks([], [], color=\"grey\", size=7)\n ax.set_rlabel_position(0)\n ax.plot(angles, values, linewidth=1, linestyle='solid')\n ax.fill(angles, values, 'b', alpha=0.1)\n pic_bytes = io.BytesIO()\n plt.savefig(pic_bytes, format=\"png\")\n pic_bytes.seek(0)\n data = base64.b64encode(pic_bytes.read()).decode(\"ascii\")\n plt.clf()\n return \"<img src='data:image/png;base64,{}'>\".format(data)",
"def make_recommendation_ga(playlist):\n tracklist = []\n\n # tracknames = list(playlist['name'])\n print(playlist.head())\n\n track_features = playlist[['danceability', 'energy']]\n # 'speechiness', 'acousticness',\n # 'instrumentalness', 'liveness', 'valence']]\n\n track_features_matrix = track_features.values\n\n path, fitness = ga.genetic_algorithm(track_features_matrix, plot=False)\n\n visualization.plot_path(\n track_features,\n path,\n fitness,\n mode=\"none\",\n keep=True\n )\n\n return tracklist",
"def example():\n station = \"KFTG\"\n product = 'N0Q'\n start = datetime(2020,5,16,0,0)\n\n file_list,LatLonBox = query_radar_data(station,product,start,\n minute_delta=30,hour_delta=0,day_delta=0)\n radar_plot(station,save_path,product,start,file_list,LatLonBox,save=False,show=True,index=0)",
"def build_radar_chart(players, player_attributes, num_of_top_players=10):\n\n df_overall_rating = player_attributes[[\"id\", \"player_api_id\", \"overall_rating\"]]\n df_record = df_overall_rating.sort_values(by=\"overall_rating\", ascending=False)\n df_record.drop_duplicates(\"player_api_id\", \"first\", inplace=True)\n top_n_records = df_record[:num_of_top_players]\n index_headers = list(top_n_records.index)\n\n df_name = players[[\"id\", \"player_api_id\", \"player_name\"]]\n top_n_players = pd.DataFrame()\n for i in range(len(top_n_records)):\n x = top_n_records.player_api_id.iat[i]\n top_n_players.loc[i, \"Name\"] = df_name[\n players.player_api_id == x\n ].player_name.iat[0]\n\n # choose interested features\n radar_info = pd.DataFrame()\n radar_info = player_attributes.iloc[index_headers, :]\n df = radar_info[\n [\n \"overall_rating\",\n \"potential\",\n \"crossing\",\n \"long_passing\",\n \"ball_control\",\n \"reactions\",\n \"long_shots\",\n \"standing_tackle\",\n ]\n ]\n\n categories = list(df)[:]\n N = len(categories)\n fig, ax = plt.subplots(nrows=1, ncols=1)\n fig.set_size_inches(w=10, h=10)\n\n # plot\n for i in range(num_of_top_players):\n values = df.iloc[i].values.flatten().tolist()\n values += values[:1]\n\n # the angle of each axis in the plot\n angles = [n / float(N) * 2 * math.pi for n in range(N)]\n angles += angles[:1]\n\n # Initialise the spider plot\n ax = plt.subplot(111, polar=True)\n\n # Draw one axe per variable + add labels labels yet\n plt.xticks(angles[:-1], categories, color=\"grey\", size=8)\n\n # Draw ylabels\n ax.set_rlabel_position(0)\n plt.yticks([20, 40, 60, 80], [\"20\", \"40\", \"60\", \"80\"], color=\"grey\", size=7)\n plt.ylim(0, 100)\n\n # Plot data\n ax.plot(angles, values, linewidth=1, linestyle=\"solid\")\n\n # Fill area\n ax.fill(angles, values, \"b\", alpha=0.1)\n plt.show()",
"def generate_wavplot(song_name):\n\n filepath = features[features.inferred_name.str.title() == song_name].feature_file.values[0]\n rate, wave = wavfile.read(filepath)\n mono = np.mean(wave, axis=1)\n mono.shape\n plt.figure(figsize=(20,6))\n plt.axis('off')\n plt.plot(mono[::mono.shape[0]//6000], color='white')\n plt.tight_layout;\n friendly_song_name = '_'.join(song_name.split()).lower()\n output_filepath = './static/wavplots/' + friendly_song_name + '.png'\n plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, transparent=True)\n return output_filepath",
"def create_spectrogram_plots(label_folder='electronic_music/Trance_label/Train/', sr=44100, n_mels=128, n_fft=2048,\n hop_length=512, song_duration=180.0, n_classes=4):\n\n # get list of all artists\n labels = os.listdir(label_folder)\n \n fig, ax = plt.subplots(nrows=2, ncols=int(n_classes/2), figsize=(14, 12), sharex=True, sharey=True)\n\n row = 0\n col = 0\n\n # iterate through labels and random songs and plot a spectrogram on a grid\n for label in labels:\n # Randomly select album and song\n label_path = os.path.join(label_folder, label)\n label_songs = os.listdir(label_path)\n song = random.choice(label_songs)\n song_path = os.path.join(label_path, song)\n\n # Create mel spectrogram\n audio = MP3(song_path)\n audio_lenght = int(audio.info.length)\n audio_middle = (audio_lenght - int(song_duration)) / 2\n\n y, sr = librosa.load(song_path, sr=sr, offset=audio_middle, duration=5)\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)\n log_S = librosa.logamplitude(S, ref_power=1.0)\n\n # Plot on grid\n plt.axes(ax[row, col])\n librosa.display.specshow(log_S, sr=sr)\n plt.title(label)\n col += 1\n if col == int(n_classes/2):\n row += 1\n col = 0\n\n fig.tight_layout()",
"def read_radiance_data():\n data_dir = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\Spectral_Band_pass'\n dframe = pd.read_csv(data_dir +'/' + 'High_res_radiance_data.csv', delimiter=',')\n\n plt.plot(dframe['Wavelength'], dframe['Response'], 'k.', markersize=1,\n label='SAO 2010 Solar Irradiance Spectrum')\n plt.grid(True, linestyle=':')\n plt.title('Solar Irradiance Vs. Wavelength', fontsize=12)\n plt.xlabel('Wavelength (nm)', fontsize=12)\n plt.ylabel('Solar Irradiance (' + r'$Wm^{-2}nm^{-1}$' +')', fontsize=12)\n plt.legend(loc='best')\n plt.savefig(data_dir +'/'+'SAO_Solar_Irradiance.png', dpi=100)\n plt.close('all')\n return dframe",
"def aspects_radar_plot(self, aspects: list, _testing=False):\n try:\n len(self.aspects_opinions_df)\n except AttributeError:\n self.aspect_opinon_for_all_comments()\n\n sentiment_scores = [self.sentiment_for_one_aspect(i) for i in aspects]\n\n aspects_sentiments = pd.DataFrame(dict(r=sentiment_scores, theta=aspects))\n fig = px.line_polar(aspects_sentiments,\n r='r',\n theta='theta',\n line_close=True,\n title='Sentiment scores of the most common aspects')\n\n if not _testing:\n fig.show()\n else:\n return 'plot finished'",
"def create_radar_chart(num_vars, frame='polygon', **kwargs):\n theta = _theta(num_vars)\n\n def draw_poly_patch(self):\n verts = _unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n\n def draw_circle_patch(self):\n # unit circle centered on (0.5, 0.5)\n return plt.Circle((0.5, 0.5), 0.5)\n\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n\n class RadarAxes(PolarAxes):\n \"\"\"\n Projection class for a radar chart\n \"\"\"\n\n name = 'radar'\n size = num_vars\n # use 1 line segment to connect specified points\n RESOLUTION = 1\n # define draw_frame method\n shape = frame\n draw_patch = patch_dict[frame]\n\n def set_rscale(self, top, bottom=0, round_up=False):\n \"\"\"Scale the radar chart\n If circle chart then this function just sets the ylim of the polar ax.\n If polygon chart then ylim will be set to fit a circle with radius h\n completely inside it (distance from center to midpoint of polygon \n edge will be h.\n \"\"\"\n if self.shape == 'circle':\n r = top\n elif self.shape == 'polygon':\n angle_of_slice = 2 * np.pi / self.size\n r = top / np.cos(angle_of_slice / 2.)\n if round_up:\n r = np.ceil(r)\n else:\n # this should never happen since this is checked for in class\n # creation\n raise ValueError('unknown value for `frame`: %s' % self.shape)\n self.set_ylim(bottom, r)\n\n def fill(self, *args, **kwargs):\n \"\"\"Override fill so that line is closed by default\"\"\"\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)\n\n def plot(self, *args, **kwargs):\n \"\"\"Override plot so that line is closed by default\"\"\"\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n\n def _close_line(self, line):\n x, y = line.get_data()\n # FIXME: markers at x[0], y[0] get doubled-up\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n\n def set_varlabels(self, labels):\n \"\"\"Label the radial axes\"\"\"\n self.set_thetagrids(np.degrees(theta) % FULL_CIRCLE_DEG, labels)\n\n def _gen_axes_patch(self):\n return self.draw_patch()\n\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n # The following is a hack to get the spines (i.e. the axes frame)\n # to draw correctly for a polygon frame.\n\n # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.\n spine_type = 'circle'\n verts = _unit_poly_verts(theta)\n # close off polygon by repeating first vertex\n verts.append(verts[0])\n path = Path(verts)\n\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n\n register_projection(RadarAxes)\n \n # if subplot_kw argument is given, overwrite projection field\n # TODO: maybe throw error when projection is given?\n if 'subplot_kw' in kwargs:\n kwargs['subplot_kw']['projection'] = 'radar'\n else:\n kwargs['subplot_kw'] = {'projection': 'radar'}\n fig, axes = plt.subplots(**kwargs)\n\n return fig, axes",
"def radar_factory(num_vars, frame='circle'):\r\n # calculate evenly-spaced axis angles\r\n theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)\r\n # rotate theta such that the first axis is at the top\r\n theta += np.pi/2\r\n\r\n def draw_poly_patch(self):\r\n verts = unit_poly_verts(theta)\r\n return plt.Polygon(verts, closed=True, edgecolor='k')\r\n\r\n def draw_circle_patch(self):\r\n # unit circle centered on (0.5, 0.5)\r\n return plt.Circle((0.5, 0.5), 0.5)\r\n\r\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\r\n if frame not in patch_dict:\r\n raise ValueError('unknown value for `frame`: %s' % frame)\r\n\r\n class RadarAxes(PolarAxes):\r\n\r\n name = 'radar'\r\n # use 1 line segment to connect specified points\r\n RESOLUTION = 1\r\n # define draw_frame method\r\n draw_patch = patch_dict[frame]\r\n\r\n def fill(self, *args, **kwargs):\r\n \"\"\"Override fill so that line is closed by default\"\"\"\r\n closed = kwargs.pop('closed', True)\r\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)\r\n\r\n def plot(self, *args, **kwargs):\r\n \"\"\"Override plot so that line is closed by default\"\"\"\r\n lines = super(RadarAxes, self).plot(*args, **kwargs)\r\n for line in lines:\r\n self._close_line(line)\r\n\r\n def _close_line(self, line):\r\n x, y = line.get_data()\r\n # FIXME: markers at x[0], y[0] get doubled-up\r\n if x[0] != x[-1]:\r\n x = np.concatenate((x, [x[0]]))\r\n y = np.concatenate((y, [y[0]]))\r\n line.set_data(x, y)\r\n\r\n def set_varlabels(self, labels):\r\n self.set_thetagrids(np.degrees(theta), labels)\r\n\r\n def _gen_axes_patch(self):\r\n return self.draw_patch()\r\n\r\n def _gen_axes_spines(self):\r\n if frame == 'circle':\r\n return PolarAxes._gen_axes_spines(self)\r\n # The following is a hack to get the spines (i.e. the axes frame)\r\n # to draw correctly for a polygon frame.\r\n\r\n # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.\r\n spine_type = 'circle'\r\n verts = unit_poly_verts(theta)\r\n # close off polygon by repeating first vertex\r\n verts.append(verts[0])\r\n path = Path(verts)\r\n\r\n spine = Spine(self, spine_type, path)\r\n spine.set_transform(self.transAxes)\r\n return {'polar': spine}\r\n\r\n register_projection(RadarAxes)\r\n return theta",
"def graph_spectrogram(audio_file):\n secs_per_spec = 10\n data, rate = librosa.core.load(audio_file)\n split_data = split_list_by_num_samples(data, rate * secs_per_spec)\n\n random.shuffle(split_data)\n\n # if songs longer than 100 seconds, take the first 10 images, since its shuffled\n if len(split_data) > 10:\n split_data = split_data[:10]\n\n pool = Pool()\n results = [pool.apply_async(audio_sample_to_img, args=(sample, rate, secs_per_spec)) for sample in split_data]\n specs = [p.get() for p in results]\n\n return specs",
"def radar_factory(num_vars, frame='circle'):\n # calculate evenly-spaced axis angles\n theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)\n # rotate theta such that the first axis is at the top\n theta += np.pi/2\n\n def draw_poly_patch(self):\n verts = unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n\n def draw_circle_patch(self):\n # unit circle centered on (0.5, 0.5)\n return plt.Circle((0.5, 0.5), 0.5)\n\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n\n class RadarAxes(PolarAxes):\n\n name = 'radar'\n # use 1 line segment to connect specified points\n RESOLUTION = 1\n # define draw_frame method\n draw_patch = patch_dict[frame]\n\n def fill(self, *args, **kwargs):\n \"\"\"Override fill so that line is closed by default\"\"\"\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)\n\n def plot(self, *args, **kwargs):\n \"\"\"Override plot so that line is closed by default\"\"\"\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n\n def _close_line(self, line):\n x, y = line.get_data()\n # FIXME: markers at x[0], y[0] get doubled-up\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n\n def set_varlabels(self, labels):\n self.set_thetagrids(np.degrees(theta), labels)\n\n def _gen_axes_patch(self):\n return self.draw_patch()\n\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n # The following is a hack to get the spines (i.e. the axes frame)\n # to draw correctly for a polygon frame.\n\n # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.\n spine_type = 'circle'\n verts = unit_poly_verts(theta)\n # close off polygon by repeating first vertex\n verts.append(verts[0])\n path = Path(verts)\n\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n\n register_projection(RadarAxes)\n return theta",
"def compileIntro(self):\n out = audio.AudioQuantumList()\n intro = audio.AudioData(self.sample_path + self.template['intro'], sampleRate=44100, numChannels=2, verbose=False)\n \n # First 4 bars of song\n custom_bars = []\n\n if not self.beats or len(self.beats) < 16:\n # Song is not long or identifiable enough\n # Take our best shot at making something\n self.tempo = 60.0 * 16.0 / self.original.duration\n for i in xrange(0, 4):\n bar = []\n for j in xrange(0, 4):\n length = self.original.duration / 16.0\n start = ((i * 4) + j) * length\n bar.append(audio.AudioQuantum(start, length, None, 0, self.original.source))\n custom_bars.append(bar)\n else:\n for i in xrange(0, 4):\n custom_bars.append(self.beats[i*4:(i*4)+4])\n out.extend([x for bar in custom_bars for x in bar])\n\n # First beat of first bar x 4\n for i in xrange(0, 4):\n out.append(custom_bars[0][0])\n \n # First beat of second bar x 4\n for i in xrange(0, 4):\n out.append(custom_bars[1][0])\n\n beatone = custom_bars[2][0]\n beattwo = custom_bars[3][0]\n beatthree = custom_bars[3][2]\n \n # First beat of third bar x 8\n for x in xrange(0, 8):\n out.append(audio.AudioQuantum(beatone.start, beatone.duration/2, None, beatone.confidence, beatone.source))\n\n # First beat of fourth bar x 8\n for x in xrange(0, 8):\n out.append(audio.AudioQuantum(beattwo.start, beattwo.duration/4, None, beattwo.confidence, beattwo.source))\n\n # Third beat of fourth bar x 8\n for x in xrange(0, 8):\n out.append(audio.AudioQuantum(beatthree.start, beatthree.duration/4, None, beatthree.confidence, beatthree.source))\n \n if self.original.analysis.time_signature == 4:\n shifted = self.st.shiftTempo(audio.getpieces(self.original, out), self.template['tempo']/self.tempo)\n else:\n shifted1 = audio.getpieces(self.original, out)\n shifted = self.st.shiftTempo(shifted1, len(shifted1) / ((44100 * 16 * 2 * 60.0)/self.template['tempo']))\n shifted1.unload()\n if shifted.numChannels == 1: \n shifted = self.mono_to_stereo(shifted)\n return self.truncatemix(intro, shifted, self.mixfactor(out))",
"def get_line_wavelengths():\n line_wavelengths = OrderedDict() ; line_ratios = OrderedDict()\n \n line_wavelengths['PaB'] = [12821]\n line_ratios['PaB'] = [1.]\n line_wavelengths['Ha'] = [6564.61]\n line_ratios['Ha'] = [1.]\n line_wavelengths['Hb'] = [4862.68]\n line_ratios['Hb'] = [1.]\n line_wavelengths['Hg'] = [4341.68]\n line_ratios['Hg'] = [1.]\n line_wavelengths['Hd'] = [4102.892]\n line_ratios['Hd'] = [1.]\n \n line_wavelengths['OIII-4363'] = [4364.436]\n line_ratios['OIII-4363'] = [1.]\n line_wavelengths['OIII'] = [5008.240, 4960.295]\n line_ratios['OIII'] = [2.98, 1]\n \n # Split doublet, if needed\n line_wavelengths['OIII4959'] = [4960.295]\n line_ratios['OIII4959'] = [1]\n line_wavelengths['OIII5007'] = [5008.240]\n line_ratios['OIII5007'] = [1]\n \n line_wavelengths['OII'] = [3727.092, 3729.875]\n line_ratios['OII'] = [1, 1.] \n \n line_wavelengths['OI-6302'] = [6302.046, 6363.67]\n line_ratios['OI-6302'] = [1, 0.33]\n\n line_wavelengths['NeIII'] = [3869]\n line_ratios['NeIII'] = [1.]\n line_wavelengths['NeV'] = [3346.8]\n line_ratios['NeV'] = [1.]\n line_wavelengths['NeVI'] = [3426.85]\n line_ratios['NeVI'] = [1.]\n \n line_wavelengths['SIII'] = [9068.6, 9530.6][::-1]\n line_ratios['SIII'] = [1, 2.44][::-1]\n \n # Split doublet, if needed\n line_wavelengths['SIII9068'] = [9068.6]\n line_ratios['SIII9068'] = [1]\n line_wavelengths['SIII9531'] = [9530.6]\n line_ratios['SIII9531'] = [1]\n \n line_wavelengths['SII'] = [6718.29, 6732.67]\n line_ratios['SII'] = [1., 1.] \n \n line_wavelengths['HeII'] = [4687.5]\n line_ratios['HeII'] = [1.]\n line_wavelengths['HeI-5877'] = [5877.2]\n line_ratios['HeI-5877'] = [1.]\n line_wavelengths['HeI-3889'] = [3889.5]\n line_ratios['HeI-3889'] = [1.]\n line_wavelengths['HeI-1083'] = [10830.]\n line_ratios['HeI-1083'] = [1.]\n \n line_wavelengths['MgII'] = [2799.117]\n line_ratios['MgII'] = [1.]\n \n line_wavelengths['CIV-1549'] = [1549.480]\n line_ratios['CIV-1549'] = [1.]\n line_wavelengths['CIII-1908'] = [1908.734]\n line_ratios['CIII-1908'] = [1.]\n line_wavelengths['OIII-1663'] = [1665.85]\n line_ratios['OIII-1663'] = [1.]\n line_wavelengths['HeII-1640'] = [1640.4]\n line_ratios['HeII-1640'] = [1.]\n \n line_wavelengths['NII'] = [6549.86, 6585.27]\n line_ratios['NII'] = [1., 3]\n line_wavelengths['NIII-1750'] = [1750.]\n line_ratios['NIII-1750'] = [1.]\n line_wavelengths['NIV-1487'] = [1487.]\n line_ratios['NIV-1487'] = [1.]\n line_wavelengths['NV-1240'] = [1240.81]\n line_ratios['NV-1240'] = [1.]\n\n line_wavelengths['Lya'] = [1215.4]\n line_ratios['Lya'] = [1.]\n \n line_wavelengths['Lya+CIV'] = [1215.4, 1549.49]\n line_ratios['Lya+CIV'] = [1., 0.1]\n \n line_wavelengths['Ha+SII'] = [6564.61, 6718.29, 6732.67]\n line_ratios['Ha+SII'] = [1., 1./10, 1./10]\n line_wavelengths['Ha+SII+SIII+He'] = [6564.61, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+SII+SIII+He'] = [1., 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n\n line_wavelengths['Ha+NII+SII+SIII+He'] = [6564.61, 6549.86, 6585.27, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+NII+SII+SIII+He'] = [1., 1./(4.*4), 3./(4*4), 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n \n line_wavelengths['OIII+Hb'] = [5008.240, 4960.295, 4862.68]\n line_ratios['OIII+Hb'] = [2.98, 1, 3.98/6.]\n \n line_wavelengths['OIII+Hb+Ha'] = [5008.240, 4960.295, 4862.68, 6564.61]\n line_ratios['OIII+Hb+Ha'] = [2.98, 1, 3.98/10., 3.98/10.*2.86]\n\n line_wavelengths['OIII+Hb+Ha+SII'] = [5008.240, 4960.295, 4862.68, 6564.61, 6718.29, 6732.67]\n line_ratios['OIII+Hb+Ha+SII'] = [2.98, 1, 3.98/10., 
3.98/10.*2.86*4, 3.98/10.*2.86/10.*4, 3.98/10.*2.86/10.*4]\n\n line_wavelengths['OIII+OII'] = [5008.240, 4960.295, 3729.875]\n line_ratios['OIII+OII'] = [2.98, 1, 3.98/4.]\n \n line_wavelengths['OII+Ne'] = [3729.875, 3869]\n line_ratios['OII+Ne'] = [1, 1./5]\n \n return line_wavelengths, line_ratios",
"def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. Bit)')\n ax1.set_xlabel('Time (s)')",
"def _radar(df,\n ax,\n label,\n all_tags,\n alpha=0.15,\n edge_alpha=0.65,\n zorder=2,\n edge_style='-'):\n values = df[label].values\n values = np.maximum(values, 0.05) # don't let radar collapse to 0.\n values = np.concatenate((values, [values[0]]))\n\n angles = np.linspace(0, 2*np.pi, len(all_tags), endpoint=False)\n angles = np.concatenate((angles, [angles[0]]))\n\n ax.plot(angles, values, '-', \n linewidth=1.8, \n label=label,\n alpha=edge_alpha,\n zorder=zorder,\n linestyle=edge_style)\n \n ax.set_thetagrids(angles * 180/np.pi,\n all_tags,\n fontsize='medium')\n\n # To avoid text on top of gridlines, we flip horizontalalignment\n # based on label location\n text_angles = np.rad2deg(angles)\n for label, angle in zip(ax.get_xticklabels()[:-1], text_angles[:-1]):\n if 90 <= angle <= 270:\n label.set_horizontalalignment('right')\n else:\n label.set_horizontalalignment('left')\n \n return ax",
"def songInfo():\n \n global songFile, currentRadio\n \n lines = songFile.readlines()\n if len(lines) > 0:\n\n songFile.seek(0)\n title = formatSong(lines[0]).strip()\n \n with canvas(device) as draw:\n invert(draw, 0, 0, names[currentRadio][0], True)\n if len(title)<19:\n draw.text((72-4*(len(title)), 20), title , fill=\"white\")\n else:\n lineNum = len(title)\n if lineNum > 72:\n lineNum = 72\n thelist = [title[i:i+19] for i in range(0, lineNum, 19)]\n for i in range(len(thelist)): \n draw.text((81-4*(len(thelist[i].strip())), 19+10*i), thelist[i] , fill=\"white\")",
"def plot_example_spectrograms(example,rate):\r\n plt.figure()\r\n \r\n labels = ['REM', 'NREM 1', 'NREM 2', 'NREM 3-4']\r\n \r\n ###YOUR CODE HERE\r\n for i in xrange(0,4):\r\n plt.subplot(2,2,i+1)\r\n plt.specgram(example[i], NFFT=256, Fs=rate, label=labels[i])\r\n plt.ylim( ymax = 30 )\r\n plt.legend()\r\n plt.show()\r\n \r\n return",
"def get_chartshow_csv(data):\n results = []\n rank = 1\n for line in data.split(\"\\n\"):\n if len(line.strip()) > 0:\n if len(line.split(\",\")) == 3:\n parts = line.split(\",\")\n results.append({\"rank\": parts[0], \"artist\": parts[1], \"title\": parts[2]})\n else:\n m = re.match(\"^([0-9XR]+)(.*)\", line)\n result = {\"rank\": rank, \"artist\": m.group(2), \"title\": \"\"}\n parse_artist_title(line, m, result)\n results.append(result)\n rank = rank + 1\n return results",
"def _read_arasim_antenna_data(filename):\n data = {}\n freqs = set()\n thetas = set()\n phis = set()\n freq = 0\n with open(filename) as f:\n for line in f:\n words = line.split()\n if line.startswith('freq'):\n freq = 1\n if words[-1]==\"Hz\":\n pass\n elif words[-1]==\"kHz\":\n freq *= 1e3\n elif words[-1]==\"MHz\":\n freq *= 1e6\n elif words[-1]==\"GHz\":\n freq *= 1e9\n else:\n raise ValueError(\"Cannot parse line: '\"+line+\"'\")\n freq *= float(words[-2])\n freqs.add(freq)\n elif line.startswith('SWR'):\n swr = float(words[-1])\n elif len(words)==5 and words[0]!=\"Theta\":\n theta = int(words[0])\n thetas.add(theta)\n phi = int(words[1])\n phis.add(phi)\n db_gain = float(words[2])\n # AraSim actually only seems to use the sqrt of the gain\n # (must be gain in power, not voltage)\n # gain = np.sqrt(float(words[3]))\n gain = np.sqrt(10**(db_gain/10))\n phase = np.radians(float(words[4]))\n data[(freq, theta, phi)] = (gain, phase)\n\n # Convert data dictionary into 3-D array of responses\n response = np.empty((len(freqs), len(thetas), len(phis)),\n dtype=np.complex_)\n for i, freq in enumerate(sorted(freqs)):\n for j, theta in enumerate(sorted(thetas)):\n for k, phi in enumerate(sorted(phis)):\n gain, phase = data[(freq, theta, phi)]\n response[i, j, k] = gain * np.exp(1j*phase)\n\n response_data = (response, np.array(sorted(freqs)),\n np.array(sorted(thetas)), np.array(sorted(phis)))\n return _fix_response_wrapping(response_data)",
"def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for segment in self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: snares.encode('snares.mp3')\n hats = self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n 
empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')",
"def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0",
"def plot(options):\n kwargs = {f'{i}': audio.read(i).data for i in options.files}\n plotter.plot(**kwargs)",
"def radiation_measurement_analysis():\n import pint\n ureg = pint.UnitRegistry()\n\n mrem_h = ureg.parse_units('mrem') / ureg.hour\n m = ureg.parse_units('meters')\n s = ureg.parse_units('seconds')\n\n # Measurements of background radiation\n bg_dist = ureg.parse_expression('10 m') # estimate of how far away we are wrt background\n background_rows = [\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.022 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=4.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.021 * mrem_h, capture_time=5.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=11.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=16.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.024 * mrem_h, capture_time=20.0 * s),\n ]\n\n # Measurements of sample radiation\n esp_dist = ureg.parse_expression('1 inch').to(m) / 2 # estimate of how far we are from the sample when very close\n dist0_rows = [\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=0.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=3.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=5.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=9.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=10.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=11.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.057 * mrem_h, capture_time=12.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.058 * mrem_h, capture_time=13.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=14.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=15.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=16.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=20.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=22.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.066 * mrem_h, capture_time=23.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=24.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=25.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=26.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=28.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=30.0 * s),\n ]\n\n dist0_v2_rows = [\n dict(vid=3, distance=esp_dist, rad=0.012 * mrem_h, capture_time=0.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.011 * mrem_h, capture_time=1.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=8.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=9.0 * s),\n ]\n\n close_rows = [\n dict(vid=4, distance=0.5 * m, rad=0.013 * mrem_h, capture_time=0.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.014 * mrem_h, capture_time=5.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=7.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.011 * mrem_h, capture_time=15.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=16.0 * 
s),\n ]\n\n mid_rows = [\n dict(vid=5, distance=1.0 * m, rad=0.014 * mrem_h, capture_time=0.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.015 * mrem_h, capture_time=5.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.013 * mrem_h, capture_time=10.0 * s),\n ]\n\n far_rows = [\n dict(vid=6, distance=2.0 * m, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=6, distance=2.0 * m, rad=0.025 * mrem_h, capture_time=0.1 * s),\n ]\n\n # guess_dist = ureg.parse_expression('0.3 m') # estimate of how far away we are wrt background\n # guess_rows = [\n # dict(vid=9, distance=guess_dist, rad=0.030 * mrem_h, capture_time=0.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.041 * mrem_h, capture_time=2.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.051 * mrem_h, capture_time=3.0 * s),\n # ]\n\n rows = dist0_rows + background_rows + dist0_v2_rows + close_rows + mid_rows + far_rows\n # rows += guess_rows\n\n import pandas as pd\n import numpy as np\n table = pd.DataFrame(rows)\n\n # Ensure comparable units\n units = {\n 'rad': mrem_h,\n 'distance': m,\n 'capture_time': s,\n }\n for key, unit in units.items():\n table[key] = table[key].apply(lambda c: c.to(unit).m)\n table['rad'] = table['rad'].astype(float)\n table['distance'] = table['distance'].astype(float)\n\n # Weight each measurement based on the amount of time the measurement was\n # sustained in the video.\n average_rad_rows = []\n for vid, group in table.groupby('vid'):\n from statsmodels.stats.weightstats import DescrStatsW\n weights = (-1 * group['capture_time'].diff(periods=-1).fillna(0)) / group['capture_time'].iloc[-1]\n table.loc[group.index, 'weight'] = weights\n values = group['rad']\n weighted_stats = DescrStatsW(values, weights=weights, ddof=0)\n dists = group['distance'].unique()\n assert len(dists) == 1\n average_rad_rows.append({\n 'vid': vid,\n 'distance': dists[0],\n 'rad_mean': weighted_stats.mean,\n 'rad_std': weighted_stats.std,\n })\n stats_table = pd.DataFrame(average_rad_rows)\n\n bg_row = stats_table.loc[stats_table['distance'].argmax()]\n fg_row = stats_table.loc[stats_table['distance'].argmin()]\n\n # -------------------\n ADD_DUMMY_VALUES = 0\n if ADD_DUMMY_VALUES:\n # Hack: because we don't have enough samples we can fudge the value\n # knowning that the value should be the background radiation in the\n # limit.\n\n dummy_measurements = []\n extra_support = 1\n for idx in range(3, 3 + extra_support):\n dummy_row = {\n 'vid': -idx,\n 'distance': bg_row['distance'] + idx,\n 'rad_mean': bg_row['rad_mean'],\n 'rad_std': 0.01,\n }\n dummy_measurements.append(dummy_row)\n\n # also add an extra value close to the sample\n rad_bg = bg_row['rad_mean']\n rad_above_bg = fg_row['rad_mean'] - rad_bg\n dummy_row = {\n 'vid': -1,\n 'distance': fg_row['distance'] / 2,\n 'rad_mean': rad_bg + (rad_above_bg * 4),\n 'rad_std': 0.5,\n }\n dummy_measurements.append(dummy_row)\n\n # dummy_row = {\n # 'vid': -2,\n # 'distance': fg_row['distance'] / 4,\n # 'rad_mean': rad_bg + (rad_above_bg * 16),\n # }\n # dummy_measurements.append(dummy_row)\n\n dummy_stats = pd.DataFrame(dummy_measurements)\n dummy_stats['weight'] = 0.5\n stats_table['weight'] = 1.0\n stats_table2 = pd.concat([stats_table, dummy_stats]).reset_index(drop=True).sort_values('distance')\n else:\n stats_table2 = stats_table\n # -------------------\n\n import scipy\n scipy.optimize.curve_fit\n\n # Because we know the radiation should follow an inverse square law wrt to\n # distance, we can fit a polynomial of degree 2 (parabola) to interpolate /\n # extrapolate the **inverse** 
values.\n x = stats_table2['distance'].values\n y = stats_table2['rad_mean'].values\n s = stats_table2['rad_std'].values\n\n # Model the squared falloff directly\n def invsquare(x, a, b):\n return a * (1 / (0.01 + x ** 2)) + b\n # bg_row['rad_mean']\n # Use curve_fit to constrain the first coefficient to be zero\n try:\n coef = scipy.optimize.curve_fit(invsquare, x, y, sigma=s, method='trf')[0]\n except Exception as ex:\n coef = None\n print(f'ex={ex}')\n\n # Also fit one to the raw weighted points as a sanity check\n # inv_poly2 = Polynomial.fit(table['distance'], 1 / table['rad'], w=table['weight'], deg=2)\n\n import kwplot\n sns = kwplot.autosns()\n plt = kwplot.autoplt()\n # ax = sns.boxplot(data=table, x='distance', y='rad', width=0.1)\n\n # Add in points to show each observation\n ax = sns.relplot(x=\"distance\", y=\"rad\", data=table, size=4, color=\".3\",\n linewidth=0, alpha=0.5, palette='deep')\n\n ax = plt.gca()\n ax.set_xlabel('distance from sample ({})'.format(str(units['distance'])))\n ax.set_ylabel('radiation dosage ({})'.format(str(units['rad'])))\n\n max_meters = 10\n\n extrap_x = np.linspace(0, max_meters, 1000)\n if coef is not None:\n extrap_y1 = invsquare(extrap_x, *coef)\n # extrap_y2 = 1 / inv_poly2(extrap_x)\n ax.plot(stats_table2['distance'].values, stats_table2['rad_mean'].values, 'rx')\n ax.plot(stats_table['distance'].values, stats_table['rad_mean'].values, 'bo')\n ax.plot(extrap_x, extrap_y1, '--')\n ax.set_ylim(0.001, 0.1)\n ax.set_yscale('log')\n # ax.plot(extrap_x, extrap_y2, '--')",
"def plot_chart():\n fig = go.Figure(data=go.Scatterpolar(\n r=[1, 5, 2, 2, 3],\n theta=['happy','sad','anger', 'neutral','calm'],\n fill='toself'\n ))\n\n fig.update_layout(\n polar=dict(\n radialaxis=dict(\n visible=True\n ),\n ),\n showlegend=False\n )\n return fig",
"def plot(self):\n\t\tself.plotOfSpect()",
"def radial_graph(self):\n \n if self['M_RADIAL']['intens'] != None:\n name = self['name']\n id = self._getGraphId()\n figname = 'RADIAL_%s.eps' % id\n sxlabel = 'Pixel Radius' ; sylabel = 'Intens' \n title = 'Radial profile, %s' % (name,)\n y = self['M_RADIAL']['intens']\n x = self['M_RADIAL']['radii']\n xy = ((x,y),)\n Plot(xy,figname,sxlabel,sylabel,title)\n self['figures']['radial'] = figname\n else : pass",
"def armTrack(*args, **kwargs):\n pass",
"def plot_rsfs_waveforms(peak_waveform, durations, labels):\n if np.mean(durations[np.where(labels==0)[0]]) < np.mean(durations[np.where(labels==1)[0]]):\n fs_k = 0;rs_k = 1\n waveform_class_ids = [1,0]\n else:\n rs_k = 0;fs_k = 1\n waveform_class_ids = [0,1]\n waveform_class = [waveform_class_ids[k] for k in labels]\n waveform_class = np.array(waveform_class)\n\n\n plt.figure(figsize=(6,4))\n for i in range(len(peak_waveform)):\n waveform = peak_waveform[i]\n if waveform_class[i]==np.unique(waveform_class)[0]:\n plt.plot(waveform/np.max(np.abs(waveform)),'#b3b3ff',alpha=0.7)\n if waveform_class[i]==np.unique(waveform_class)[1]:\n plt.plot(waveform/np.max(np.abs(waveform)),'#c6ecc6',alpha=0.7)\n\n\n # plot means, normalized\n for waveform_class_id in np.unique(waveform_class):\n plt.plot(np.mean(peak_waveform[waveform_class==waveform_class_id],axis=0)/\n (np.max(np.abs(np.mean(peak_waveform[waveform_class==waveform_class_id],axis=0)))),lw=3,label=waveform_class_id)\n plt.title('Raw: RS:'+str(len(np.where(waveform_class==0)[0]))+', FS: '+str(len(np.where(waveform_class==1)[0])))\n return waveform_class",
"def __init__(self):\n \n # initialize Music-Code\n self.m = MusicCode(bpm=120)\n \n # waveform types\n self.waveform_types = ['sine', 'tri', 'saw1', 'saw2', 'square', 'sine-tri', 'sine-saw', 'sine-square', 'saw-square', 'tri-saw', 'tri-square']\n \n # chord labels\n self.all_chord_labels = self.m.all_chords[:73]\n\n # select 4 octaves for root notes\n excess_notes_list = list(self.m.freq_note_table_sharp.values()) \n \n all_root_notes = []\n for item in excess_notes_list:\n if ('0' in item) or ('1' in item) or ('5' in item) or ('6' in item) or ('7' in item):\n pass\n else:\n all_root_notes.append(item)\n \n all_root_notes.sort() \n self.all_root_notes = all_root_notes"
]
| [
"0.62712663",
"0.58639205",
"0.5833647",
"0.5677688",
"0.5650228",
"0.5584993",
"0.55144435",
"0.55014986",
"0.53806454",
"0.53474057",
"0.532776",
"0.5325962",
"0.52893907",
"0.52685654",
"0.5188182",
"0.518027",
"0.51380223",
"0.5116624",
"0.51140165",
"0.5104122",
"0.5083387",
"0.5056347",
"0.504353",
"0.5043476",
"0.50338805",
"0.5023855",
"0.50059474",
"0.5000693",
"0.4988476",
"0.49799252"
]
| 0.7620976 | 0 |
Create dataframes for the song data and the id lookup table from csv files. | def load_data_csv():
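# Assumes both CSV files live in a local data/ directory and were written with
# their DataFrame index as the first column, hence index_col=0 on read.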
# Load lookup table
path = 'data/id_lookup.csv'
lookup_table = pd.read_csv(path, index_col=0)
# Load song data
path2 = 'data/data_lyrics_features.csv'
data = pd.read_csv(path2, index_col=0)
return data, lookup_table | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_training_df(df, id_csv):\n\n train_df = fetch_training_df(df)\n \n for column_name in ['song_id', 'track_id']:\n train_df[column_name] = train_df[column_name].map(lambda x: ast.literal_eval(x).decode('utf-8'))\n \n train_df.drop(['year'], axis=1, inplace=True)\n train_df = merge_id_into_df(train_df, id_csv)\n train_df.drop(['song_id', 'track_id'], axis=1, inplace=True)\n\n return train_df",
"def generate_info_df(df, id_csv):\n\n info_df = fetch_tracks_info_df(df)\n info_df = merge_id_into_df(info_df, id_csv)\n\n return info_df",
"def get_dataframes(csvfile, spec=SPEC):\n tables = [t for csv_segment, pdef in Reader(csvfile, spec).items()\n for t in extract_tables(csv_segment, pdef)]\n emitter = Emitter(tables)\n return {freq: emitter.get_dataframe(freq) for freq in FREQUENCIES}",
"def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)",
"def main(path_to_cdr_ids, path_to_db):\n from sqlalchemy import create_engine\n import pandas as pd\n\n cdr_ids_to_get = set(open(path_to_cdr_ids).readlines())\n\n cdr_ids_str = ','.join(['\"{}\"'.format(x) for x in cdr_ids_to_get])\n query_fmt = 'select * from cdr_id_to_homology where cdr_id in ({})'.format\n\n sql_con = create_engine('sqlite:///{}'.format(path_to_db))\n\n df = pd.read_sql(query_fmt(cdr_ids_str), sql_con)\n\n df = df.pivot(columns='homology').fillna(False)\n\n df.to_pickle('data/generated/homology_df.pkl')",
"def _init_train_valid(self, csv_path, csv_sep, csv_names):\n # load groundtruth\n # last element following a dot is file's extension\n print('Loading data...') \n if csv_path.split('.')[-1] == 'cache':\n # load cache\n # Assumes that the cache contains a list of all the identities, a dictionary containing metadata about those identities and the number of samples contained in the cache.\n # The dictionary must have the same format as the 'groundtruth_metadata' dictionary that is built below.\n # dati che mi servono: identities, groundtruth_metadata, num_samples\n with open(csv_path, 'rb') as cache_file:\n cache = pickle.load(cache_file)\n self.identities = cache['identities']\n self.groundtruth_metadata = cache['groundtruth_metadata']\n self.num_samples = cache['num_samples']\n else:\n # Assumes for the provided csv the following structure:\n # Path, ID, Gender, Age, x_min(roi_origin_x), y_min(roi_origin_y), width(roi_width), height(roi_height)\n groundtruth = pd.read_csv(csv_path, sep=csv_sep, names=csv_names)\n # for each groundtruth row\n for gt_sample in groundtruth.iterrows():\n identity = gt_sample[1][\"ID\"]\n # this iteration is over all of the elements in groundtruth, so the same id can be encountered multiple times (same id associated to multiple images)\n if identity not in self.identities:\n self.identities.append(identity)\n # load identity's metadata\n id_data = {\n 'age': gt_sample[1][\"Age\"],\n 'roi': {\n 'upper_left_x': gt_sample[1][\"x_min\"],\n 'upper_left_y': gt_sample[1][\"y_min\"],\n 'width': gt_sample[1][\"width\"],\n 'height': gt_sample[1][\"height\"]\n },\n 'path': gt_sample[1][\"Path\"]\n }\n if identity not in self.groundtruth_metadata.keys():\n self.groundtruth_metadata[identity] = {\n 'index': 0,\n 'metadata': []\n }\n # the other elements in the list associated to an identity are metadata \n self.groundtruth_metadata[identity]['metadata'].append(id_data)\n self.num_samples += 1\n # Dump loaded data to cache\n # Split csv path in directory path and filename\n (csv_dir, csv_name) = os.path.split(csv_path)\n # Create a name for cache file with the same name as csv file but different extension\n cache_name = csv_name.split('.')[0]+'.cache'\n # Create a path pointing to the new cache file, locating it in the same directory as the csv file\n cache_path = os.path.join(csv_dir, cache_name)\n # Write relevant data to file\n with open(cache_path, 'wb') as cache_out_file:\n out_dict = {}\n out_dict['identities'] = self.identities\n out_dict['groundtruth_metadata'] = self.groundtruth_metadata\n out_dict['num_samples'] = self.num_samples\n pickle.dump(out_dict, cache_out_file) \n print('Finished loading data!')\n if self.mode == 'training':\n self._shuffle()",
"def process_song_file(cur, filepath):\r\n\r\n \"\"\" open song file\r\n drop duplicates\r\n set NAs to Zero \"\"\"\r\n df = pd.read_json(filepath, lines=True)\r\n df.drop_duplicates(subset=['song_id','artist_id'], keep = 'first')\r\n df['artist_latitude'] = df['artist_latitude'].fillna(0)\r\n df['artist_longitude'] = df['artist_longitude'].fillna(0)\r\n\r\n\r\n \"\"\" Extract columns for dataframe for song table\r\n drop duplicates before performing insert\r\n convert dataframe to a list for insert \"\"\"\r\n\r\n song_data = (df[['song_id','title','artist_id','year','duration']])\r\n song_data.drop_duplicates(subset='song_id',keep ='first',inplace = True)\r\n song_data = (song_data.values).tolist()\r\n song_data = song_data[0]\r\n # insert song record\r\n cur.execute(song_table_insert,song_data)\r\n\r\n \"\"\" Extract columns for dataframe for artist table,\r\n drop duplicates before performing insert\r\n convert dataframe to a list for insert \"\"\"\r\n\r\n artist_data = (df[['artist_id','artist_name','artist_location','artist_latitude','artist_longitude']])\r\n artist_data.drop_duplicates(subset='artist_id',keep ='first',inplace = True)\r\n artist_data = (artist_data.values).tolist()\r\n artist_data = artist_data[0]\r\n # insert artist record\r\n cur.execute(artist_table_insert, artist_data)",
"def load_data(data_links_list=(\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/raw_data.csv',\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/sample_meta_info.tsv')):\n\n # Reading data sets from the links provided.\n df1 = pd.read_csv(data_links_list[0],\n error_bad_lines=False)\n df2 = pd.read_csv(data_links_list[1],\n sep='\\t')\n df2 = df2.set_index(df2['project'])\n # fill the Nas id df1 as \". Makes the groupbys behave better.\n df1.fillna('', inplace=True)\n # repleace 'genus' = 'other' with an empty string to be consistent.\n df1.replace(to_replace='other', value='', inplace=True)\n # Removing duplicate columns.\n del df2['project']\n del df2['ID']\n df1 = df1.set_index(df1['project'])\n # Removing duplicate column.\n del df1['project']\n # Joining the two datasets.\n df = df1.join(df2)\n # Uniformity in non-capitalization of column names.\n df.rename(columns={'Kingdom': 'kingdom', 'Phylum': 'phylum',\n 'Class': 'class', 'Order': 'order',\n 'Family': 'family', 'Genus': 'genus',\n 'Length': 'length'}, inplace=True)\n df.index.names = ['sampleID']\n # Rearranging columns so that abundance is the last column.\n df = df[['kingdom',\t'phylum', 'class', 'order',\n 'family', 'genus', 'length', 'oxygen',\n 'replicate', 'week', 'abundance']]\n assert isinstance(df, pd.DataFrame)\n return df",
"def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)",
"def create_table_from_csv (sqlite_db_file):\n files = [f for f in os.listdir(os.curdir) if f.endswith(\".csv\")]\n name_df = [re.findall('(.*)\\.csv',f)[0] for f in files ]\n engine = create_engine('sqlite:///' + sqlite_db_file)\n for n, f_n in zip(name_df, files):\n try:\n df = pd.read_csv(f\"{f_n}\", sep=',')\n df.to_sql(f\"{n}\", engine, if_exists=\"fail\")\n\n except Exception:\n pass",
"def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' % self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)",
"def process_song_file(cur, filepath):\n # open song file\n\n inputData = pd.read_json(filepath, lines=True)\n song_df = pd.DataFrame(data=inputData)\n song_df.head()\n \n\n # insert song record\n song_data = song_df[['song_id', 'title', 'artist_id','year','duration']].values\n for i, row in song_df.iterrows():\n cur.execute(song_table_insert, song_data[i])\n \n \n # insert artist record\n \n artist_data = song_df[['artist_id', 'artist_name', 'artist_location','artist_latitude','artist_longitude']].values\n for i, row in song_df.iterrows():\n cur.execute(artist_table_insert, artist_data[i])",
"def load_all_data() -> Tuple[pd.DataFrame, ...]:\n return tuple(\n pd.read_csv(path, sep='\\t') for path in (TARGETS_PATH, USER_INFO_PATH, INTERACTIONS_PATH, TRACK_INFO_PATH))",
"def compile_data():\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n metasp = pd.DataFrame()\r\n for count, ticker in enumerate(tickers):\r\n df = pd.read_csv('sp500_data\\{}.csv'.format(ticker))\r\n df.set_index('Date', inplace=True)\r\n df.rename(columns={'Adj Close': ticker}, inplace=True)\r\n df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], 1, inplace=True)\r\n if metasp.empty:\r\n metasp = df\r\n else:\r\n metasp = metasp.join(df, how = 'outer')\r\n if count % 10 == 0:\r\n print(count)\r\n metasp.to_csv('sp500_meta.csv')",
"def create_raw_data():\r\n for csv_file in glob.glob(raw_loc + 'ticket_data/PRR_*'):\r\n filestring =os.path.basename(csv_file)\r\n index_start = 1\r\n j = 0\r\n start = dt.datetime.now()\r\n print('{} file started at {}'.format(filestring, start.strftime(\"%H:%M\")))\r\n df = pd.read_csv(csv_file, encoding = 'utf-8', parse_dates = ['Tick Issue Date'])\r\n df = df.rename(columns = {c: c.replace(' ', '') for c in df.columns})\r\n try:\r\n df.to_sql('raw_ticket_data', con = conn, if_exists='append')\r\n except:\r\n print('File read error')\r\n\r\n\r\n print ('{} file finished in {:03.2f} minutes '.format(filestring, (dt.datetime.now()-start).seconds / 60))",
"def process_song_file(cur, filepath):\n df = pd.read_json(filepath, typ='series')\n\n columns = ['song_id', 'title', 'artist_id', 'year', 'duration']\n song_data = df[[*columns]]\n cur.execute(song_table_insert, song_data)\n\n columns = ['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']\n artist_data = df[[*columns]]\n cur.execute(artist_table_insert, artist_data)",
"def create_dataframes(self,\n dataset_path: str) -> None:\n # Creates movies dataframe\n movies_path = '{}/movies.csv'.format(dataset_path)\n self.movies_df = pd.read_csv(movies_path)\n\n # Creates ratings dataframe\n ratings_path = '{}/ratings.csv'.format(dataset_path)\n self.ratings_df = pd.read_csv(ratings_path)\n self.ratings_df = self.ratings_df.drop('timestamp', axis=1)\n return",
"def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')",
"def process_song_file(cur, filepath):\r\n\r\n\r\n\r\n\r\n df=pd.read_json(filepath,lines=True)\r\n for j,row in df.iterrows():\r\n n, artist_id, artist_latitude, artist_longitude, artist_location, artist_name, song_id, title, duration, year =row\r\n cur.execute(song_table_insert,[song_id,title,artist_id,year,duration])\r\n\r\n cur.execute(artist_table_insert, [artist_id, artist_name, artist_location,artist_latitude,artist_longitude])",
"def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')",
"def process_song_file(cur, filepath):\n \n # open song file\n \n df = pd.read_json(filepath,lines=True)\n \n # insert song record\n song_data = df[['song_id', 'title', 'artist_id','year',\n 'duration']].values[0].tolist()\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = df[['artist_id','artist_name',\n 'artist_location', 'artist_latitude',\n 'artist_longitude']].values[0].tolist()\n cur.execute(artist_table_insert, artist_data)",
"def process_song_file(cur, filepath):\n\n df = pd.read_json(filepath, lines=True)\n\n song_data = df[['song_id', 'title',\n 'artist_id', 'year', 'duration']].values[0]\n cur.execute(song_table_insert, song_data)\n\n artist_data = df[['artist_id', 'artist_name', 'artist_location',\n 'artist_latitude', 'artist_longitude']].values[0]\n cur.execute(artist_table_insert, artist_data)",
"def populate_from_samples():\n\n # Tags\n try:\n for row in get_csv_data('samples/tags.csv'):\n tag = Tag(name=row['Name'], desc=row['Description'])\n db_session.add(tag)\n finally:\n db_session.commit()\n\n # Organizations\n try:\n for row in get_csv_data('samples/organizations.csv'):\n org = Organization(desc=row['Name'])\n db_session.add(org)\n finally:\n db_session.commit()\n\n # Departments\n try: \n for row in get_csv_data('samples/departments.csv'):\n org = db_session.query(Organization).filter_by(desc=row['Organization']).one()\n dpt = Department(desc=row['Department'], org=org)\n\n db_session.add(dpt)\n finally:\n db_session.commit()\n\n # Application types\n try:\n for row in get_csv_data('samples/apptypes.csv'):\n apptype = AppType(desc=row['Name'])\n db_session.add(apptype)\n finally:\n db_session.commit()\n\n # Applications\n try:\n for row in get_csv_data('samples/applications.csv'):\n apptype = db_session.query(AppType).filter_by(desc=row['AppType']).one()\n dpt = db_session.query(Department).join(Organization).\\\n filter(Department.desc==row['Department']).\\\n filter(Organization.desc==row['Organization']).\\\n one()\n\n app = App(desc=row['Application'], \n app_type=apptype, \n department=dpt,\n version=row['Version'],\n environment=row['Environment'],\n platform=row['Platform']\n )\n\n db_session.add(app)\n finally:\n db_session.commit()\n\n # Connections and Headers\n try:\n for row in get_csv_data('samples/connections.csv'):\n conn = Connection(conn_type=row['Type'], url=row['URL'], port=row['Port'], answer=row['Answer'])\n header = Header(conn_id=conn.id, header=row['Header'], value=row['Value'], conn=conn)\n\n db_session.add(conn)\n db_session.add(header)\n finally:\n db_session.commit()",
"def prepare_data(datapath: str, delimiter: str = \"\\t\") -> pd.DataFrame:\n data = pd.read_csv(datapath, delimiter)\n\n # transform labels to consistent labels\n for column in data.columns:\n if column == \"id text\":\n continue\n data[column] = data[column].apply(reduce_labels)\n\n # separate columns\n data[\"text\"] = data[\"id text\"].apply(split_text_id_columns)\n data[\"id\"] = data[\"id text\"].apply(split_text_id_columns, args=(True,))\n return data",
"def build_dataset_by_df(file_path):\n data = pd.read_csv(file_path, sep=',')\n print(data.head())\n\n user_ids = data['CustomerID'].unique()\n prod_ids = data['StockCode'].unique()\n user_num = len(user_ids)\n product_num = len(prod_ids)\n product_lists_by_user = dict(data.groupby('CustomerID')['StockCode'].apply(list)).values()\n product_to_desc = dict(data.groupby('StockCode')['Description'].apply(set))\n dictionary = {}\n for id in prod_ids:\n dictionary[id] = len(dictionary)\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return product_lists_by_user, product_to_desc, dictionary, reversed_dictionary, user_num, product_num",
"def load_records():\n\n with open('seed_data/records.csv', 'rb') as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n record_id, user_id, common_name, date_time, latitude, longitude, notes, seen, num_birds = row\n\n record = Record(record_id=record_id, user_id=user_id, common_name=common_name,\n date_time=date_time, latitude=latitude, longitude=longitude, \n notes=notes, seen=seen, num_birds=num_birds)\n\n db.session.add(record)\n\n db.session.commit()",
"def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels",
"def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels",
"def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels",
"def load_data_sql(): \r\n conn = mysql.connect(**st.secrets[\"mysql\"])\r\n\r\n data = pd.read_sql('SELECT * FROM song_data', conn)\r\n lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)\r\n \r\n return data, lookup_table"
]
| [
"0.6965932",
"0.65141785",
"0.64995843",
"0.6274993",
"0.62575454",
"0.6177822",
"0.61413383",
"0.6138976",
"0.61084205",
"0.6034166",
"0.6017585",
"0.60097295",
"0.59951246",
"0.59737027",
"0.5946599",
"0.5938458",
"0.59284836",
"0.59180796",
"0.5915057",
"0.590716",
"0.590473",
"0.58981115",
"0.5894785",
"0.585945",
"0.58585674",
"0.5854926",
"0.583593",
"0.583593",
"0.583593",
"0.5830668"
]
| 0.7432108 | 0 |
Create dataframes for the song data and the ID lookup table from SQL tables. | def load_data_sql():
conn = mysql.connect(**st.secrets["mysql"])
data = pd.read_sql('SELECT * FROM song_data', conn)
lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)
return data, lookup_table | {
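
# A minimal usage sketch, assuming a Streamlit app with MySQL credentials in
# st.secrets["mysql"] as the loader above expects. The caching wrapper and its
# name are illustrative assumptions, not part of the original function;
# st.cache_data simply keeps the two dataframes from being re-queried on reruns.
import streamlit as st

@st.cache_data
def get_cached_data():
    # Delegates to load_data_sql(); Streamlit reuses the cached result.
    return load_data_sql()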
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_df_saved_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results[\"items\"]:\r\n try:\r\n track_name.append(items[\"track\"]['name'])\r\n track_id.append(items[\"track\"]['id'])\r\n artist.append(items[\"track\"][\"artists\"][0][\"name\"])\r\n duration.append(items[\"track\"][\"duration_ms\"])\r\n album.append(items[\"track\"][\"album\"][\"name\"])\r\n popularity.append(items[\"track\"][\"popularity\"])\r\n except TypeError: \r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n return df",
"def generate_training_df(df, id_csv):\n\n train_df = fetch_training_df(df)\n \n for column_name in ['song_id', 'track_id']:\n train_df[column_name] = train_df[column_name].map(lambda x: ast.literal_eval(x).decode('utf-8'))\n \n train_df.drop(['year'], axis=1, inplace=True)\n train_df = merge_id_into_df(train_df, id_csv)\n train_df.drop(['song_id', 'track_id'], axis=1, inplace=True)\n\n return train_df",
"def main(path_to_cdr_ids, path_to_db):\n from sqlalchemy import create_engine\n import pandas as pd\n\n cdr_ids_to_get = set(open(path_to_cdr_ids).readlines())\n\n cdr_ids_str = ','.join(['\"{}\"'.format(x) for x in cdr_ids_to_get])\n query_fmt = 'select * from cdr_id_to_homology where cdr_id in ({})'.format\n\n sql_con = create_engine('sqlite:///{}'.format(path_to_db))\n\n df = pd.read_sql(query_fmt(cdr_ids_str), sql_con)\n\n df = df.pivot(columns='homology').fillna(False)\n\n df.to_pickle('data/generated/homology_df.pkl')",
"def fetch_tracks_info_df(df):\n\n gen_df = df.copy()\n gen_df = gen_df[['artist_name', 'title', 'release', 'track_id', 'song_id']]\n\n for column_name in gen_df.columns:\n gen_df[column_name] = gen_df[column_name].map(lambda x: ast.literal_eval(x).decode('utf-8'))\n\n gen_df.rename(columns={'release': 'album_name'}, inplace=True)\n gen_df['year'] = df['year']\n\n return gen_df",
"def get_features_dataframe(tids):\n\n Data = {}\n for tid in tids:\n Data[tid] = get_song_features(tid)\n return pd.DataFrame.from_dict(Data, orient='index')",
"def create_dataframe(connection: sqlite3.Connection) -> pd.DataFrame:\n dataframe = pd.read_sql_query(f\"\"\"\n SELECT\n combined_jobs.id, combined_jobs.company, combined_jobs.link, combined_jobs.location,\n combined_jobs.date, combined_jobs.content, combined_jobs.title, location_cache.location,\n location_cache.latitude, location_cache.longitude\n FROM\n combined_jobs\n LEFT OUTER JOIN\n location_cache on (combined_jobs.location = location_cache.location)\"\"\",\n connection)\n print(dataframe)\n return dataframe",
"def __insert_song_data(cur, df):\n song_data = (\n df.song_id.values[0],\n df.title.values[0],\n df.artist_id.values[0],\n (df.year.values[0]).item(),\n (df.duration.values[0]).item()\n )\n cur.execute(song_table_insert, song_data)",
"def merge_id_into_df(df, id_df):\n df = pd.merge(df, id_df, on=['track_id', 'song_id'])\n return df",
"def create_dataframe(ids, names, p_links, c_links, cl_links):\n try:\n dict = {'ID':ids, 'Name': names, 'Photo':p_links, 'Flag':c_links, 'Club Logo':cl_links}\n df = pd.DataFrame(dict)\n return df\n except Exception as e:\n print(\"Exception creating or storing the dataframe: \" + str(e))",
"def process_song_file(cur, filepath):\r\n\r\n \"\"\" open song file\r\n drop duplicates\r\n set NAs to Zero \"\"\"\r\n df = pd.read_json(filepath, lines=True)\r\n df.drop_duplicates(subset=['song_id','artist_id'], keep = 'first')\r\n df['artist_latitude'] = df['artist_latitude'].fillna(0)\r\n df['artist_longitude'] = df['artist_longitude'].fillna(0)\r\n\r\n\r\n \"\"\" Extract columns for dataframe for song table\r\n drop duplicates before performing insert\r\n convert dataframe to a list for insert \"\"\"\r\n\r\n song_data = (df[['song_id','title','artist_id','year','duration']])\r\n song_data.drop_duplicates(subset='song_id',keep ='first',inplace = True)\r\n song_data = (song_data.values).tolist()\r\n song_data = song_data[0]\r\n # insert song record\r\n cur.execute(song_table_insert,song_data)\r\n\r\n \"\"\" Extract columns for dataframe for artist table,\r\n drop duplicates before performing insert\r\n convert dataframe to a list for insert \"\"\"\r\n\r\n artist_data = (df[['artist_id','artist_name','artist_location','artist_latitude','artist_longitude']])\r\n artist_data.drop_duplicates(subset='artist_id',keep ='first',inplace = True)\r\n artist_data = (artist_data.values).tolist()\r\n artist_data = artist_data[0]\r\n # insert artist record\r\n cur.execute(artist_table_insert, artist_data)",
"def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def __insert_songplay_data(cur, df):\n # for each songplay event, described by a row in the dataframe\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n\n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (pd.to_datetime(row.ts, unit='ms'), row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)",
"def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')",
"def loadDbIntoDf2(content):\n #Loading data into DF\n if content == 'trending':\n file = 'dataVideo.txt'\n elif content == 'music':\n file = 'dataVideoChallenge.txt'\n else:\n file = 'dataVideo.txt'\n with open(file,'r') as f:\n videos_dict = json.load(f)\n df = pd.DataFrame.from_dict(videos_dict)\n #filter on challenge\n if content == 'music':\n df = df[df.musicId == \"6745161928949106690\"]\n return df",
"def load_renter_data():\n return pd.read_sql_query(_sql_query, _con)",
"def process_song_data(spark, input_data, output_data):\n # get filepath to song data file\n song_data = input_data + 'song_data/*/*/*/*.json'\n \n # read song data file\n df = spark.read.json(song_data)\n \n # create view for songs table\n df.createOrReplaceTempView(\"songs\") \n \n \n # extract columns to create songs table. Adding Distinct and Not null to song_id as it is the primary key\n songs_table = spark.sql(\"\"\"\n SELECT DISTINCT song_id, \n title,\n artist_id,\n year,\n duration\n FROM songs\n WHERE song_id IS NOT NULL\n \"\"\")\n \n # write songs table to parquet files partitioned by year and artist\n songs_table.write.mode('overwrite').partitionBy(\"year\", \"artist_id\").parquet(output_data+'songs_table/')\n\n # create view for artists table\n df.createOrReplaceTempView(\"artists\") \n \n # extract columns to create artists table, Adding Distinct and Not null to artist_id as it is the primary key\n artists_table = spark.sql(\"\"\"\n SELECT DISTINCT artist_id, \n artist_name,\n artist_location,\n artist_latitude,\n artist_longitude\n FROM artists\n WHERE artist_id IS NOT NULL\n \"\"\")\n \n # write artists table to parquet files\n artists_table.write.mode('overwrite').parquet(output_data+'artists_table/')",
"def targets_to_dataframe(conn):\n return connect_database.get_table_into_pandas('target_info',conn)",
"def create_df_playlist(api_results,sp = None, append_audio = True):\r\n df = create_df_saved_songs(api_results[\"tracks\"])\r\n if append_audio == True:\r\n assert sp != None, \"sp needs to be specified for appending audio features\"\r\n df = append_audio_features(df,sp)\r\n return df",
"def get_training_data(db_conn):\n return pd.read_sql('''select * from churn_model.churn_data;''', db_conn)",
"def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n ret_df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return ret_df",
"def preprocessing():\n track_metadata = pd.read_csv('/home/sachet/Artificial Intelligence/song_data.csv')\n count_play = pd.read_csv('/home/sachet/Artificial Intelligence/10000.txt', sep='\\t', header=None, names=['user','song','play_count'])\n unique_track_metadata = track_metadata.groupby('song_id').max().reset_index()\n user_song_list = pd.merge(count_play, unique_track_metadata, how='left', left_on='song', right_on='song_id')\n user_song_list.rename(columns={'play_count':'listen_count'},inplace=True)\n del(user_song_list['song_id'])\n return user_song_list",
"def generate_info_df(df, id_csv):\n\n info_df = fetch_tracks_info_df(df)\n info_df = merge_id_into_df(info_df, id_csv)\n\n return info_df",
"def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table",
"def open_data(table):\n engine = create_engine(myDB, encoding='latin1') \n conn = engine.connect()\n select = conn.execute('select * from ' + table)\n\n df = pd.DataFrame(select.fetchall()) \n df.columns = select.keys()\n\n conn.close()\n return df",
"def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(\"set hive.execution.engine = tez\")\n cursor.execute(\"set tez.queue.name = sephora_internal\")\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return df",
"def process_song_data(spark, input_data, output_data):\n \n # get filepath to song data file\n song_data = os.path.join( input_data, \"song_data/*/*/*/*.json\")\n \n # SONG TABLE\n # read song data file\n df = spark.read.json(song_data)\n \n # extract columns to create songs table\n songs_table = df.select('song_id', 'title', 'artist_id',\n 'year', 'duration').dropDuplicates(['song_id'])\n \n print( \"HERE songs_table sample:\\n\")\n songs_table.show(5)\n # write songs table to parquet files partitioned by year and artist\n songs_table.write.partitionBy('year', 'artist_id').parquet(os.path.join(output_data, 'songs/songs.parquet'), 'overwrite')\n \n # ARTISTS TABLE\n # extract columns to create artists table\n artists_table = df.select(\"artist_id\",\"artist_name\",\"artist_location\",\"artist_latitude\",\"artist_longitude\").dropDuplicates(['artist_id'])\n \n print( \"HERE artists_table sample:\\n\")\n artists_table.show(5)\n # write artists table to parquet files\n artists_table.write.parquet(output_data + \"artists/\", mode=\"overwrite\")",
"def df2db_separate(self, df: pd.DataFrame, tab_name):\n self.execute(\"set hive.execution.engine = tez\")\n self.execute(\"set tez.queue.name = sephora_internal\")\n self.execute(\"drop table if exists {table_name}\".format(table_name=tab_name))\n\n max_df_size = 50000\n\n dfs = df_split(df, batch_size=max_df_size)\n num_piece = len(dfs)\n\n dfs[0].to_sql(tab_name, self.engine, method='multi', index=False)\n if num_piece > 1:\n for pdf in dfs[1:]:\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))\n pdf.to_sql(tab_name + '_tmp', self.engine, method='multi', index=False)\n self.execute(\"INSERT INTO TABLE {tn} SELECT * FROM {tt}\".format(\n tn=tab_name, tt=tab_name + '_tmp'\n ))\n print(len(pdf))\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))",
"def gen_user_artist_dataframe():\n print(\"Generating dataframe from lastfm usernames.\")\n user_to_id_dict = lastfm_data.get_users_and_ids()\n playcounts = defaultdict(dict)\n users = user_to_id_dict.keys()\n count = 0\n for user in users:\n count += 1\n top_artist_dict = get_top_artists(user)\n top_artists = top_artist_dict.keys()\n for artist in top_artists:\n playcounts[user][artist] = top_artist_dict[artist]\n print(str(count) + \"/\" + str(len(users)) + \" users counted.\")\n\n df = DataFrame(playcounts).T.fillna(0.0)\n return df",
"def create_query_df(self):\n\n # display output message for timeframe\n print(\n f'{Fore.GREEN}\\nQuerying database for tags between the timeframe: '\n f'{Fore.LIGHTGREEN_EX}{str(self._start)}{Fore.GREEN} and {Fore.LIGHTGREEN_EX}{str(self._end)}'\n f'{Style.RESET_ALL}')\n print(\n f'{Fore.GREEN}\\nTIMESPAN: '\n f'{Fore.LIGHTGREEN_EX}{self.time_span} hours'\n f'{Style.RESET_ALL}')\n\n engine = get_db_engine()\n offset = 0\n chunk_size = 100000\n\n dfs = []\n while True:\n sa_select = sa.select(\n [self.data_table],\n whereclause=sa.and_(\n self.data_table.c._TIMESTAMP > '{}'.format(self._start),\n self.data_table.c._TIMESTAMP <= '{}'.format(self._end)),\n limit=chunk_size,\n offset=offset,\n order_by=self.data_table.c._NUMERICID\n )\n dfs.append(pd.read_sql(sa_select, engine))\n offset += chunk_size\n if len(dfs[-1]) < chunk_size:\n break\n\n self.query_df = pd.concat(dfs)"
]
| [
"0.6467784",
"0.6313377",
"0.6313175",
"0.62454724",
"0.6208443",
"0.6207447",
"0.6163429",
"0.6080419",
"0.6073513",
"0.5993247",
"0.59899527",
"0.59840816",
"0.59701216",
"0.59040105",
"0.5885752",
"0.5883292",
"0.5880176",
"0.58707464",
"0.5863237",
"0.58616227",
"0.58483994",
"0.5810126",
"0.5797223",
"0.57950926",
"0.5790035",
"0.5782515",
"0.577832",
"0.5770354",
"0.57641727",
"0.5759512"
]
| 0.737397 | 0 |
Function to clean the lyrics. It lowercases the lyrics, tokenizes them, and removes all stopwords. | def clean_lyrics(data):
    # Initialize list to store clean data, tokenizer and the set of stopwords
cleaned_data = []
tokenizer = RegexpTokenizer(r'\w+')
stopword_set = set(stopwords.words('english'))
# Clean data for all the lyrics in the list
for doc in data:
# Get lowercase of lyrics string
new_str = doc.lower()
# Tokenize lyrics strings
dlist = tokenizer.tokenize(new_str)
# Remove stopwords
dlist = list(set(dlist).difference(stopword_set))
# Append cleaned lyrics to list
cleaned_data.append(dlist)
return cleaned_data | {
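
# A minimal usage sketch (illustrative only, assuming the NLTK stopwords corpus is
# available locally): clean_lyrics expects an iterable of lyric strings and returns
# one token list per lyric. Note that the set difference above also drops duplicate
# words and does not preserve the original word order.
sample_lyrics = [
    "Hello darkness my old friend",
    "I've come to talk with you again",
]
cleaned = clean_lyrics(sample_lyrics)
# cleaned[0] might be ['darkness', 'friend', 'hello', 'old'] -- order is arbitrary.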
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_stopwords(text):\n tokens = tokenize(text)\n tokens = stopwordsRem(tokens)\n return tokens",
"def remove_stopwords_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = [token for token in tokens\n if token.lower() not in self.stopword_list]\n self.doc = ' '.join(cleaned_tokens)",
"def clean(text):\n\n lower_proper = src.utils.nlp.lower_with_proper(text)\n lemmas = src.utils.nlp.lemmatize(lower_proper)\n cleaned = src.utils.nlp.clean_stopwords_punctuation(lemmas)\n return cleaned",
"def _clean_words(self, title, filter_stopwords=False):\n chars = '\"[]():;?!,\\'-'\n translation = dict((ord(c), u' ') for c in chars)\n def translate(text):\n if isinstance(text, unicode):\n translated = text.translate(translation)\n else:\n translated = text.translate(None, chars)\n return translated\n strips = '.'\n words = [\n x.strip(strips)\n for x in translate(title).split()\n ]\n for word in words:\n if len(word) >= self.min_word_length:\n if filter_stopwords and word.lower() not in STOPWORDS:\n continue\n # if the word contains non-ascii characters, try to convert\n # it to a ascii equivalent so that it's possible to type\n # \"naive\" when you don't even know how to type \"naïve\"\n try:\n word.encode('ascii')\n except UnicodeEncodeError:\n # it contains non-ascii characters\n ascii_word = unidecode(word)\n yield unicode(ascii_word).lower()\n yield word.lower()\n # yield ''.join(c for c in word if c.isalnum())",
"def lyrics2words(lyrics):\r\n words = word_tokenize(lyrics.decode('utf8'))\r\n words = clean(words)\r\n return words",
"def clean_the_text(text):\n \n #Replace non-word characters with empty space\n text = re.sub('[^A-Za-z0-9\\s]', ' ', text)\n \n #Remove punctuation\n text = ''.join([word for word in text if word not in string.punctuation])\n \n #Bring text to lower case\n text = text.lower()\n \n #Tokenize the text\n tokens = re.split('\\W+', text)\n \n #Remove stopwords\n text = [word for word in tokens if word not in stopword]\n \n #Lemmatize the words\n text = [wn.lemmatize(word) for word in text]\n \n #Return text\n return text",
"def set_clean_raw_text(raw_text):\n\tlogger.debug('Cleaning Text')\n\n\t#tokenize and lower sentence\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_text.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\ttokens = [w for w in tokens if not is_stopword(w)]\n\n\t#remove punctuation\n\ttokens = [w for w in tokens if not is_punctuation(w)]\n\n\t#remove short \n\ttokens = [w for w in tokens if not is_shorter(w)]\n\n\t#remove number\n\ttokens = [w for w in tokens if not is_number(w)]\n\n\t#stem words\n\ttokens = map(stem, tokens)\n\n\tlogger.debug('Cleaning Text Complete')\n\treturn set(tokens)",
"def clean_text(text):\n text = text.lower() # Convert the text to lower case\n text = re.sub(\",\", \" \", text) # Replace commas with an extra space\n\n text = re.sub(\"<.*?>\", \"\", text) # Clean out any HTML tags\n text = re.sub(\"\\s+\", \" \", text) # Replace multiple spaces with\n\n text = text.split()\n\n text = [\n re.sub(\"[^\\w]\", \"\", i.rstrip()) for i in text if i not in all_stopwords\n ] # Clean out stopwords\n\n # text = engStem.stemWords(text)# English Stemming\n\n text = \" \".join(text)\n return text",
"def clean_lyrics(lyrics_file):\n data_corpus = []\n with open(lyrics_file) as csvfile:\n reader = csv.reader(csvfile, delimiter=\"\\t\")\n for row in reader:\n sentences = row[2].strip().split(u\"<BR>\")\n for sentence in sentences:\n sentence = unicode(sentence)\n sentence = translate_non_alphanumerics(sentence)\n sentence = space_pat.sub(u' ', sentence)\n\n # delete English\n # sentence = eng_words_pat.sub(u'', sentence).split(u\"\\s\")\n\n # sentence = sentence.split(u'')\n # sentence.append(u\".\")\n # sentence += u'.'\n\n if len(sentence) > 1:\n data_corpus.append(sentence)\n\n logger.info(\" Done cleaning crawled data! \")\n\n # saving the corpus\n with codecs.open(\"data/cleaned_lyrics.txt\", \"w\", 'UTF-8') as f:\n f.write(\"\\n\".join(data_corpus))",
"def clean_text(text):\n\n lemmizer = WordNetLemmatizer()\n stemmer = porter.PorterStemmer()\n\n stop = stopwords.words('english')\n stop += ['.', ',', ':', '...', '!\"', '?\"', \"'\", '\"', ' - ', ' — ', ',\"', '.\"', '!', ';', '♫♫', '♫', \\\n '.\\'\"', '[', ']', '—', \".\\'\", 'ok', 'okay', 'yeah', 'ya', 'stuff', ' 000 ', ' em ', \\\n ' oh ', 'thank', 'thanks', 'la', 'was', 'wa', '?', 'like', 'go', ' le ', ' ca ', ' I ', \" ? \", \"s\", \" t \",\n \"ve\", \"re\"]\n # stop = set(stop)\n\n cleaned_text = []\n\n for post in text:\n cleaned_words = []\n\n # remove parentheticals\n clean_parens = re.sub(r'\\([^)]*\\)', ' ', post)\n\n #clean_parens = [line.decode('utf-8').strip() for line in clean_parens]\n\n # tokenize into words\n for word in wordpunct_tokenize(clean_parens):\n\n\n # lowercase and throw out any words in stop words\n if word.lower() not in stop:\n\n # lemmatize to roots\n low_word = lemmizer.lemmatize(word)\n\n # stem and lowercase ( an alternative to lemmatize)\n # low_word = stemmer.stem(root.lower())\n\n # keep if not in stopwords (yes, again)\n if low_word.lower() not in stop:\n # put into a list of words for each document\n cleaned_words.append(low_word.lower())\n\n # keep corpus of cleaned words for each document\n cleaned_text.append(' '.join(cleaned_words))\n\n\n return cleaned_text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def remove_stopwords(text):\n stopwords = [\"i\", \"me\", \"my\", \"myself\", \"we\", \"our\", \"ours\", \"ourselves\", \"you\", \"your\", \"yours\", \"yourself\", \"yourselves\", \"he\", \"him\", \"his\", \"himself\", \"she\", \"her\", \"hers\", \"herself\", \"it\", \"its\", \"itself\", \"they\", \"them\", \"their\", \"theirs\", \"themselves\", \"what\", \"which\", \"who\", \"whom\", \"this\", \"that\", \"these\", \"those\", \"am\", \"is\", \"are\", \"was\", \"were\", \"be\", \"been\", \"being\", \"have\", \"has\", \"had\", \"having\", \"do\", \"does\", \"did\", \"doing\", \"a\", \"an\", \"the\", \"and\", \"but\", \"if\", \"or\", \"because\", \"as\", \"until\", \"while\", \"of\", \"at\", \"by\", \"for\", \"with\", \"about\", \"against\", \"between\", \"into\", \"through\", \"during\", \"before\", \"after\", \"above\", \"below\", \"to\", \"from\", \"up\", \"down\", \"in\", \"out\", \"on\", \"off\", \"over\", \"under\", \"again\", \"further\", \"then\", \"once\", \"here\", \"there\", \"when\", \"where\", \"why\", \"how\", \"all\", \"any\", \"both\", \"each\", \"few\", \"more\", \"most\", \"other\", \"some\", \"such\", \"no\", \"nor\", \"not\", \"only\", \"own\", \"same\", \"so\", \"than\", \"too\", \"very\", \"s\", \"t\", \"can\", \"will\", \"just\", \"don\", \"should\", \"now\"]\n return \" \".join([word for word in text.split() if word not in stopwords])",
"def cleaning(self, document):\n remove_punct = ''.join(i for i in document.lower() if i not in self.punctuation)\n tokenized = [i for i in remove_punct.split() if i not in self.stopwords]\n if self.lang is not 'chinese':\n # Lemmatizes if not chinese\n tokenized = [self.lemmatize.lemmatize(i) for i in tokenized]\n return tokenized",
"def clean_text(corpus, model):\n \n new_doc = []\n doc = model(corpus)\n for word in doc:\n if not word.is_stop and word.is_alpha:\n new_doc.append(word.lemma_.lower())\n \n cleaned_string = \", \".join(new_doc) # putting the strings back into one string\n return cleaned_string",
"def clean_text(text):\n global cleaned_text\n # remove numbers\n text_nonum = re.sub(r'\\d+', '', text)\n # remove punctuations and convert characters to lower case\n text_nopunct = \"\".join([char.lower() for char in text_nonum if char not in string.punctuation]) \n # substitute multiple whitespace with single whitespace\n # Also, removes leading and trailing whitespaces\n text_no_doublespace = re.sub('\\s+', ' ', text_nopunct).strip()\n #tokenise text\n tokenised_text = text_no_doublespace.split()\n for word in tokenised_text:\n if len(word) == 1:\n tokenised_text.remove(word)\n #if word is a stop word, remove it from the list\n elif word in stopwords.words('english'):\n tokenised_text.remove(word)\n #de-tokenise text\n cleaned_text = ' '.join(tokenised_text)\n return cleaned_text",
"def clean_lyrics(song):\n cleaned_lyrics = dict()\n for name, lyrics in song.lyrics.items():\n # Strip trailing punctuation except for question marks.\n lyrics = lyrics.strip(\"。,;:\").strip(',.;:')\n\n # Replace middle punctuation with special blank-space character.\n # The special space character is specified here:\n # https://unicodelookup.com/#%E3%80%80/1\n lyrics = (\n lyrics.replace(\"。\", \" \")\n .replace(\",\", \" \")\n .replace(\";\", \" \")\n .replace(\"、\", \" \")\n .replace(\".\", \" \")\n .replace(\",\", \" \")\n .replace(\";\", \" \")\n .replace(\" \", \" \")\n )\n cleaned_lyrics[name] = lyrics\n song.lyrics.update(cleaned_lyrics)\n return song",
"def _clean(text, remove_stopwords=False):\n text = _remove_between_square_brackets(text)\n text = _replace_contractions(text)\n \n words = nltk.word_tokenize(text)\n words = _remove_non_ascii(words)\n words = _to_lowercase(words)\n words = _remove_punctuation(words)\n words = _replace_numbers(words)\n\n if remove_stopwords:\n words = _remove_stopwords(words)\n\n return ' '.join(words)",
"def clean_text(self, text):\n words = SPLIT_TEXT.findall(text.lower())\n words = self.rm_stop_words(words)\n words = self.stem_words(words)\n return words",
"def processText(text):\n\n no_punc = [word for word in text.split() if word.isalpha()] # and word not in stopwords.words('english')]\n #removes non-letter characters and only includes words not included in stopwords\n no_punc = \" \".join(no_punc) \n clean_words = nltk.word_tokenize(no_punc) #splits the punctuation marks from the real words\n return clean_words",
"def clean(word):\n word = word.lower()\n stopwords = ['of', 'and','to', 'at', 'in', '@']\n word = re.sub(r'[\\&/\\-\\(\\)\\|\\@,\\]\\[]+', ' ', word)\n for stopword in stopwords:\n pattern = r'\\b' + stopword + r'\\b'\n pattern = re.compile(pattern)\n word = re.sub(pattern, '', word)\n word = re.sub(r'\\s\\s+', ' ', word)\n return word",
"def clean_text(corpus, model): \n new_doc = []\n doc = model(corpus)\n for word in doc:\n if not word.is_stop and word.is_alpha:\n new_doc.append(word.lemma_.lower()) \n final = \", \".join(map(str,new_doc)) \n return final",
"def purge_words(self):\n\n\t\tword_list = self.transcript_string.encode('utf-8').split()\n\t\tpurged_word_list = {}\n\t\tfor word in word_list:\n\t\t\tif word.isalpha():\n\t\t\t\tif word.islower():\n\t\t\t\t\tpurged_word_list.setdefault(word, []).append(word)\n\t\t\t\telse:\n\t\t\t\t\tlower_word = word.lower()\n\t\t\t\t\tpurged_word_list.setdefault(lower_word, []).append(word) \n\t\t\telse:\n\t\t\t\tcontinue \n\t\t\n\t\tself.word_list = purged_word_list",
"def sanitize(text, stop_word_list):\n\n # convert the text into Unicode\n text = unicode(text)\n\n #print(type(text))\n\n # replace dot with space\n text = text.translate({ord(\".\"): ord(\" \")})\n # replace dash with space\n text = text.translate({ord(\"-\"): ord(\" \")})\n\n # split the text on white-space\n words = text.split()\n sanitized_words = []\n for w in words:\n\n # ignore numbers\n if w.isnumeric():\n continue\n\n # print(\"Word (Before Punctuation): \" + w)\n\n # remove punctuation\n # Ref: https://stackoverflow.com/questions/265960/best-way-to-strip-punctuation-from-a-string-in-python\n # w = w.translate(None, string.punctuation)\n\n # The above method does not work for Unicode strings\n # Ref: https://stackoverflow.com/questions/23175809/typeerror-translate-takes-one-argument-2-given-python#23306505\n # print(type(w))\n\n # replace punctuations with None\n w = w.translate({ord(c): None for c in string.punctuation})\n w = w.lower()\n # print(\"Word (After Punctuation): \"+w)\n\n # Note: Remove stop-words before Stemming, or else the stop-word\n # matching will not work.\n # If the word is in Stop Word List\n try:\n i = stop_word_list.index(w.lower())\n # skip further processing of word loop\n # print(\"Stop Word Removed: \"+w)\n continue\n except ValueError:\n pass\n\n w = stemm_word(w)\n\n # hack, hack, hack\n if w == '':\n continue\n\n # add the sanitized word into return list\n sanitized_words.append(w)\n\n return sanitized_words",
"def sanitize_text(tokens, stopwords=None):\n\n tokens = [x.lower() for x in tokens]\n regex = re.compile('[^a-z]')\n\n for index in range(len(tokens)):\n tokens[index] = regex.sub('', tokens[index])\n if stopwords and tokens[index] in stopwords:\n tokens[index] = ''\n\n # remove empty elements\n tokens = [token for token in tokens if token != '']\n return tokens",
"def basic_clean(text):\n wnl = nltk.stem.WordNetLemmatizer()\n stopwords = stopwords.words('english') + ADDITIONAL_STOPWORDS\n text = (unicodedata.normalize('NFKD', text)\n .encode('ascii', 'ignore')\n .decode('utf-8', 'ignore')\n .lower())\n words = re.sub(r'[^\\w\\s]', '', text).split()\n return [wnl.lemmatize(word) for word in words if word not in stopwords]"
]
| [
"0.7149533",
"0.711997",
"0.71127933",
"0.70084614",
"0.7008055",
"0.6964465",
"0.69019014",
"0.6889795",
"0.6886759",
"0.6817508",
"0.680712",
"0.680712",
"0.680712",
"0.680712",
"0.680712",
"0.680712",
"0.6803836",
"0.6785484",
"0.6744621",
"0.6723114",
"0.6663751",
"0.66411984",
"0.6639854",
"0.66014177",
"0.6597167",
"0.6553143",
"0.64598185",
"0.64476824",
"0.6441657",
"0.64192"
]
| 0.80085784 | 0 |
Function to download the NLTK stopwords, which is required so they are available when the app runs on a deployed Streamlit instance. | def download_nltk():
nltk.download('stopwords')
return | {
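
# A minimal usage sketch (an illustrative assumption, not part of the original
# function): guard the download so the corpus is fetched only when it is missing,
# which avoids re-downloading on every rerun of the deployed app. nltk.data.find
# raises LookupError when the resource is not installed.
import nltk

def ensure_stopwords():
    try:
        nltk.data.find('corpora/stopwords')
    except LookupError:
        download_nltk()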
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_stopwords():\n\n try:\n stopwords = nltk.corpus.stopwords.words('english')\n except LookupError:\n nltk.download('stopwords')\n stopwords = nltk.corpus.stopwords.words('english')\n\n return stopwords",
"def getStopWords():\n import os\n cur_dir = os.getcwd()\n\n dk_addition = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','danish_stopwords.txt'), encoding=\"utf-8\")] # danish stopword list\n\n customer_specific_words = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','stopwords_lda_customer_specific.txt'), encoding=\"utf-8\")] # customer specific\n dk_addition.extend(customer_specific_words)\n\n stopwords_1gram = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','stopwords_1gram.txt'), encoding=\"utf-8\")] # stopwords 1grams\n dk_addition.extend(stopwords_1gram)\n\n stopwords_2gram = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','stopwords_2gram.txt'), encoding=\"utf-8\")] # stopwords 2grams\n dk_addition.extend(stopwords_2gram)\n \n stopwords_3gram = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','stopwords_3gram.txt'), encoding=\"utf-8\")] # stopwords 3grams\n dk_addition.extend(stopwords_3gram)\n \n # nltk\n stopwords = nltk.corpus.stopwords.words('danish')\n stopwords.extend(dk_addition)\n stopwords = list(set(stopwords))\n return stopwords",
"def build_stopwords():\r\n\tprint('\\nbuilding stopwords')\r\n\t\r\n\tif load_stopwords():\r\n\t\treturn\r\n\r\n\tglobal stopwords\r\n\tstopwords = nltk.corpus.stopwords.words('english')\r\n\tfor f in os.listdir(paths.path_data_stopwords):\r\n\t\tpath_stopwords = paths.path_data_stopwords + '/' + f\r\n\t\twith open(path_stopwords,'r') as f:\r\n\t\t\tfor l in f:\r\n\t\t\t\tw = l.strip()\r\n\t\t\t\tw = re.sub(r\"[\\x80-\\xff]\",\" \",w)\r\n\t\t\t\tif (w not in stopwords):\r\n\t\t\t\t\tstopwords.append(w)\r\n\t\r\n\t# wip improve with POS and remove numbers\r\n\twith open(paths.path_data_stopwords_txt,'w') as outf:\r\n\t\toutf.write('\\n'.join(stopwords))\r\n\t\r\n\tprint('\\nstopword count : ' + str(len(stopwords)))",
"def test_get_stopwords():\n stopwords = sd.get_stopwords(\"resources/stopwords.de.json\")\n assert len(stopwords) > 0",
"def pretrained(name=\"stopwords_en\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(StopWordsCleaner, name, lang, remote_loc)",
"def stopwords(self):\n with open(STOPWORDS_LIST, 'r') as content:\n return content.read().splitlines()",
"def load_stopwords():\r\n\tglobal stopwords\r\n\tif os.path.exists(paths.path_data_stopwords_txt):\r\n\t\tprint('\\nloading stopwords')\r\n\t\twith open(paths.path_data_stopwords_txt,'r') as inf:\r\n\t\t\tstopwords = inf.read().split('\\n')\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def _customize_stopwords(self, nlp: Language, language: AnyStr) -> None:\n try:\n stopwords_file_path = os.path.join(self.stopwords_folder_path, f\"{language}.txt\")\n with open(stopwords_file_path) as f:\n custom_stopwords = set(f.read().splitlines())\n for word in custom_stopwords:\n nlp.vocab[word].is_stop = True\n nlp.vocab[word.capitalize()].is_stop = True\n nlp.vocab[word.upper()].is_stop = True\n for word in nlp.Defaults.stop_words:\n if word.lower() not in custom_stopwords:\n nlp.vocab[word].is_stop = False\n nlp.vocab[word.capitalize()].is_stop = False\n nlp.vocab[word.upper()].is_stop = False\n nlp.Defaults.stop_words = custom_stopwords\n except (ValueError, OSError) as e:\n raise TokenizationError(f\"Stopword file for language '{language}' not available because of error: '{e}'\")",
"def stop_words():\n return get_stop_words('es') + get_stop_words('ca') + get_stop_words('en')",
"def load_stop_words():\n with open('../data/stop_words.txt', 'r') as stop_words_file:\n return stop_words_file.read().split()",
"def __init__(self,dir_stopwords):\n \n arc = open(dir_stopwords, \"r\", encoding='utf-8')\n self.stp_wrds = [line.strip() for line in arc]\n arc.close()",
"def getStopWords(spacy_model):\r\n # for languages available go to: https://github.com/stopwords-iso\r\n s_words = stopwords.stopwords('en')\r\n\r\n analyzer = partial(rawAnalyzer, spacy_model, [])\r\n return seq(s_words).flat_map(analyzer).to_list()",
"def _get_stopwords():\n all_stopwords = many_stop_words.get_stop_words('ru')\n all_stopwords.update(many_stop_words.get_stop_words('en'))\n\n more_stopwords = set(stopwords.words(['russian', 'english']))\n all_stopwords.update(more_stopwords)\n\n return all_stopwords",
"def loadDefaultStopWords(language=\"english\"):\n from pyspark.ml.wrapper import _jvm\n stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover\n return list(stopWordsObj.loadDefaultStopWords(language))",
"def loadDefaultStopWords(language=\"english\"):\n from pyspark.ml.wrapper import _jvm\n stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover\n return list(stopWordsObj.loadDefaultStopWords(language))",
"def remove_stopwords(words):\n stopwords = nltk.corpus.stopwords.words('english')\n return [w for w in words if w not in stopwords]",
"def _stopwords():\n global _stopword_set\n if _stopword_set:\n return _stopword_set\n f_name = \"stopword.list\"\n if os.path.isfile(f_name):\n res = set()\n with open(f_name) as f:\n for line in f:\n res.add(line.strip())\n _stopword_set = res\n return res\n else:\n error(\"stop words - not a file: %s\" % f_name)",
"def load_stop_words() -> list:\r\n with open(f'{ENGINE}/stop_words.txt', 'r') as i:\r\n stop_words = i.read().splitlines()\r\n stop_words = list(map(lambda x: x.upper(), stop_words)) # Force all stop words to UPPER case.\r\n return stop_words",
"def remove_stopwords(words):\n new_words = []\n for word in words:\n # print(word)\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words",
"def construct_stop_words():\n stop_words_list = [\"uk\", \"ceo\", \"apple\", \"wal\", \"st\", \"q1\", \"q2\", \"q3\", \"q4\",\n \"bp\", \"wednesday\", \"tuesday\", \"monday\", \"thursday\", \"friday\", \"sept\", \"johnson\", \"inc\",\n \"david\", \"amazon.com\"]\n\n for words in stop_words_list:\n STOP_WORDS.add(words)\n\n return STOP_WORDS",
"def getstopwords():\n file = open('stopWords.txt', 'r')\n stoplist = []\n for word in file.readlines():\n word = word.strip('\\n')\n stoplist.append(word)\n return stoplist",
"def query_stopwords():\n if len(flask.request.args) == 0:\n # default response when no arguments are given\n return flask.jsonify({'stopwords': []})\n\n feature = flask.request.args.get('feature', 'lemmata')\n list_size = flask.request.args.get('list_size', 10)\n try:\n list_size = int(list_size)\n except ValueError:\n return apitess.errors.error(\n 400,\n data={k: v\n for k, v in flask.request.args.items()},\n message='\"list_size\" must be an integer')\n\n # language takes precedence over works\n language = flask.request.args.get('language', None)\n if language:\n stopword_indices = create_stoplist(flask.g.db, list_size, feature,\n language)\n if len(stopword_indices) == 0:\n return apitess.errors.error(\n 400,\n data={k: v\n for k, v in flask.request.args.items()},\n message='No stopwords found for feature \"{}\" in language \"{}\".'\n .format(feature, language))\n return flask.jsonify({\n 'stopwords':\n get_stoplist_tokens(flask.g.db, stopword_indices, feature,\n language)\n })\n\n works = flask.request.args.get('works', None)\n if works:\n oids, fails = apitess.utils.parse_works_arg(works)\n if fails:\n return apitess.errors.bad_object_ids(fails, flask.request.args)\n text_results = flask.g.db.find(tesserae.db.entities.Text.collection,\n _id=oids)\n if len(text_results) != len(oids):\n # figure out which works were not found in the database and report\n found = {str(r.id) for r in text_results}\n not_found = []\n for obj_id in oids:\n if obj_id not in found:\n not_found.append(obj_id)\n return apitess.errors.error(\n 400,\n data={k: v\n for k, v in flask.request.args.items()},\n message=('The following works could not be found '\n f'in the database: {not_found}'))\n stopword_indices = create_stoplist(\n flask.g.db,\n list_size,\n feature,\n text_results[0].language,\n basis=[str(t.id) for t in text_results])\n return flask.jsonify({\n 'stopwords':\n get_stoplist_tokens(flask.g.db, stopword_indices, feature,\n language)\n })\n\n # if we get here, then we didn't get enough information\n return apitess.errors.error(\n 400,\n data={k: v\n for k, v in flask.request.args.items()},\n message=(\n 'Insufficient information was given to calculate a stopwords '\n 'list (Perhaps you forgot to specify \"language\" or \"works\").'))",
"def check_fetch_uci_words():\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/'\n url += 'bag-of-words/'\n partial_path = get_dataset_dir(\"uci_words\")\n full_path = os.path.join(partial_path, \"uci_words.zip\")\n if not os.path.exists(partial_path):\n os.makedirs(partial_path)\n if not os.path.exists(full_path):\n # Download all 5 vocabularies and zip them into a file\n all_vocabs = ['vocab.enron.txt', 'vocab.kos.txt', 'vocab.nips.txt',\n 'vocab.nytimes.txt', 'vocab.pubmed.txt']\n for vocab in all_vocabs:\n dl_url = url + vocab\n download(dl_url, os.path.join(partial_path, vocab),\n progress_update_percentage=1)\n\n def zipdir(path, zipf):\n # zipf is zipfile handle\n for root, dirs, files in os.walk(path):\n for f in files:\n if \"vocab\" in f:\n zipf.write(os.path.join(root, f))\n\n zipf = zipfile.ZipFile(full_path, 'w')\n zipdir(partial_path, zipf)\n zipf.close()\n return full_path",
"def remove_stopwords_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = [token for token in tokens\n if token.lower() not in self.stopword_list]\n self.doc = ' '.join(cleaned_tokens)",
"def remove_stopwords(dataset_path: str) -> str:\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str,\n \"text_stemmed\": str,\n \"text_lemmatized\": str,\n }\n\n if \"train\" in dataset_path:\n dtypes[\"target\"] = int\n\n def _rm_stopwords(tokens: List[str]):\n return [w for w in tokens\n if w not in nltk.corpus.stopwords.words('english')]\n\n new_path = _make_new_filepath(dataset_path, \"nostopwords\")\n df = pd.read_csv(\n f\"/data/{dataset_path}\",\n index_col=\"id\",\n dtype=dtypes,\n converters={\"tokens\": ast.literal_eval})\n df[\"tokens\"] = df[\"tokens\"].apply(_rm_stopwords)\n df.to_csv(f\"/data/{new_path}\")\n return new_path",
"def remove_stopwords(words):\r\n new_words = []\r\n for word in words:\r\n if word not in stopwords.words('english'):\r\n new_words.append(word)\r\n return new_words",
"def removeOwnStopWords(self, sort=True, lc=False):\n\t\tself.textFile = self.removeStopWords(text=self.textFile, sort=sort, lc=lc)",
"def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in nltk.corpus.stopwords.words('french'):\n new_words.append(word)\n return new_words",
"def remove_stop_words(tweet):\n tokens_without_sw = \"\"\n for word in tweet.split():\n if not word.lower() in STOPWORDS:\n tokens_without_sw += word.lower() + \" \"\n return tokens_without_sw",
"def make_stopwords(filepath='stopwords.txt'):\n sw = open(filepath, \"r\")\n my_stopwords = sw.read()\n my_stopwords = my_stopwords.split(\", \")\n sw.close()\n\n all_stopwords = stopwords.words('english')\n all_stopwords.extend(my_stopwords)\n return all_stopwords"
]
| [
"0.7056914",
"0.672676",
"0.65633",
"0.64674133",
"0.64260197",
"0.60864633",
"0.60857505",
"0.6040219",
"0.5840577",
"0.5820878",
"0.5693822",
"0.5691857",
"0.5678198",
"0.567216",
"0.567216",
"0.5590431",
"0.5583685",
"0.55802655",
"0.5579682",
"0.5562735",
"0.5561147",
"0.5520541",
"0.5496522",
"0.5492152",
"0.5478263",
"0.54555327",
"0.54437995",
"0.5440717",
"0.5438917",
"0.54299587"
]
| 0.8456259 | 0 |
Function to create a playlist on the Spotify account of the authenticated user. | def create_playlist(user_id, sp, recommendations, name, description):
# Get current user ID
current_user = sp.current_user()
current_user_id = current_user['id']
    # Get list of track IDs
track_id_list = list(recommendations['id'].values)
    # Create an empty playlist
sp.user_playlist_create(user = user_id,
name = name,
description = description)
# Get playlist ID
playlists = sp.current_user_playlists(limit=1)
playlist_name = playlists['items'][0]['name']
playlist_id = playlists['items'][0]['id']
# Add tracks to playlist
sp.user_playlist_add_tracks(user = current_user_id,
playlist_id = playlist_id,
tracks = track_id_list)
    # Check whether the playlist was successfully created.
    if name == playlist_name:
        return '**Playlist was successfully created on your Spotify account.**'
    else:
        return '**Playlist was not successfully created.**' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_playlist(access_token):\n request_body = json.dumps({\n \"name\": \"SpotiAdd\",\n \"description\": \"All Liked Youtube Videos\",\n \"public\": True\n })\n userId = getUserId(access_token)\n query = \"https://api.spotify.com/v1/users/{}/playlists\".format(\n userId)\n response = requests.post(\n query,\n data=request_body,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # print(\"create_playlist_id : {}\".format(response_json),file = sys.stdout)\n return response_json[\"id\"]",
"def create_playlist(self, request):\n # TODO: Max amount of playlists at 20 for a user\n user = Account.find_by_id(request.userid)\n if user is None:\n print \"User not found\" \n return PlaylistResponse(errmsg=\"User ID not found\")\n new_pl = Playlist.add_new_playlist(user.key, request.name)\n return PlaylistResponse(pid=new_pl.key.id())",
"def spotify_create_playlist(\n playlist_name: str,\n access_token: str,\n user_spotify_id: str,\n public: bool = True,\n description: str = None,\n) -> str:\n headers = {\n \"Authorization\": \"Bearer {}\".format(access_token),\n \"Content-Type\": \"application/json\",\n }\n body = {\"name\": playlist_name, \"public\": public}\n if description is not None:\n body[\"description\"] = description\n response = requests.post(\n \"https://api.spotify.com/v1/users/{}/playlists\".format(user_spotify_id),\n headers=headers,\n json=body,\n )\n if response.status_code != 200 and response.status_code != 201:\n return \"Error {}\".format(response.text)\n return response.json()[\"id\"]",
"def create_playlist():\n sp = credentials()\n sp.user_playlist_create('truetiming', name='Billboard Hot 100')",
"def createspotifyplaylist(accesstoken, name, playlists, tracklist, userid):\n\n # find a unique name for the playlist\n playlistname = \"{} - flowed\".format(name)\n if playlistname in playlists:\n num = 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n while playlistname in playlists:\n num = num + 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n\n # create playlist\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"name\"] = playlistname\n\n url = \"https://api.spotify.com/v1/users/{}/playlists\".format(userid)\n\n r = requests.post(url, headers=headers, json=payload)\n\n response = r.json()\n\n\n if \"collaborative\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(url, headers=headers, json=payload)\n response = r.json()\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n elif \"collaborative\" in response:\n break\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n else: \n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n\n playlistid = response[\"id\"]\n playlisturl = response[\"external_urls\"][\"spotify\"]\n\n # add tracks to playlist\n while len(tracklist) > 100:\n\n # add first 100\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist[:100]\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n continue\n else:\n print(\"error: problem adding songs to playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem adding songs to playlist\")\n print(\"no error response\")\n return(False)\n\n tracklist = tracklist[100:]\n\n if tracklist:\n\n # add the remainder of the tracks\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n response = r.json()\n if \"snapshot_id\" in response:\n break\n elif response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist 
request failed\")\n print(\"no error response\")\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(\"no error response\")\n return(False)\n\n return(playlistname, playlisturl)",
"def create_playlist(self, playlist_name: str, song_ids: List[str]) -> str:\n user = self.init_user()\n user_id = user.me()['id']\n playlist_data = user.user_playlist_create(\n user=user_id, name=playlist_name, public=True)\n user.playlist_add_items(playlist_data['id'], song_ids)\n playlist_link = playlist_data['external_urls']['spotify']\n return playlist_link",
"def create_playlist(self):\n playlist=self.sp.user_playlist_create(user=self.username,name=self.nameOfPlaylist,description=self.description)\n return playlist['id']",
"def playlist_create(self, user_id: str, name: str, public: bool = True,\n description: str = ''):\n payload = {\n 'name': name,\n 'public': public,\n 'description': description\n }\n return self._post(f'users/{user_id}/playlists', payload=payload)",
"def create_playlist(self, name):\n\n user_id = self.get_current_user()\n endpoint = f\"/users/{user_id}/playlists\"\n headers = self.headers\n headers.update()\n response = self._send(\n endpoint,\n \"POST\",\n extra_headers={\"Content-Type\": \"application/json\"},\n data=json.dumps({\"name\": name, \"public\": False})\n )\n playlist_id = response.json()[\"id\"]\n return playlist_id",
"def create_playlist(self, playlist_name):\n print(\"create_playlist needs implementation\")",
"def create_playlist(self, data):\n pass",
"def user_playlist_create(self, user, name, public=True, description=\"\", **kwargs):\n # pylint: disable=no-member\n data = {\"name\": name, \"public\": public, \"description\": description}\n return self._post(\n API.PLAYLISTS.value.format(user_id=user), payload=data, **kwargs\n )",
"def user_playlist_create(self, user, name, public=True, collaborative=False):\n url = '/users/{user_id}/playlists'\n uid = get_id('user', user)\n body = dict(name=name, public=public, collaborative=collaborative)\n return self._post(url.format(user_id=uid), payload=body)",
"def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)",
"def add_to_playlist(track_ids, playlist_name):\n \n playlist_id = find_playlist(playlist_name)\n \n spotifyObject.user_playlist_add_tracks(config.USERNAME, playlist_id,\n track_ids, position=None)",
"def add_tracks():\n sp = credentials()\n tracks = spotify_tracklist()\n playlist_id = grab_playlist()\n sp.user_playlist_add_tracks('truetiming', playlist_id, tracks)",
"def create_playlist(self, playlist_name):\n if playlist_name.upper() in self.playlist.keys():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n else:\n self.playlist[playlist_name.upper()] = []\n self.playlist_list.append(playlist_name)\n print(f\"Successfully created new playlist: {playlist_name}\")",
"async def add_playlist(\n self, user: discord.User, url: str\n ) -> Optional[UserPlaylist]:\n\n playlist = await get_playlist(self.spotify, self.youtube, url)\n\n if not playlist:\n return\n\n generated_id = str(uuid.uuid4())\n await self.database.insertifnotexists(\n self.tables[\"playlists\"],\n {\"user\": user.id, \"playlist_url\": url, \"id\": generated_id},\n {\"user\": user.id, \"playlist_url\": url},\n )\n\n return UserPlaylist(self, user, generated_id, playlist)",
"def create_playlist(self, playlist_name):\n #self._video_playlist.name=playlist_name\n #self._video_playlist.caseless=playlist_name.lower()\n #print(f\"Successfully created new playlist: {self._video_playlist.name}\")\n if playlist_name.lower() not in self.playlists:\n self.playlists[playlist_name.lower()]=[]\n print(\"Successfully created new playlist: {0}\".format(playlist_name))\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")",
"def create_playlist(self, playlist_name):\n if playlist_name.lower() in self._playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n print(f\"Successfully created new playlist: {playlist_name}\")\n self._playlists[playlist_name.lower()] = Playlist(playlist_name)",
"async def playlist(self, ctx, *, query):\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"playlist\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"playlists\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))",
"def create_playlist(self, playlist_name):\n if playlist_name.lower() in self.playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n else:\n self.playlist_names[playlist_name.lower()] = playlist_name\n self.playlists[playlist_name.lower()] = []\n print(\"Successfully created new playlist:\", playlist_name)",
"def create_playlist(self, playlist_name):\n playlist_name = Playlist()\n if self != playlist_name:\n print(f\"successfully created new playlist: {playlist_name}\")\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")",
"def create_playlist(self, playlist_name):\n new_playlist_id = playlist_name.lower()\n if new_playlist_id in self.playlists.keys():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n\n new_playlist = Playlist(playlist_name)\n self.playlists[new_playlist_id] = new_playlist\n print(f\"Successfully created new playlist: {playlist_name}\")",
"def init_user(self) -> Any:\n return \\\n spotipy.Spotify(auth_manager=spotipy.oauth2.SpotifyOAuth(scope=\"playlist-modify-public\",\n client_id=self._public_id, client_secret=self._secret_id,\n redirect_uri=self._redirect_uri))",
"def create_playlist(self, title, description=\"\"):\n if self.youtube is None:\n self.youtube = __get_client()\n # This code creates a new, private playlist in the authorized user's\n # channel.\n playlists_insert_response = self.youtube.playlists().insert(\n part=\"snippet,status\",\n body = {\n \"snippet\": {\n \"title\": title,\n \"description\": description\n },\n \"status\": {\n \"privacyStatus\": \"private\"\n }\n }\n ).execute()\n return playlists_insert_response",
"def create_playlist(self, playlist_name):\n for playlist in self.playlists.keys():\n if playlist_name.upper() == playlist.upper():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n break\n else:\n self.playlists[playlist_name]=[]\n print(\"Successfully created new playlist: \" + playlist_name)\n # print(\"create_playlist needs implementation\")",
"def get_spotify_token(self):\n scope = \"playlist-modify-public playlist-modify-private user-read-email user-library-modify playlist-read-private\"\n token = spotipy.util.prompt_for_user_token(\n username=self.username,\n scope=scope,\n client_id=secrets.client_id,\n client_secret=secrets.client_secret,\n redirect_uri=secrets.redirect_uri\n )\n sp = spotipy.Spotify(auth=token)\n return sp",
"def obj_create(self, bundle, request=None, **kwargs):\n return super(PlaylistResource, self).obj_create(bundle, request, user=request.user)",
"def add_to_playlist(self, playlist_uri=\"spotify:playlist:3VUBchphbcLwE5WdqBW3gv\", user=\"robbo1992\"):\n if playlist_uri is None or self.song_uri is None:\n log.warn(\"Object attributes are None, cannot add to playlist.\")\n return\n else:\n log.debug(\"Adding song %s to playlist.\" %str(self.song_uri))\n results = self.sp.user_playlist_add_tracks(user, playlist_uri, [self.song_uri])\n log.debug(\"Adding to playlist results: %s\" % results)"
]
| [
"0.8058813",
"0.80005246",
"0.7989324",
"0.79241043",
"0.7740644",
"0.77238566",
"0.7633254",
"0.7545742",
"0.7387511",
"0.73627365",
"0.72458297",
"0.72355944",
"0.70558184",
"0.6973023",
"0.6844902",
"0.68256104",
"0.676774",
"0.6748794",
"0.67386353",
"0.6737213",
"0.6714734",
"0.6712567",
"0.67062396",
"0.66823715",
"0.6646705",
"0.6625129",
"0.6564914",
"0.65467966",
"0.6538672",
"0.6499691"
]
| 0.80740494 | 0 |
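The `create_playlist` document in the record above assumes an already-authenticated `spotipy.Spotify` client and a pandas DataFrame of recommendations carrying Spotify track IDs in an `id` column; neither is shown in the record. A minimal usage sketch under those assumptions (the client credentials, redirect URI, playlist name, and track IDs below are placeholders, not values from the original source):

```python
import pandas as pd
import spotipy
from spotipy.oauth2 import SpotifyOAuth

# Authenticated client; client ID, secret, and redirect URI are placeholders.
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(
    client_id="YOUR_CLIENT_ID",
    client_secret="YOUR_CLIENT_SECRET",
    redirect_uri="http://localhost:8888/callback",
    scope="playlist-modify-public playlist-read-private",
))

# Recommendations frame with Spotify track IDs in an 'id' column, matching
# what create_playlist expects; the IDs here are placeholders.
recommendations = pd.DataFrame({"id": ["11dFghVXANMlKmJXsNCbNl",
                                       "7ouMYWpwJ422jRcDASZB7P"]})

# Calls the create_playlist function from the record above.
message = create_playlist(
    user_id=sp.current_user()["id"],
    sp=sp,
    recommendations=recommendations,
    name="Recommended tracks",
    description="Playlist generated from recommendations",
)
print(message)
```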
Gets the policy_filter of this OrganizationPolicyAssignmentResponse. | def policy_filter(self):
return self._policy_filter | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def policy(self) -> HwPolicy:\n return self._policy",
"def policy(self) -> typing.Optional[\"BucketPolicy\"]:\n return jsii.get(self, \"policy\")",
"def policy(self) -> typing.Optional[\"BucketPolicy\"]:\n return jsii.get(self, \"policy\")",
"def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> pulumi.Output['outputs.ServicePolicy']:\n return pulumi.get(self, \"policy\")",
"def permission_policy(self):\n return self._permission_policy",
"def get_policy(self):\n\n return",
"def policy(self) -> Optional[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy\")",
"def policy_parameters(self) -> Optional['outputs.PolicyParametersResponse']:\n return pulumi.get(self, \"policy_parameters\")",
"def policy(self) -> Optional[pulumi.Input['ServicePolicyArgs']]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> Optional[pulumi.Input['ServicePolicyArgs']]:\n return pulumi.get(self, \"policy\")",
"def policy(cls):\n return relationship.many_to_one(cls, 'policy')",
"def policy_document(self) -> aws_cdk.aws_iam.PolicyDocument:\n return jsii.get(self, \"policyDocument\")",
"def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:\n return response",
"def policy_info(self) -> 'outputs.PolicyInfoResponse':\n return pulumi.get(self, \"policy_info\")",
"def policies(self):\n return self._policies",
"def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")",
"def policy_data(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy_data\")",
"def get_policy(self):\n return self.agent.get_policy()",
"def policies(self):\n return self._data.get('policies')",
"def permission_policies(self) -> pulumi.Output[Optional[Sequence['outputs.AccessConfigurationPermissionPolicy']]]:\n return pulumi.get(self, \"permission_policies\")",
"def policy_assignment_properties(self) -> pulumi.Output['outputs.PolicyAssignmentPropertiesResponse']:\n return pulumi.get(self, \"policy_assignment_properties\")",
"def policy_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"policy_uri\")",
"def policy_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"policy_id\")"
]
| [
"0.64426523",
"0.63347477",
"0.63347477",
"0.6254262",
"0.6254262",
"0.6254262",
"0.6232846",
"0.6154645",
"0.6054802",
"0.6038672",
"0.5961586",
"0.5961586",
"0.5961586",
"0.59271795",
"0.5914966",
"0.5914966",
"0.59142625",
"0.5909025",
"0.59050524",
"0.58475024",
"0.58427197",
"0.5807814",
"0.5807814",
"0.5792533",
"0.57515126",
"0.57444024",
"0.5736227",
"0.56992155",
"0.5653607",
"0.56527936"
]
| 0.7547869 | 0 |
Sets the policy_filter of this OrganizationPolicyAssignmentResponse. | def policy_filter(self, policy_filter):
self._policy_filter = policy_filter | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setPolicy(self, value):\n return self._set(policy=value)",
"def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:\n return response",
"def policy_id(self, policy_id):\n\n self._policy_id = policy_id",
"def policy_id(self, policy_id):\n\n self._policy_id = policy_id",
"def policy_filter(self):\n return self._policy_filter",
"def policies(self, policies):\n\n self._policies = policies",
"def filter(self, filter):\n self._filter = filter",
"def policyid(self, policyid):\n self._policyid = policyid",
"def permission_policy(self, permission_policy):\n allowed_values = [\"PERMISSION_POLICY_PROMPT\", \"PERMISSION_POLICY_AUTO_GRANT\", \"PERMISSION_POLICY_AUTO_DENY\"]\n if permission_policy not in allowed_values:\n raise ValueError(\n \"Invalid value for `permission_policy` ({0}), must be one of {1}\"\n .format(permission_policy, allowed_values)\n )\n\n self._permission_policy = permission_policy",
"def set_policy (self, policy = None, args = (), policy_cleanup = None):\n if policy == self.policy:\n # same policy; might want to change args/cleanup function, though\n self._policy_args = args\n if policy is not None and not isinstance(policy, basestring):\n self._policy_cleanup = policy_cleanup\n return\n # perform cleanup for current policy, if any\n if isinstance(self.policy, basestring):\n # built-in\n try:\n POLICY_CLEANUP[self.policy](self)\n except AttributeError:\n pass\n elif self.policy is not None and self._policy_cleanup is not None:\n # custom\n self._policy_cleanup(self)\n del self._policy_cleanup\n # set new policy\n self.policy = policy\n if policy is None:\n # if disabling scrolling, clean up some attributes we won't need\n try:\n del self._scroll_fn, self._policy_args\n except AttributeError:\n pass\n else:\n self._policy_args = args if args else ()\n if isinstance(policy, basestring):\n # built-in\n self._scroll_fn = POLICY_SCROLL[policy]\n else:\n # custom\n self._scroll_fn = policy\n self._policy_cleanup = policy_cleanup",
"def policy_name(self, policy_name):\n\n self._policy_name = policy_name",
"def policy_name(self, policy_name):\n\n self._policy_name = policy_name",
"def set_iam_policy(\n self,\n ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"set_iam_policy\" not in self._stubs:\n self._stubs[\"set_iam_policy\"] = self.grpc_channel.unary_unary(\n \"/google.iam.v1.IAMPolicy/SetIamPolicy\",\n request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,\n response_deserializer=policy_pb2.Policy.FromString,\n )\n return self._stubs[\"set_iam_policy\"]",
"def set_iam_policy(\n self,\n ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"set_iam_policy\" not in self._stubs:\n self._stubs[\"set_iam_policy\"] = self.grpc_channel.unary_unary(\n \"/google.iam.v1.IAMPolicy/SetIamPolicy\",\n request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,\n response_deserializer=policy_pb2.Policy.FromString,\n )\n return self._stubs[\"set_iam_policy\"]",
"def set_actor_policy(self, actor_policy):\n raise NotImplementedError",
"def set_policy(self, name, policy):\n client = self.connect(VAULT_TOKEN)\n client.set_policy(name, policy)",
"def policy_prefix(self, policy_prefix):\n\n self._policy_prefix = policy_prefix",
"def set_target_policy(self, policy):\n self.target_policy = policy",
"def setFilter(self, afilter):\n\n if afilter in (self.FilterU, self.FilterG, self.FilterR, self.FilterI, self.FilterZ, self.FilterY):\n self.filter = afilter\n else:\n raise ValueError(\"No '%s' filter.\" % afilter)",
"def phone_policy(self, phone_policy):\n\n self._phone_policy = phone_policy",
"def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:\n return response",
"def set_policyname(self, policyname):\n self.options['policyname'] = policyname",
"def enable_alert_policies(\n self,\n project_id: str = PROVIDE_PROJECT_ID,\n filter_: str | None = None,\n retry: Retry | _MethodDefault = DEFAULT,\n timeout: float | None = None,\n metadata: Sequence[tuple[str, str]] = (),\n ) -> None:\n self._toggle_policy_status(\n new_state=True,\n project_id=project_id,\n filter_=filter_,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )",
"def set_policyname(self, policyname):\n self.options[\"policyname\"] = policyname",
"def policy_types(self, policy_types):\n\n self._policy_types = policy_types",
"def __assign_policy_def(self):\n\n self.logger.info(\n f\"Creating policy assignment of definition {self.policy_id} to assignment {self.assignment_id}\"\n )\n policy_assignment_res = self.interactor.put_policy_assignment(\n self.policy_id, self.assignment_id\n )\n\n if policy_assignment_res.status_code != 201:\n self.output_res[\"result\"][\"status\"] = \"ERROR\"\n self.output_res[\"result\"][\n \"message\"\n ] = f\"Policy assignment {self.assignment_id} could not be created - {policy_assignment_res.status_code}: {policy_assignment_res.text}\"\n\n self.running_evaluations[self.eval_id] = self.output_res\n return False\n\n return True",
"def update_Policy(self,inputpolicy):\n \n policyob = self.SD_Map.retrieve_ob(inputpolicy)\n policyob.values[-1] = self.PolicyDicts[inputpolicy][self.translate(self.policy_option_vars[inputpolicy].get(),\n input_language = self.language,\n output_language = 'english')]",
"def put_group_policy(self, group_name, policy_name, policy_json):\r\n params = {'GroupName' : group_name,\r\n 'PolicyName' : policy_name,\r\n 'PolicyDocument' : policy_json}\r\n return self.get_response('PutGroupPolicy', params, verb='POST')",
"def setAcceptPolicy(self, policy):\n if not self.__loaded:\n self.__load()\n \n if policy > self.AcceptMax:\n return\n if policy == self.__acceptCookies:\n return\n \n self.__acceptCookies = policy\n self.__saveTimer.changeOccurred()",
"def UpdatePolicy(self, request, global_params=None):\n config = self.GetMethodConfig('UpdatePolicy')\n return self._RunMethod(\n config, request, global_params=global_params)"
]
| [
"0.6124238",
"0.6082827",
"0.559081",
"0.559081",
"0.5558075",
"0.55172706",
"0.5390123",
"0.5359806",
"0.53372866",
"0.52970994",
"0.51797837",
"0.51797837",
"0.50907636",
"0.50907636",
"0.5052488",
"0.50252205",
"0.5023048",
"0.48849055",
"0.48635036",
"0.48629102",
"0.48042977",
"0.47763312",
"0.4775756",
"0.47535124",
"0.47365415",
"0.47346237",
"0.47059637",
"0.46827653",
"0.4675648",
"0.46712726"
]
| 0.76615846 | 0 |
Converts a quaternion frame into an Euler frame | def get_euler_frame(quaternionion_frame):
euler_frame = list(quaternionion_frame[:3])
for quaternion in gen_4_tuples(quaternionion_frame[3:]):
euler_frame += quaternion_to_euler(quaternion)
return euler_frame | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_quaternion_to_euler(quaternion_frames):\n\n def gen_4_tuples(it):\n \"\"\"Generator of n-tuples from iterable\"\"\"\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))\n\n def get_euler_frame(quaternionion_frame):\n \"\"\"Converts a quaternion frame into an Euler frame\"\"\"\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame\n\n euler_frames = list(map(get_euler_frame, quaternion_frames))\n\n return np.array(euler_frames)",
"def convert_quaternion_frames_to_euler_frames(quaternion_frames):\n\n def gen_4_tuples(it):\n \"\"\"Generator of n-tuples from iterable\"\"\"\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))\n\n def get_euler_frame(quaternionion_frame):\n \"\"\"Converts a quaternion frame into an Euler frame\"\"\"\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame\n\n euler_frames = list(map(get_euler_frame, quaternion_frames))\n\n return np.array(euler_frames)",
"def convert_quaternion_to_euler_frames(self, skeleton, quat_frames):\n joint_names = self.skeleton.get_joint_names()\n n_frames = len(quat_frames)\n n_params = sum([len(skeleton.nodes[j].channels) for j in joint_names])\n euler_frames = np.zeros((n_frames, n_params))\n for frame_idx, quat_frame in enumerate(quat_frames):\n euler_frames[frame_idx,:TRANSLATION_LEN] = quat_frame[:TRANSLATION_LEN]\n src = TRANSLATION_LEN\n dst = 0 # the translation offset will be added\n for joint_name in joint_names:\n channels = skeleton.nodes[joint_name].channels\n n_channels = len(channels)\n rotation_order = []\n rotation_offset = None\n for idx, ch in enumerate(channels):\n if ch.lower().endswith(\"rotation\"):\n rotation_order.append(ch)\n if rotation_offset is None:\n rotation_offset = idx\n\n q = quat_frame[src:src+QUAT_LEN]\n e = quaternion_to_euler(q, rotation_order)\n params_start = dst + rotation_offset\n params_end = params_start + EULER_LEN\n euler_frames[frame_idx, params_start:params_end] = e\n dst += n_channels\n src += QUAT_LEN\n return euler_frames",
"def convert_euler_to_quaternion_frame(bvh_reader, e_frame, filter_values=True, animated_joints=None):\n if animated_joints is None:\n animated_joints = list(bvh_reader.node_names.keys())\n quat_frame = collections.OrderedDict()\n for node_name in animated_joints:\n if bvh_reader.get_node_channels(node_name) is not None:\n angles, order = bvh_reader.get_node_angles(node_name, e_frame)\n quat_frame[node_name] = euler_to_quaternion(angles, order, filter_values)\n return quat_frame",
"def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)",
"def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw",
"def quat_to_euler(orientation):\n quaternion = (\n orientation.x,\n orientation.y,\n orientation.z,\n orientation.w\n )\n euler = tf.transformations.euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n return (roll,pitch,yaw)",
"def quaternion_to_euler(q):\r\n W = q[0]\r\n X = q[1]\r\n Y = q[2]\r\n Z = q[3]\r\n\r\n # roll(x - axis rotation)\r\n sinr_cosp = +2.0 * (W * X + Y * Z)\r\n cosr_cosp = +1.0 - 2.0 * (X * X + Y * Y)\r\n roll = math.atan2(sinr_cosp, cosr_cosp)\r\n\r\n # pitch(y - axis rotation)\r\n sinp = +2.0 * (W * Y - Z * X)\r\n if abs(sinp) >= 1:\r\n pitch = np.copysign(math.pi / 2, sinp) # use 90 degrees if out of range\r\n else:\r\n pitch = math.asin(sinp)\r\n\r\n # yaw(z - axis rotation)\r\n siny_cosp = +2.0 * (W * Z + X * Y)\r\n cosy_cosp = +1.0 - 2.0 * (Y * Y + Z * Z)\r\n yaw = math.atan2(siny_cosp, cosy_cosp)\r\n\r\n return roll, pitch, yaw",
"def euler_to_quaternion(euler: tuple) -> object:\n\n (yaw, pitch, roll) = (euler[0], euler[1], euler[2])\n qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)\n qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)\n qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n return qx, qy, qz, qw",
"def convert_quaternion_to_euler_frames_skipping_fixed_joints(self, frame_data, is_quaternion=False):\n skip_joints = not self.skeleton.is_motion_vector_complete(frame_data, is_quaternion)\n if not is_quaternion:\n if not skip_joints:\n euler_frames = frame_data\n else:\n euler_frames = []\n for frame in frame_data:\n euler_frame = self._get_euler_frame_from_partial_euler_frame(frame, skip_joints)\n euler_frames.append(euler_frame)\n else:\n # check whether or not \"Bip\" frames should be ignored\n euler_frames = []\n for frame in frame_data:\n if skip_joints:\n euler_frame = self._get_euler_frame_from_partial_quaternion_frame(frame)\n else:\n euler_frame = self._get_euler_frame_from_quaternion_frame(frame)\n # print len(euler_frame), euler_frame\n euler_frames.append(euler_frame)\n return euler_frames",
"def convert_euler_to_quaternion(roll, yaw, pitch):\n\n # roll (z), yaw (y), pitch (x)\n\n cy = math.cos(math.radians(roll) * 0.5)\n sy = math.sin(math.radians(roll) * 0.5)\n\n cp = math.cos(math.radians(yaw) * 0.5)\n sp = math.sin(math.radians(yaw) * 0.5)\n\n cr = math.cos(math.radians(pitch) * 0.5)\n sr = math.sin(math.radians(pitch) * 0.5)\n\n w = cy * cp * cr + sy * sp * sr\n x = cy * cp * sr - sy * sp * cr\n y = sy * cp * sr + cy * sp * cr\n z = sy * cp * cr - cy * sp * sr\n\n quat = np.array([w, x, y, z])\n quat = quat / np.linalg.norm(quat)\n return quat",
"def quaternion_from_euler(ai, aj, ak, axes='sxyz'):\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _ = _TUPLE2AXES[axes]\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n if frame:\n ai, ak = ak, ai\n if parity:\n aj = -aj\n\n ai /= 2.0\n aj /= 2.0\n # print(\"ak : {}\".format(type(ak)))\n ak /= 2.0\n ci = math.cos(ai)\n si = math.sin(ai)\n cj = math.cos(aj)\n sj = math.sin(aj)\n ck = math.cos(ak)\n sk = math.sin(ak)\n cc = ci*ck\n cs = ci*sk\n sc = si*ck\n ss = si*sk\n\n quaternion = np.empty((4, ), dtype=np.float64)\n if repetition:\n quaternion[i] = cj*(cs + sc)\n quaternion[j] = sj*(cc + ss)\n quaternion[k] = sj*(cs - sc)\n quaternion[3] = cj*(cc - ss)\n else:\n quaternion[i] = cj*sc - sj*cs\n quaternion[j] = cj*ss + sj*cc\n quaternion[k] = cj*cs - sj*sc\n quaternion[3] = cj*cc + sj*ss\n if parity:\n quaternion[j] *= -1\n\n return quaternion",
"def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians",
"def euler_to_quat(roll, pitch, yaw):\n pose = Pose()\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n pose.orientation.x = quaternion[0]\n pose.orientation.y = quaternion[1]\n pose.orientation.z = quaternion[2]\n pose.orientation.w = quaternion[3]\n return pose.orientation",
"def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians",
"def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians",
"def euler_to_quat(self, yaw):\n quat_array = t.quaternion_from_euler(0.0, 0.0, yaw)\n return Quaternion(quat_array[0], quat_array[1], quat_array[2], quat_array[3])",
"def euler_to_quaternion(yaw, pitch, roll):\r\n cy = math.cos(yaw * 0.5)\r\n sy = math.sin(yaw * 0.5)\r\n cp = math.cos(pitch * 0.5)\r\n sp = math.sin(pitch * 0.5)\r\n cr = math.cos(roll * 0.5)\r\n sr = math.sin(roll * 0.5)\r\n w = cy * cp * cr + sy * sp * sr\r\n x = cy * cp * sr - sy * sp * cr\r\n y = sy * cp * sr + cy * sp * cr\r\n z = sy * cp * cr - cy * sp * sr\r\n return w, x, y, z",
"def euler_from_quaternion(x, y, z, w):\r\n\tt0 = +2.0 * (w * x + y * z)\r\n\tt1 = +1.0 - 2.0 * (x * x + y * y)\r\n\troll_x = math.atan2(t0, t1)\r\n\r\n\tt2 = +2.0 * (w * y - z * x)\r\n\tt2 = +1.0 if t2 > +1.0 else t2\r\n\tt2 = -1.0 if t2 < -1.0 else t2\r\n\tpitch_y = math.asin(t2)\r\n\r\n\tt3 = +2.0 * (w * z + x * y)\r\n\tt4 = +1.0 - 2.0 * (y * y + z * z)\r\n\tyaw_z = math.atan2(t3, t4)\r\n\r\n\treturn roll_x, pitch_y, yaw_z # in radians\r",
"def quaternion_inv(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n numpy.negative(q[1:], q[1:])\r\n return q / numpy.dot(q, q)",
"def euler_to_quaternion(psi, theta, phi):\n # Abbreviations for the various angular functions\n cy = np.cos(psi * 0.5)\n sy = np.sin(psi * 0.5)\n cp = np.cos(theta * 0.5)\n sp = np.sin(theta * 0.5)\n cr = np.cos(phi * 0.5)\n sr = np.sin(phi * 0.5)\n\n q = np.zeros(4)\n q[0] = cy * cp * cr + sy * sp * sr\n q[1] = cy * cp * sr - sy * sp * cr\n q[2] = sy * cp * sr + cy * sp * cr\n q[3] = sy * cp * cr - cy * sp * sr\n return q",
"def invert_quaternion(quaternion):\n norm = np.linalg.norm(quaternion)\n quaternion[1:] = -1.0 * quaternion[1:]\n return quaternion / norm",
"def euler_from_quaternion(self, x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians",
"def quaternion_from_euler(ai, aj, ak, axes='sxyz'):\r\n try:\r\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\r\n except (AttributeError, KeyError):\r\n _TUPLE2AXES[axes] # noqa: validation\r\n firstaxis, parity, repetition, frame = axes\r\n\r\n i = firstaxis + 1\r\n j = _NEXT_AXIS[i+parity-1] + 1\r\n k = _NEXT_AXIS[i-parity] + 1\r\n\r\n if frame:\r\n ai, ak = ak, ai\r\n if parity:\r\n aj = -aj\r\n\r\n ai /= 2.0\r\n aj /= 2.0\r\n ak /= 2.0\r\n ci = math.cos(ai)\r\n si = math.sin(ai)\r\n cj = math.cos(aj)\r\n sj = math.sin(aj)\r\n ck = math.cos(ak)\r\n sk = math.sin(ak)\r\n cc = ci*ck\r\n cs = ci*sk\r\n sc = si*ck\r\n ss = si*sk\r\n\r\n q = numpy.empty((4, ))\r\n if repetition:\r\n q[0] = cj*(cc - ss)\r\n q[i] = cj*(cs + sc)\r\n q[j] = sj*(cc + ss)\r\n q[k] = sj*(cs - sc)\r\n else:\r\n q[0] = cj*cc + sj*ss\r\n q[i] = cj*sc - sj*cs\r\n q[j] = cj*ss + sj*cc\r\n q[k] = cj*cs - sj*sc\r\n if parity:\r\n q[j] *= -1.0\r\n\r\n return q",
"def Q2euler(self, q):\n\n\tphi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));\n\tpsi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));\n try:\n theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));\n except ValueError:\n print \"ERRO: norm(Q) = %f\" % np.sqrt(np.sum(q**2))\n theta = 0;\n\n return (phi, theta, psi)",
"def euler2quaternion(psi, theta, phi):\n if abs(psi) == 0 and abs(theta) == 0 and abs(phi) == 0:\n quaternion = np.array([1., 0., 0., 0.])\n else:\n R = euler2rot3D(psi, theta, phi)\n W = np.array([R[1, 2]-R[2, 1], R[2, 0]-R[0, 2], R[0, 1]-R[1, 0]])\n if W[0] >= 0:\n W /= np.linalg.norm(W)\n else:\n W /= np.linalg.norm(W) * -1\n theta = np.arccos(0.5 * (np.trace(R) - 1))\n CCisTheta = corrCoeff(R, angleAxis2rot3D(W, theta))\n CCisNegTheta = corrCoeff(R, angleAxis2rot3D(W, -theta))\n if CCisNegTheta > CCisTheta:\n theta = -theta\n quaternion = np.array([np.cos(theta/2.), np.sin(theta/2.)*W[0], np.sin(theta/2.)*W[1], np.sin(theta/2.)*W[2]])\n if quaternion[0] < 0:\n quaternion *= -1\n return quaternion",
"def from_quaternion(self, q: np.ndarray) -> np.ndarray:\n if q is None:\n return np.identity(3)\n if q.shape[-1]!=4 or q.ndim>2:\n raise ValueError(\"Quaternion must be of the form (4,) or (N, 4)\")\n if q.ndim>1:\n q /= np.linalg.norm(q, axis=1)[:, None] # Normalize\n R = np.zeros((q.shape[0], 3, 3))\n R[:, 0, 0] = 1.0 - 2.0*(q[:, 2]**2 + q[:, 3]**2)\n R[:, 1, 0] = 2.0*(q[:, 1]*q[:, 2]+q[:, 0]*q[:, 3])\n R[:, 2, 0] = 2.0*(q[:, 1]*q[:, 3]-q[:, 0]*q[:, 2])\n R[:, 0, 1] = 2.0*(q[:, 1]*q[:, 2]-q[:, 0]*q[:, 3])\n R[:, 1, 1] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 3]**2)\n R[:, 2, 1] = 2.0*(q[:, 0]*q[:, 1]+q[:, 2]*q[:, 3])\n R[:, 0, 2] = 2.0*(q[:, 1]*q[:, 3]+q[:, 0]*q[:, 2])\n R[:, 1, 2] = 2.0*(q[:, 2]*q[:, 3]-q[:, 0]*q[:, 1])\n R[:, 2, 2] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 2]**2)\n return R\n q /= np.linalg.norm(q)\n return np.array([\n [1.0-2.0*(q[2]**2+q[3]**2), 2.0*(q[1]*q[2]-q[0]*q[3]), 2.0*(q[1]*q[3]+q[0]*q[2])],\n [2.0*(q[1]*q[2]+q[0]*q[3]), 1.0-2.0*(q[1]**2+q[3]**2), 2.0*(q[2]*q[3]-q[0]*q[1])],\n [2.0*(q[1]*q[3]-q[0]*q[2]), 2.0*(q[0]*q[1]+q[2]*q[3]), 1.0-2.0*(q[1]**2+q[2]**2)]])",
"def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw",
"def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate",
"def test_quaternion_invert():\n q = np.array([0.58183503, -0.75119889, -0.24622332, 0.19116072])\n q_inv = pr.q_conj(q)\n q_q_inv = pr.concatenate_quaternions(q, q_inv)\n assert_array_almost_equal(pr.q_id, q_q_inv)"
]
| [
"0.7414339",
"0.73146856",
"0.71095103",
"0.7028929",
"0.69413733",
"0.6857715",
"0.68519485",
"0.6828414",
"0.67464375",
"0.65595144",
"0.6482063",
"0.6433578",
"0.641723",
"0.64079833",
"0.6349131",
"0.63428414",
"0.63100946",
"0.62874687",
"0.62510014",
"0.6169634",
"0.61358565",
"0.61259145",
"0.61080706",
"0.6059538",
"0.60146546",
"0.59090924",
"0.5861832",
"0.5848613",
"0.5844599",
"0.58272254"
]
| 0.7801406 | 0 |
Converts a quaternion frame into an Euler frame | def get_euler_frame(quaternionion_frame):
euler_frame = list(quaternionion_frame[:3])
for quaternion in gen_4_tuples(quaternionion_frame[3:]):
euler_frame += quaternion_to_euler(quaternion)
return euler_frame | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_quaternion_to_euler(quaternion_frames):\n\n def gen_4_tuples(it):\n \"\"\"Generator of n-tuples from iterable\"\"\"\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))\n\n def get_euler_frame(quaternionion_frame):\n \"\"\"Converts a quaternion frame into an Euler frame\"\"\"\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame\n\n euler_frames = list(map(get_euler_frame, quaternion_frames))\n\n return np.array(euler_frames)",
"def convert_quaternion_frames_to_euler_frames(quaternion_frames):\n\n def gen_4_tuples(it):\n \"\"\"Generator of n-tuples from iterable\"\"\"\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))\n\n def get_euler_frame(quaternionion_frame):\n \"\"\"Converts a quaternion frame into an Euler frame\"\"\"\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame\n\n euler_frames = list(map(get_euler_frame, quaternion_frames))\n\n return np.array(euler_frames)",
"def convert_quaternion_to_euler_frames(self, skeleton, quat_frames):\n joint_names = self.skeleton.get_joint_names()\n n_frames = len(quat_frames)\n n_params = sum([len(skeleton.nodes[j].channels) for j in joint_names])\n euler_frames = np.zeros((n_frames, n_params))\n for frame_idx, quat_frame in enumerate(quat_frames):\n euler_frames[frame_idx,:TRANSLATION_LEN] = quat_frame[:TRANSLATION_LEN]\n src = TRANSLATION_LEN\n dst = 0 # the translation offset will be added\n for joint_name in joint_names:\n channels = skeleton.nodes[joint_name].channels\n n_channels = len(channels)\n rotation_order = []\n rotation_offset = None\n for idx, ch in enumerate(channels):\n if ch.lower().endswith(\"rotation\"):\n rotation_order.append(ch)\n if rotation_offset is None:\n rotation_offset = idx\n\n q = quat_frame[src:src+QUAT_LEN]\n e = quaternion_to_euler(q, rotation_order)\n params_start = dst + rotation_offset\n params_end = params_start + EULER_LEN\n euler_frames[frame_idx, params_start:params_end] = e\n dst += n_channels\n src += QUAT_LEN\n return euler_frames",
"def convert_euler_to_quaternion_frame(bvh_reader, e_frame, filter_values=True, animated_joints=None):\n if animated_joints is None:\n animated_joints = list(bvh_reader.node_names.keys())\n quat_frame = collections.OrderedDict()\n for node_name in animated_joints:\n if bvh_reader.get_node_channels(node_name) is not None:\n angles, order = bvh_reader.get_node_angles(node_name, e_frame)\n quat_frame[node_name] = euler_to_quaternion(angles, order, filter_values)\n return quat_frame",
"def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)",
"def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw",
"def quat_to_euler(orientation):\n quaternion = (\n orientation.x,\n orientation.y,\n orientation.z,\n orientation.w\n )\n euler = tf.transformations.euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n return (roll,pitch,yaw)",
"def quaternion_to_euler(q):\r\n W = q[0]\r\n X = q[1]\r\n Y = q[2]\r\n Z = q[3]\r\n\r\n # roll(x - axis rotation)\r\n sinr_cosp = +2.0 * (W * X + Y * Z)\r\n cosr_cosp = +1.0 - 2.0 * (X * X + Y * Y)\r\n roll = math.atan2(sinr_cosp, cosr_cosp)\r\n\r\n # pitch(y - axis rotation)\r\n sinp = +2.0 * (W * Y - Z * X)\r\n if abs(sinp) >= 1:\r\n pitch = np.copysign(math.pi / 2, sinp) # use 90 degrees if out of range\r\n else:\r\n pitch = math.asin(sinp)\r\n\r\n # yaw(z - axis rotation)\r\n siny_cosp = +2.0 * (W * Z + X * Y)\r\n cosy_cosp = +1.0 - 2.0 * (Y * Y + Z * Z)\r\n yaw = math.atan2(siny_cosp, cosy_cosp)\r\n\r\n return roll, pitch, yaw",
"def euler_to_quaternion(euler: tuple) -> object:\n\n (yaw, pitch, roll) = (euler[0], euler[1], euler[2])\n qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)\n qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)\n qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n return qx, qy, qz, qw",
"def convert_quaternion_to_euler_frames_skipping_fixed_joints(self, frame_data, is_quaternion=False):\n skip_joints = not self.skeleton.is_motion_vector_complete(frame_data, is_quaternion)\n if not is_quaternion:\n if not skip_joints:\n euler_frames = frame_data\n else:\n euler_frames = []\n for frame in frame_data:\n euler_frame = self._get_euler_frame_from_partial_euler_frame(frame, skip_joints)\n euler_frames.append(euler_frame)\n else:\n # check whether or not \"Bip\" frames should be ignored\n euler_frames = []\n for frame in frame_data:\n if skip_joints:\n euler_frame = self._get_euler_frame_from_partial_quaternion_frame(frame)\n else:\n euler_frame = self._get_euler_frame_from_quaternion_frame(frame)\n # print len(euler_frame), euler_frame\n euler_frames.append(euler_frame)\n return euler_frames",
"def convert_euler_to_quaternion(roll, yaw, pitch):\n\n # roll (z), yaw (y), pitch (x)\n\n cy = math.cos(math.radians(roll) * 0.5)\n sy = math.sin(math.radians(roll) * 0.5)\n\n cp = math.cos(math.radians(yaw) * 0.5)\n sp = math.sin(math.radians(yaw) * 0.5)\n\n cr = math.cos(math.radians(pitch) * 0.5)\n sr = math.sin(math.radians(pitch) * 0.5)\n\n w = cy * cp * cr + sy * sp * sr\n x = cy * cp * sr - sy * sp * cr\n y = sy * cp * sr + cy * sp * cr\n z = sy * cp * cr - cy * sp * sr\n\n quat = np.array([w, x, y, z])\n quat = quat / np.linalg.norm(quat)\n return quat",
"def quaternion_from_euler(ai, aj, ak, axes='sxyz'):\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _ = _TUPLE2AXES[axes]\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n if frame:\n ai, ak = ak, ai\n if parity:\n aj = -aj\n\n ai /= 2.0\n aj /= 2.0\n # print(\"ak : {}\".format(type(ak)))\n ak /= 2.0\n ci = math.cos(ai)\n si = math.sin(ai)\n cj = math.cos(aj)\n sj = math.sin(aj)\n ck = math.cos(ak)\n sk = math.sin(ak)\n cc = ci*ck\n cs = ci*sk\n sc = si*ck\n ss = si*sk\n\n quaternion = np.empty((4, ), dtype=np.float64)\n if repetition:\n quaternion[i] = cj*(cs + sc)\n quaternion[j] = sj*(cc + ss)\n quaternion[k] = sj*(cs - sc)\n quaternion[3] = cj*(cc - ss)\n else:\n quaternion[i] = cj*sc - sj*cs\n quaternion[j] = cj*ss + sj*cc\n quaternion[k] = cj*cs - sj*sc\n quaternion[3] = cj*cc + sj*ss\n if parity:\n quaternion[j] *= -1\n\n return quaternion",
"def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians",
"def euler_to_quat(roll, pitch, yaw):\n pose = Pose()\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n pose.orientation.x = quaternion[0]\n pose.orientation.y = quaternion[1]\n pose.orientation.z = quaternion[2]\n pose.orientation.w = quaternion[3]\n return pose.orientation",
"def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians",
"def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians",
"def euler_to_quat(self, yaw):\n quat_array = t.quaternion_from_euler(0.0, 0.0, yaw)\n return Quaternion(quat_array[0], quat_array[1], quat_array[2], quat_array[3])",
"def euler_to_quaternion(yaw, pitch, roll):\r\n cy = math.cos(yaw * 0.5)\r\n sy = math.sin(yaw * 0.5)\r\n cp = math.cos(pitch * 0.5)\r\n sp = math.sin(pitch * 0.5)\r\n cr = math.cos(roll * 0.5)\r\n sr = math.sin(roll * 0.5)\r\n w = cy * cp * cr + sy * sp * sr\r\n x = cy * cp * sr - sy * sp * cr\r\n y = sy * cp * sr + cy * sp * cr\r\n z = sy * cp * cr - cy * sp * sr\r\n return w, x, y, z",
"def euler_from_quaternion(x, y, z, w):\r\n\tt0 = +2.0 * (w * x + y * z)\r\n\tt1 = +1.0 - 2.0 * (x * x + y * y)\r\n\troll_x = math.atan2(t0, t1)\r\n\r\n\tt2 = +2.0 * (w * y - z * x)\r\n\tt2 = +1.0 if t2 > +1.0 else t2\r\n\tt2 = -1.0 if t2 < -1.0 else t2\r\n\tpitch_y = math.asin(t2)\r\n\r\n\tt3 = +2.0 * (w * z + x * y)\r\n\tt4 = +1.0 - 2.0 * (y * y + z * z)\r\n\tyaw_z = math.atan2(t3, t4)\r\n\r\n\treturn roll_x, pitch_y, yaw_z # in radians\r",
"def quaternion_inv(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n numpy.negative(q[1:], q[1:])\r\n return q / numpy.dot(q, q)",
"def euler_to_quaternion(psi, theta, phi):\n # Abbreviations for the various angular functions\n cy = np.cos(psi * 0.5)\n sy = np.sin(psi * 0.5)\n cp = np.cos(theta * 0.5)\n sp = np.sin(theta * 0.5)\n cr = np.cos(phi * 0.5)\n sr = np.sin(phi * 0.5)\n\n q = np.zeros(4)\n q[0] = cy * cp * cr + sy * sp * sr\n q[1] = cy * cp * sr - sy * sp * cr\n q[2] = sy * cp * sr + cy * sp * cr\n q[3] = sy * cp * cr - cy * sp * sr\n return q",
"def invert_quaternion(quaternion):\n norm = np.linalg.norm(quaternion)\n quaternion[1:] = -1.0 * quaternion[1:]\n return quaternion / norm",
"def euler_from_quaternion(self, x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians",
"def quaternion_from_euler(ai, aj, ak, axes='sxyz'):\r\n try:\r\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\r\n except (AttributeError, KeyError):\r\n _TUPLE2AXES[axes] # noqa: validation\r\n firstaxis, parity, repetition, frame = axes\r\n\r\n i = firstaxis + 1\r\n j = _NEXT_AXIS[i+parity-1] + 1\r\n k = _NEXT_AXIS[i-parity] + 1\r\n\r\n if frame:\r\n ai, ak = ak, ai\r\n if parity:\r\n aj = -aj\r\n\r\n ai /= 2.0\r\n aj /= 2.0\r\n ak /= 2.0\r\n ci = math.cos(ai)\r\n si = math.sin(ai)\r\n cj = math.cos(aj)\r\n sj = math.sin(aj)\r\n ck = math.cos(ak)\r\n sk = math.sin(ak)\r\n cc = ci*ck\r\n cs = ci*sk\r\n sc = si*ck\r\n ss = si*sk\r\n\r\n q = numpy.empty((4, ))\r\n if repetition:\r\n q[0] = cj*(cc - ss)\r\n q[i] = cj*(cs + sc)\r\n q[j] = sj*(cc + ss)\r\n q[k] = sj*(cs - sc)\r\n else:\r\n q[0] = cj*cc + sj*ss\r\n q[i] = cj*sc - sj*cs\r\n q[j] = cj*ss + sj*cc\r\n q[k] = cj*cs - sj*sc\r\n if parity:\r\n q[j] *= -1.0\r\n\r\n return q",
"def Q2euler(self, q):\n\n\tphi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));\n\tpsi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));\n try:\n theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));\n except ValueError:\n print \"ERRO: norm(Q) = %f\" % np.sqrt(np.sum(q**2))\n theta = 0;\n\n return (phi, theta, psi)",
"def euler2quaternion(psi, theta, phi):\n if abs(psi) == 0 and abs(theta) == 0 and abs(phi) == 0:\n quaternion = np.array([1., 0., 0., 0.])\n else:\n R = euler2rot3D(psi, theta, phi)\n W = np.array([R[1, 2]-R[2, 1], R[2, 0]-R[0, 2], R[0, 1]-R[1, 0]])\n if W[0] >= 0:\n W /= np.linalg.norm(W)\n else:\n W /= np.linalg.norm(W) * -1\n theta = np.arccos(0.5 * (np.trace(R) - 1))\n CCisTheta = corrCoeff(R, angleAxis2rot3D(W, theta))\n CCisNegTheta = corrCoeff(R, angleAxis2rot3D(W, -theta))\n if CCisNegTheta > CCisTheta:\n theta = -theta\n quaternion = np.array([np.cos(theta/2.), np.sin(theta/2.)*W[0], np.sin(theta/2.)*W[1], np.sin(theta/2.)*W[2]])\n if quaternion[0] < 0:\n quaternion *= -1\n return quaternion",
"def from_quaternion(self, q: np.ndarray) -> np.ndarray:\n if q is None:\n return np.identity(3)\n if q.shape[-1]!=4 or q.ndim>2:\n raise ValueError(\"Quaternion must be of the form (4,) or (N, 4)\")\n if q.ndim>1:\n q /= np.linalg.norm(q, axis=1)[:, None] # Normalize\n R = np.zeros((q.shape[0], 3, 3))\n R[:, 0, 0] = 1.0 - 2.0*(q[:, 2]**2 + q[:, 3]**2)\n R[:, 1, 0] = 2.0*(q[:, 1]*q[:, 2]+q[:, 0]*q[:, 3])\n R[:, 2, 0] = 2.0*(q[:, 1]*q[:, 3]-q[:, 0]*q[:, 2])\n R[:, 0, 1] = 2.0*(q[:, 1]*q[:, 2]-q[:, 0]*q[:, 3])\n R[:, 1, 1] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 3]**2)\n R[:, 2, 1] = 2.0*(q[:, 0]*q[:, 1]+q[:, 2]*q[:, 3])\n R[:, 0, 2] = 2.0*(q[:, 1]*q[:, 3]+q[:, 0]*q[:, 2])\n R[:, 1, 2] = 2.0*(q[:, 2]*q[:, 3]-q[:, 0]*q[:, 1])\n R[:, 2, 2] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 2]**2)\n return R\n q /= np.linalg.norm(q)\n return np.array([\n [1.0-2.0*(q[2]**2+q[3]**2), 2.0*(q[1]*q[2]-q[0]*q[3]), 2.0*(q[1]*q[3]+q[0]*q[2])],\n [2.0*(q[1]*q[2]+q[0]*q[3]), 1.0-2.0*(q[1]**2+q[3]**2), 2.0*(q[2]*q[3]-q[0]*q[1])],\n [2.0*(q[1]*q[3]-q[0]*q[2]), 2.0*(q[0]*q[1]+q[2]*q[3]), 1.0-2.0*(q[1]**2+q[2]**2)]])",
"def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw",
"def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate",
"def test_quaternion_invert():\n q = np.array([0.58183503, -0.75119889, -0.24622332, 0.19116072])\n q_inv = pr.q_conj(q)\n q_q_inv = pr.concatenate_quaternions(q, q_inv)\n assert_array_almost_equal(pr.q_id, q_q_inv)"
]
| [
"0.7414339",
"0.73146856",
"0.71095103",
"0.7028929",
"0.69413733",
"0.6857715",
"0.68519485",
"0.6828414",
"0.67464375",
"0.65595144",
"0.6482063",
"0.6433578",
"0.641723",
"0.64079833",
"0.6349131",
"0.63428414",
"0.63100946",
"0.62874687",
"0.62510014",
"0.6169634",
"0.61358565",
"0.61259145",
"0.61080706",
"0.6059538",
"0.60146546",
"0.59090924",
"0.5861832",
"0.5848613",
"0.5844599",
"0.58272254"
]
| 0.7801406 | 1 |
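Both quaternion-frame records above rely on a `gen_4_tuples` generator and a `quaternion_to_euler` helper defined elsewhere; `gen_4_tuples` appears verbatim in the parent functions listed as negatives, while the helper's component order and output units are not shown. A minimal sketch, assuming (w, x, y, z) input order and Euler angles returned in degrees:

```python
import math

def gen_4_tuples(it):
    # Group the flat channel list into quadruples, as in the parent
    # functions quoted in these records.
    return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))

def quaternion_to_euler(q):
    # Hypothetical helper: (w, x, y, z) order and degree output are
    # assumptions, not taken from the original code base.
    w, x, y, z = q
    # roll (x-axis rotation)
    roll = math.atan2(2.0 * (w * x + y * z), 1.0 - 2.0 * (x * x + y * y))
    # pitch (y-axis rotation), clamped to avoid domain errors at the poles
    sinp = max(-1.0, min(1.0, 2.0 * (w * y - z * x)))
    pitch = math.asin(sinp)
    # yaw (z-axis rotation)
    yaw = math.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))
    return [math.degrees(roll), math.degrees(pitch), math.degrees(yaw)]
```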
Prepare an InteractiveSession, Open a ReproducibleSession, (optionally) Add a metadata command, Close the ReproducibleSession | def new_case_study(with_metadata_command=0):
isess = InteractiveSession(DBSession)
isess.identify({"user": "test_user"}, testing=True) # Pass just user name.
isess.open_reproducible_session(case_study_version_uuid=None,
recover_previous_state=None,
cr_new=None,
allow_saving=True)
issues = None
output = None
if with_metadata_command > 0:
cmd = get_metadata_command()
if with_metadata_command & 1:
issues, output = isess.execute_executable_command(cmd)
if with_metadata_command & 2:
isess.register_executable_command(cmd)
uuid, v_uuid, cs_uuid = isess.close_reproducible_session(issues, output, save=True)
return uuid, isess | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_start_interactive_session(sample_serial_workflow_in_db):\n with patch.multiple(\"reana_workflow_controller.k8s\",\n current_k8s_corev1_api_client=DEFAULT,\n current_k8s_extensions_v1beta1=DEFAULT) as mocks:\n kwrm = KubernetesWorkflowRunManager(sample_serial_workflow_in_db)\n if len(INTERACTIVE_SESSION_TYPES):\n kwrm.start_interactive_session(INTERACTIVE_SESSION_TYPES[0])\n mocks['current_k8s_extensions_v1beta1'].\\\n create_namespaced_deployment.assert_called_once()\n mocks['current_k8s_corev1_api_client'].\\\n create_namespaced_service.assert_called_once()\n mocks['current_k8s_extensions_v1beta1'].\\\n create_namespaced_ingress.assert_called_once()",
"def test_interactive(self):\n self.executor.command(['python']).interactive()",
"def test_atomic_creation_of_interactive_session(sample_serial_workflow_in_db):\n mocked_k8s_client = Mock()\n mocked_k8s_client.create_namespaced_deployment =\\\n Mock(side_effect=ApiException(\n reason='Error while creating deployment'))\n # Raise 404 when deleting Deployment, because it doesn't exist\n mocked_k8s_client.delete_namespaced_deployment =\\\n Mock(side_effect=ApiException(\n reason='Not Found'))\n with patch.multiple('reana_workflow_controller.k8s',\n current_k8s_extensions_v1beta1=mocked_k8s_client,\n current_k8s_corev1_api_client=DEFAULT) as mocks:\n try:\n kwrm = KubernetesWorkflowRunManager(sample_serial_workflow_in_db)\n if len(INTERACTIVE_SESSION_TYPES):\n kwrm.start_interactive_session(INTERACTIVE_SESSION_TYPES[0])\n except REANAInteractiveSessionError:\n mocks['current_k8s_corev1_api_client']\\\n .delete_namespaced_service.assert_called_once()\n mocked_k8s_client.delete_namespaced_ingress.assert_called_once()\n mocked_k8s_client.delete_namespaced_deployment.assert_called_once()\n assert sample_serial_workflow_in_db.interactive_session is None",
"def startSession():\n sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n return sess",
"def init_session(session=\"ipython\", pretty_print=True, order=None, use_unicode=None, message=None, argv=[]):\n import os, sys\n\n def init_IPython():\n return IPython.Shell.make_IPython(argv)\n\n def init_Python():\n import code\n\n class HistoryConsole(code.InteractiveConsole):\n def __init__(self):\n code.InteractiveConsole.__init__(self)\n\n history = os.path.expanduser('~/.sympy-history')\n\n try:\n import readline, atexit\n\n readline.parse_and_bind('tab: complete')\n\n if hasattr(readline, 'read_history_file'):\n try:\n readline.read_history_file(history)\n except IOError:\n pass\n\n atexit.register(readline.write_history_file, history)\n except ImportError:\n pass\n\n return HistoryConsole()\n\n if session not in ['ipython', 'python']:\n raise ValueError(\"'%s' is not a valid session name\" % session)\n\n in_ipyshell = False\n\n try:\n import IPython\n\n ip = IPython.ipapi.get()\n\n if ip is not None:\n if session == 'ipython':\n ip, in_ipyshell = ip.IP, True\n else:\n raise ValueError(\"Can't start Python shell from IPython\")\n else:\n if session == 'ipython':\n ip = init_IPython()\n else:\n ip = init_Python()\n except ImportError:\n if session == 'ipython':\n raise\n else:\n ip = init_Python()\n\n ip.runcode(ip.compile(\"from __future__ import division\"))\n ip.runcode(ip.compile(\"from sympy.interactive import *\"))\n\n ip.runcode(ip.compile(\"init_printing(pretty_print=%s, order=%r, use_unicode=%s)\" % (pretty_print, order, use_unicode)))\n\n if not in_ipyshell:\n from sympy import __version__ as sympy_version\n py_version = \"%d.%d.%d\" % sys.version_info[:3]\n\n if session == \"ipython\":\n py_name = \"IPython\"\n else:\n py_name = \"Python\"\n welcome = \"%s console for SymPy %s (Python %s)\" % (py_name,\n sympy_version, py_version)\n\n if os.getenv('SYMPY_USE_CACHE') == 'no':\n welcome += ' (cache: off)'\n\n if message is not None:\n message = welcome + '\\n\\n' + message\n else:\n message = welcome + '\\n'\n\n ip.interact(message)\n sys.exit('Exiting ...')\n else:\n def shutdown_hook(self):\n print \"Exiting ...\"\n\n ip.set_hook('shutdown_hook', shutdown_hook)",
"def main():\n\n run_manual_session()\n # run_automated_session()",
"def session_preparation(self):\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"session paginate disable\")\n self.set_terminal_width(command='terminal width 511')",
"def session_preparation(self):\n # 0 will defer to the global delay factor\n delay_factor = self.select_delay_factor(delay_factor=0)\n self._test_channel_read()\n self.set_base_prompt()\n cmd = f\"{self.RETURN}set cli mode -page OFF{self.RETURN}\"\n self.disable_paging(command=cmd)\n time.sleep(1 * delay_factor)\n self.set_base_prompt()\n time.sleep(0.3 * delay_factor)\n self.clear_buffer()",
"def session_preparation(self):\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"screen-length 0 temporary\")\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()",
"def session_preparation(self) -> None:\n self._test_channel_read(pattern=r\">\")\n self.set_base_prompt()\n self.disable_paging(command=\"set length 0\")",
"def pytest_sessionstart(session):\n from pyrolite_meltsutil.download import install_melts\n from pyrolite_meltsutil.util.general import pyrolite_meltsutil_datafolder\n\n if not pyrolite_meltsutil_datafolder(subfolder=\"localinstall\").exists():\n install_melts(local=True) # install melts for example files etc",
"def interactive():\n IPython.start_ipython(argv=[])",
"def session_preparation(self):\n self.ansi_escape_codes = True\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"terminal datadump\")\n\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()",
"def session_preparation(self):\n self.ansi_escape_codes = True\n self._test_channel_read()\n self.set_base_prompt()\n self.set_terminal_width(command=\"terminal width 511\", pattern=\"terminal\")\n self.disable_paging()\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()",
"def initiate_session(self):\n config=tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth=True # dynamically grow the memory used on the GPU\n config.log_device_placement=True # to log device placement (on which device the operation ran)\n # (nothing gets printed in Jupyter, only if you run it standalone)\n sess=tf.compat.v1.Session(config=config)\n tf.compat.v1.keras.backend.set_session(sess)",
"def prepare_session(sess):\n init = tf.global_variables_initializer()\n sess.run(init)",
"def run_interactive():\n from cherrypy import engine\n \n # This is what quickstart does but we don't block\n engine.signals.subscribe()\n engine.start()\n #engine.block()",
"def run_if_interactive(self):\n pass",
"def interactive(parsed_args, headers, results, force=False):\n context = _read_current(parsed_args)\n interactive = context['interactive'] or force\n\n if interactive:\n print('\\nContainer registry access:')\n print('--------------------------')\n\n for iv in VARS:\n prompt_name = iv.replace('_', ' ').title()\n key_name = ENV_PREFIX + iv\n header_name = iv.lower()\n\n if interactive:\n if settings.redact.key_is_private(key_name):\n is_secret = True\n else:\n is_secret = False\n value = prompt(prompt_name, context[key_name], secret=is_secret)\n else:\n value = context[key_name]\n\n if value is not None and value != '':\n settings_set(key_name, value)\n\n headers.append(header_name)\n results.append(settings.redact.auto_redact(header_name, value))\n\n return (headers, results)",
"def before_run(self, run_context):\n logging.info('Before creating the session...')\n\n self._global_step_value = run_context.session.run(self._global_step)\n if self._global_step_value % self._iterations_per_loop == 0:\n\n # Calling `play` the environment roll out a trajectory of length\n # `self._max_horizon`. Currently, we support two modes for play:\n # (1) stochastic play (similar to PPO)\n # (2) Monte-Carlo Tree Search (MCTS) play\n self._env_wrapper.play(self._max_horizon)\n\n # Computes explained variance between predicted values (from network)\n # and computed return values from environment.\n ev = math_utils.explained_variance(\n np.asarray(self._env_wrapper.trajectory_values),\n np.asarray(self._env_wrapper.trajectory_returns))\n tf_utils.add_summary(\n float(ev), 'Variation/explained_variance', self._global_step_value,\n self.summary_writer)\n\n if type(self._env_wrapper).__name__ == 'Env':\n # Update queues for episode data\n # (length of episodes and episode rewards)\n self._episode_reward_buf.extend(\n self._env_wrapper.trajectory_per_episode_rewards)\n self._episode_length_buf.extend(\n self._env_wrapper.trajectory_per_episode_lengths)\n else:\n self._episode_reward_buf.extend(\n self._env_wrapper.master_game.trajectory_per_episode_rewards)\n self._episode_length_buf.extend(\n self._env_wrapper.master_game.trajectory_per_episode_lengths)\n\n # Summaries for the current trajectory\n tf_utils.summary_stats(self._episode_reward_buf, 'Reward',\n 'Episode Rewards', self._global_step_value,\n self.summary_writer, False)\n tf_utils.summary_stats(self._episode_length_buf, 'Reward',\n 'Episode Length', self._global_step_value,\n self.summary_writer, False)\n\n mcts_tensor = np.full(\n np.asarray(self._env_wrapper.trajectory_values).shape,\n self._env_wrapper.mcts_sampling)\n\n run_context.session.run(\n self._iterator.initializer,\n feed_dict={\n self.features_ph['mcts_features']:\n self._env_wrapper.trajectory_states,\n self.features_ph['policy_features']:\n self._env_wrapper.policy_trajectory_states,\n self.labels_ph['action_tensor']:\n self._env_wrapper.trajectory_actions,\n self.labels_ph['value_tensor']:\n self._env_wrapper.trajectory_values,\n self.labels_ph['return_tensor']:\n self._env_wrapper.trajectory_returns,\n self.labels_ph['old_neg_logprob_tensor']:\n self._env_wrapper.trajectory_neg_logprobs,\n self.labels_ph['mean_tensor']:\n self._env_wrapper.trajectory_means,\n self.labels_ph['logstd_tensor']:\n self._env_wrapper.trajectory_logstds,\n self.labels_ph['mcts_enable_tensor']:\n mcts_tensor,\n self.labels_ph['policy_action_tensor']:\n self._env_wrapper.policy_trajectory_actions,\n self.labels_ph['policy_value_tensor']:\n self._env_wrapper.policy_trajectory_values,\n self.labels_ph['policy_return_tensor']:\n self._env_wrapper.policy_trajectory_returns,\n self.labels_ph['policy_old_neg_logprob_tensor']:\n self._env_wrapper.policy_trajectory_neg_logprobs,\n })",
"def continue_session(self, session_id, epochs, log_base_path='logs',\n model_checkpoint_period=2,\n prediction_checkpoint_period=2):\n exp = ExperimentDB(\n self.dbclient, session_id=session_id, log_base_path=log_base_path\n ).run_experiment(\n model_checkpoint_period=model_checkpoint_period,\n prediction_checkpoint_period=prediction_checkpoint_period,\n save_origin_images=False, verbose=1, epochs=epochs)\n\n return exp",
"def testSetPrompts(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pipeline=pl, ps1='x', ps2='y')\n self.assertEqual('x', repl.prompt)\n self.assertEqual('x', sys.ps1)\n self.assertEqual('y', sys.ps2)",
"def patch_cli(\n caps_directory: str,\n modality: str,\n save_features: bool = False,\n subjects_sessions_tsv: Optional[str] = None,\n use_uncropped_image: bool = False,\n patch_size: int = 50,\n stride_size: int = 50,\n acq_label: Optional[str] = None,\n suvr_reference_region: Optional[str] = None,\n custom_suffix: str = \"\",\n):\n parameters = get_parameters_dict(\n modality,\n \"patch\",\n save_features,\n use_uncropped_image,\n custom_suffix,\n acq_label,\n suvr_reference_region,\n )\n parameters[\"patch_size\"] = patch_size\n parameters[\"stride_size\"] = stride_size\n\n DeepLearningPrepareData(\n caps_directory=caps_directory,\n tsv_file=subjects_sessions_tsv,\n parameters=parameters,\n )",
"async def session(self,ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"The current main session is \" + \"```\" + await self.config.sessions.main() + \"```\")",
"def initialize_if_needed(self):\n if self.session is None:\n raise RunnerTerminalSessionClosed()\n self.session.initialize_if_needed()",
"def setup_interactive(shared):\n parser = setup_args()\n parser.add_argument('--port', type=int, default=PORT, help='Port to listen on.')\n SHARED['opt'] = parser.parse_args(print_args=False)\n\n SHARED['opt']['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'\n\n # Create model and assign it to the specified task\n agent = create_agent(SHARED.get('opt'), requireModelExists=True)\n SHARED['agent'] = agent\n # SHARED['world'] = create_task(SHARED.get('opt'), SHARED['agent'])\n\n # show args after loading model\n parser.opt = agent.opt\n parser.print_args()\n return agent.opt",
"async def repl(self, ctx):\n variables = {\n 'ctx': ctx,\n 'bot': self.bot,\n 'message': ctx.message,\n 'guild': ctx.guild,\n 'channel': ctx.channel,\n 'author': ctx.author,\n '_': None,\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send('Already running a REPL session in this channel. Exit it with `quit`.')\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send('Enter code to execute or evaluate. `exit()` or `quit` to exit.')\n\n def check(m):\n return m.author.id == ctx.author.id and \\\n m.channel.id == ctx.channel.id and \\\n m.content.startswith('`')\n\n while True:\n try:\n response = await self.bot.wait_for('message', check=check, timeout=10.0 * 60.0)\n except asyncio.TimeoutError:\n await ctx.send('Exiting REPL session.')\n self.sessions.remove(ctx.channel.id)\n break\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in ('quit', 'exit', 'exit()'):\n await ctx.send('Exiting.')\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = exec\n if cleaned.count('\\n') == 0:\n # single statement, potentially 'eval'\n try:\n code = compile(cleaned, '<repl session>', 'eval')\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is exec:\n try:\n code = compile(cleaned, '<repl session>', 'exec')\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables['message'] = response\n\n fmt = None\n stdout = io.StringIO()\n\n try:\n with contextlib.redirect_stdout(stdout):\n result = executor(code, variables)\n if inspect.isawaitable(result):\n result = await result\n except Exception as e:\n value = stdout.getvalue()\n fmt = f'```py\\n{value}{traceback.format_exc()}\\n```'\n else:\n value = stdout.getvalue()\n if result is not None:\n fmt = f'```py\\n{value}{result}\\n```'\n variables['_'] = result\n elif value:\n fmt = f'```py\\n{value}\\n```'\n\n try:\n if fmt is not None:\n if len(fmt) > 2000:\n await ctx.send('Content too big to be printed.')\n else:\n await ctx.send(fmt)\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(f'Unexpected error: `{e}`')",
"def _new_session(self, m):\n # Create a new session for this model, initialize\n # variables, and save / restore from\n # checkpoint.\n self._session = tf.Session(\n '',\n config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=False))\n self.session.run(m.init)\n\n # Load in a previous checkpoint, or save this one\n self.extract_model_spec()\n try:\n yield\n finally:\n tf.Session.reset('')\n self._session = None",
"def launch_new_instance():\n import IPython\n\n IPython.Shell.start().mainloop()",
"def main():\n \n # Help string to be shown using the -h option\n descStr = \"\"\"\n Loop through the data files linked to the current session and extract I, Q\n and U data at the positions of sources. The catalogue should already\n exist in the database, loaded by the 'create_image_session.py' script.\n That script has also created a 'PATH/TO/SESSION/inputs.config' file, used\n to set the pipeline input parameters. Data extracted for each source\n will be saved to a directory called 'PATH/TO/SESSION/OUT'. For each source\n in the catalogue the following data are saved to a FITS format file:\n * A cube centred on each source, or offset if abutting an edge.\n * A single-plane mask image showing the extraction aperture.\n * A one dimentional spectrum from the source, RMS and frequency axis.\n If the output files already exist, default behaviour is to redo the\n measurements of the spectra. \n\n Note: The measurements on the spectra are saved to the SQLite database in\n the file 'PATH/TO/SESSION/session.sqlite'.\n \n Example:\n \n ./3_extract_spectra.py testSession/\n \"\"\"\n\n # Parse the command line options\n parser = argparse.ArgumentParser(description=descStr,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"sessionPath\", metavar=\"PATH/TO/SESSION\", nargs=1,\n help=\"Path to the new session directory [no default]\")\n parser.add_argument(\"-o\", dest=\"doOverwrite\", action=\"store_true\",\n help=\"Overwrite previously extracted files.\")\n parser.add_argument(\"-r\", dest=\"doReset\", action=\"store_true\",\n help=\"Completely reset the OUT/ directory.\")\n args = parser.parse_args()\n sessionPath = args.sessionPath[0]\n doOverwrite = args.doOverwrite\n doReset = args.doReset\n\n # Call the spectral extraction function\n run_spectral_extraction(sessionPath, doOverwrite, doReset)"
]
| [
"0.60375863",
"0.5907077",
"0.57165277",
"0.57161325",
"0.5659394",
"0.5650892",
"0.5644861",
"0.5511649",
"0.5496762",
"0.54647994",
"0.5434915",
"0.5400358",
"0.5388621",
"0.5275898",
"0.5254386",
"0.5252777",
"0.5225612",
"0.5222021",
"0.51890284",
"0.5137504",
"0.5135676",
"0.5135154",
"0.5128734",
"0.5068045",
"0.50504273",
"0.5028498",
"0.5026851",
"0.49949333",
"0.49912757",
"0.49830472"
]
| 0.63685817 | 0 |
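A hypothetical call into the helper above (it assumes the same test-module scope as the row's document, i.e. DBSession and get_metadata_command() are importable); the integer argument is the bit mask noted in the function body.

uid, isess = new_case_study(3)   # 3 = execute AND register the metadata command
# 0 -> no metadata command, 1 -> execute only, 2 -> register only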
! Constructor of the CLARANS clustering algorithm. The higher the value of maxneighbor, the closer CLARANS is to KMedoids, and the longer each search for a local minimum takes. | def __init__(self, data, number_clusters, numlocal, maxneighbor):
self.__pointer_data = data
self.__numlocal = numlocal
self.__maxneighbor = maxneighbor
self.__number_clusters = number_clusters
self.__clusters = []
self.__current = []
self.__belong = []
self.__optimal_medoids = []
self.__optimal_estimation = float('inf')
self.__verify_arguments() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )",
"def __init__(self, data, number_clusters, numlocal, maxneighbor):\n\n self.__pointer_data = data\n self.__numlocal = numlocal\n self.__maxneighbor = maxneighbor\n self.__number_clusters = number_clusters\n\n self.__clusters = []\n self.__current = []\n self.__belong = []\n\n self.__optimal_medoids = []\n self.__optimal_estimation = float(\"inf\")\n\n self.__verify_arguments()",
"def _qt_radius_clustering_greedy(self, min_to_cluster, reduced, cache, max_cycles):\n centre_inds, clustered_inds = [], set()\n chsn_indices = [self.index[name] for name in self.chosen]\n avail_indices = set(self.index[name] for name in self.available)\n unassigned_indices = list(self._not_ignored_inds - avail_indices - set(chsn_indices))\n if unassigned_indices:\n # Remove unassigned from centre consideration\n reduced[:,unassigned_indices] = np.inf\n for chsn_ind in chsn_indices:\n cluster_inds = np.nonzero(reduced[:,chsn_ind] == 0)[0]\n centre_inds.append(chsn_ind)\n clustered_inds.update(cluster_inds)\n # Remove chosen and their clusters from all future consideration\n reduced[:,cluster_inds] = np.inf\n reduced[cluster_inds,:] = np.inf\n # Iteratively find the largest cluster, until enough variants are clustered\n cache['cycles_used'] = 0\n while len(clustered_inds) < min_to_cluster:\n centre_ind, cluster_inds = self._find_largest_candidate(reduced)\n if centre_ind == None:\n percent_placed = len(clustered_inds)*100.0/float(len(self._not_ignored_inds))\n error_msg = 'Error: clustering finished prematurely ({:.2f}% placed). To fix this, raise the critical threshold, lower the critical percent, or add more available variants.'.format(percent_placed)\n return [], error_msg, [centre_inds, self._not_ignored_inds-clustered_inds]\n centre_inds.append(centre_ind)\n clustered_inds.update(cluster_inds)\n reduced[:,centre_ind] = np.inf\n reduced[cluster_inds,:] = np.inf\n cache['cycles_used'] += 1\n if cache['quit_now'] or max_cycles != None and cache['cycles_used'] >= max_cycles:\n break\n final_cluster_inds = self._partition_nearest(centre_inds, self.orig_dists)\n final_scores = self._sum_dist_scores(centre_inds, final_cluster_inds, self.orig_dists)\n alt_variants = []\n return centre_inds, final_scores, alt_variants",
"def __init__(self, rank=10, clusters=1, iterations=3, metric='euclidean'):\n\n sk_kmeans.__init__(self, n_clusters=clusters, max_iter=iterations)\n # Cluster ranks is a list of lists of knn sorted elements for each cluster w.r.t. the cluster mean\n self.rank = rank\n self.metric = metric",
"def _qt_radius_clustering_minimal(self, min_to_cluster, reduced, unassigned_orphans, cache, max_cycles):\n # Separating components and removing dominated indices reduced runtime on tbpb82 0.4@100% from 10s to 10ms.\n # Before removing dominated, tree_275 0.04@100% found a solution with score 4.0485 after 228k cycles. After, found it in 49k. After adding the second Counter to CoverManager, found it under 1k cycles. Each cycle was substantially slower, but the solution still was found ~1000x faster (ms instead of 20 min).\n out_of_range = reduced.copy()\n out_of_range[out_of_range != 0] = 1\n neighbors_of = {}\n for ind in self._not_ignored_inds:\n clstr_inds = np.nonzero(reduced[:,ind] == 0)[0]\n neighbors_of[ind] = set(clstr_inds)\n chsn_indices = set(self.index[name] for name in self.chosen)\n avail_indices = set(self.index[name] for name in self.available)\n num_not_ignored = len(self._not_ignored_inds)\n considered_nbrs, dominated_inds = self._remove_dominated_inds(neighbors_of, chsn_indices, avail_indices, out_of_range)\n # # Process depending on the run parameters\n cache['cycles_used'] = 0\n final_centre_inds, final_scores = [], []\n if min_to_cluster == num_not_ignored: # Critical percent equivalent to 100%\n # Can dramatically speed up the search by separating components\n component_inds = self._identify_components(neighbors_of)\n subset_cycles, cycle_rollover = None, 0\n for subset_indices in component_inds:\n subset_to_cluster = len(subset_indices)\n subset_chosen = chsn_indices & subset_indices\n subset_avail = avail_indices & subset_indices\n if max_cycles != None:\n subset_cycles = ceil(subset_to_cluster/float(min_to_cluster) * max_cycles) + cycle_rollover\n subset_centre_inds, subset_scores, subset_cycles_used = self._qt_radius_cluster_subset(subset_indices, subset_chosen, subset_avail, considered_nbrs, dominated_inds, subset_to_cluster, cache, subset_cycles, out_of_range)\n if subset_cycles_used == None or subset_cycles_used >= subset_cycles:\n cycle_rollover = 0\n else:\n cycle_rollover = subset_cycles - subset_cycles_used\n final_centre_inds.extend(subset_centre_inds)\n final_scores.extend(subset_scores)\n elif min_to_cluster == num_not_ignored - len(unassigned_orphans):\n # Can still use the component speedup in this case\n orphan_inds = set(unassigned_orphans)\n component_inds = self._identify_components(neighbors_of)\n subset_cycles, cycle_rollover = None, 0\n for subset_indices in component_inds:\n if max_cycles != None:\n subset_cycles = ceil(len(subset_indices)/float(min_to_cluster) * max_cycles) + cycle_rollover\n subset_to_cluster = len(subset_indices - orphan_inds)\n if subset_to_cluster == 0: # The entire subset is orphaned, so no centers can be found\n if max_cycles != None:\n cycle_rollover += subset_cycles\n continue\n subset_chosen = chsn_indices & subset_indices\n subset_avail = avail_indices & subset_indices\n subset_centre_inds, subset_scores, subset_cycles_used = self._qt_radius_cluster_subset(subset_indices, subset_chosen, subset_avail, considered_nbrs, dominated_inds, subset_to_cluster, cache, subset_cycles, out_of_range)\n if subset_cycles_used == None or subset_cycles_used >= subset_cycles:\n cycle_rollover = 0\n else:\n cycle_rollover = subset_cycles - subset_cycles_used\n final_centre_inds.extend(subset_centre_inds)\n final_scores.extend(subset_scores)\n else:\n # Can't split into components and guarantee optimal, as I can't predict which component should be allowed to miss some variants.\n # May be a way to remove some components from 
consideration, but likely requires running _qt_radius_cluster_subset() multiple times. May still be faster, so worth considering if more speed is actually useful here.\n # - All unassigned orphans are part of total_allowed_missed by definition. So all other clusters are only allowed to miss allowed_missed = total_allowed_missed - len(unassigned_orphans).\n # - The global optimal solution for some component is guaranteed to fall between the solution for that component finding 100% of variants, and the solution for that component finding len(component)-allowed_missed variants. If they are equal, that's the global optimal solution for that component, and it can be excluded from the combined run. If they're unequal, it was a waste of time and the component has to be included in the combined run.\n final_centre_inds, final_scores, _cycles_used = self._qt_radius_cluster_subset(set(neighbors_of.keys()), chsn_indices, avail_indices, considered_nbrs, dominated_inds, min_to_cluster, cache, max_cycles, out_of_range)\n alt_variants = []\n return final_centre_inds, final_scores, alt_variants",
"def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters",
"def __init__(self,\n n_clusters=0,\n centroids=None,\n data=None,\n labels=[],\n distance='cov',\n threshold=0.38,\n dimension=128,\n update_centroids=True):\n self.n_clusters = n_clusters\n self.threshold = threshold\n self.distance = distance\n self.dimension = dimension\n self.update_centroids = update_centroids\n if centroids is None:\n self.centroids = np.zeros((n_clusters, 1, dimension))\n else:\n self.centroids = np.array(centroids)\n\n # if data is None:\n # self.data = np.zeros((n_clusters, 1, dimension))\n # else:\n # self.data = np.array(data)\n self.labels = np.array(labels, dtype=np.int32)",
"def __init__(self, conn, args, data, split_type, num_clusters):\n\n self.conn = conn\n self.args = args\n self.data = data\n self.split_type = split_type\n\n self.pca_model = None\n self.cluster_model = None\n self.algorithm = args['cluster_algorithm']\n\n # http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n affinity_propagation = cluster.AffinityPropagation()\n ms = cluster.MeanShift(bin_seeding=True)\n spectral = cluster.SpectralClustering(n_clusters=num_clusters, \n eigen_solver='arpack',\n affinity=\"nearest_neighbors\", \n random_state=self.args['seed'])\n ward = cluster.AgglomerativeClustering(n_clusters=num_clusters, \n linkage='ward')\n birch = cluster.Birch(n_clusters=num_clusters)\n two_means = cluster.MiniBatchKMeans(n_clusters=num_clusters,\n random_state=self.args['seed'])\n average_linkage = cluster.AgglomerativeClustering(linkage=\"average\", \n n_clusters=num_clusters)\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n kmeans = cluster.KMeans(n_clusters=num_clusters, random_state=self.args['seed'])\n dbscan = cluster.DBSCAN()\n \n self.clustering_algorithms = {\n 'MiniBatchKMeans': two_means,\n 'AffinityPropagation': affinity_propagation,\n 'MeanShift': ms,\n 'SpectralClustering': spectral,\n 'Ward': ward,\n 'AgglomerativeClustering': average_linkage,\n 'DBSCAN': dbscan,\n 'Birch': birch,\n 'HDBSCAN': hdbsc,\n 'KMeans': kmeans\n }",
"def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center",
"def run(self, max_clusters):\n sample_dist_matrix = self.matrix_dist()\n self.link.print_link()\n first_clus = self.clusters[0] # initialize first cluster to merge into\n second_clus = self.clusters[0] # initialize second cluster to merge\n max_samples_dist = max(sample_dist_matrix.values())\n # initialize minimun distance between two samples\n min_dist = max_samples_dist\n while len(self.clusters) > max_clusters: # clustering loop\n for clus in self.clusters: # iterate over every cluster\n for other_clus in self.clusters: # iterate over other clusters\n if clus.c_id > other_clus.c_id: # avoid duplicates and make sure to pass correct key to dictionary\n # compute distance between two clusters according to current link\n clus_dist = self.link.compute(clus, other_clus, sample_dist_matrix)\n if clus_dist < min_dist: # keep the minimum distance and its clusters\n min_dist = clus_dist\n first_clus = other_clus\n second_clus = clus\n self.clusters.remove(second_clus) # remove the cluster that's getting merged from clusters list\n first_clus.merge(second_clus) # merge the cluster with higher id into the other\n min_dist = max_samples_dist # restore high distance in order to start the search again\n\n sum_sil = self.compute_summery_silhouette(sample_dist_matrix)\n # print results\n for clus in self.clusters:\n clus.print_details(sum_sil[clus.c_id])\n print(f'Whole data: silhouette = {sum_sil[0]}, RI = {self.compute_rand_index()}')",
"def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion",
"def __grow_cluster(self, init_loc, thresh):\n cluster = np.zeros_like(self.__array, dtype=bool)\n cluster[init_loc[0], init_loc[1]] = True\n pocket = [init_loc]\n adjacent = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n m, n = self.__array.shape\n while pocket:\n pt = pocket.pop(0)\n neighbors_in_cluster = [\n (pt[0] - i, pt[1] - j) for (i, j) in adjacent\n if 0 <= pt[0] - i < m and 0 <= pt[1] - j < n and\n not cluster[pt[0] - i, pt[1] - j] and\n np.absolute(self.__array[pt[0], pt[1]]\n - self.__array[pt[0] - i, pt[1] - j])\n < thresh]\n for nbr in neighbors_in_cluster:\n pocket.append(nbr)\n cluster[nbr[0], nbr[1]] = True\n return cluster",
"def __init__(self, centroid):\n self.label = ''\n self.centroid = centroid\n self.points = []\n self.radius = 0.0 # used to draw plot\n self.neighbour = {}\n self.inter_cost = 0\n self.intra_cost = 0\n self.dm_cost = 0",
"def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]",
"def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return",
"def customNcuts(self):\n # computing neighboors graph\n A = kneighbors_graph(self.values, self.k, mode='distance', include_self=False).toarray()\n\n for i in range(self.values.shape[0]):\n for j in range(self.values.shape[0]):\n if A[i][j] > 0:\n\n v1 = (self.values[i][3], self.values[i][4], self.values[i][5])\n v2 = (self.values[j][3], self.values[j][4], self.values[j][5])\n\n magnitude1 = np.sqrt(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2])\n magnitude2 = np.sqrt(v2[0] * v2[0] + v2[1] * v2[1] + v2[2] * v2[2])\n ang = np.arccos(np.dot(v1, v2) / (magnitude1 * magnitude2))\n\n A[i][j] = max(self.values[i][7], self.values[j][7]) * A[i][j]\n\n # init SpectralClustering\n sc = SpectralClustering(4, affinity='precomputed', n_init=10, assign_labels = 'discretize')\n\n # cluster\n labels = sc.fit_predict(A)\n\n return labels",
"def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)",
"def cluster(S, k=None, max_iter=100, visualize=False, points=None):\n N = len(S)\n if not k: \n k = N\n\n S_bar = 1 - 2 * S\n A = random_cluster_matrix((N, k))\n\n if visualize:\n if not points: \n raise ValueError(\"Cannot visualize clustering without points.\")\n plt.ion()\n\n for _i in range(max_iter): \n # Remove empty clusters\n empty_columns = [i for i in range(k) if sum(A[:,i]) == 0]\n A = remove_clusters(A, empty_columns)\n k = len(A[0]) # Adjust number of clusters\n\n # Permute cluster membership that minimizes objective the most:\n # (a) Compute M = ~SA\n M = S_bar @ A\n\n # (b) Compute v\n MoA = M * A\n v = [min(M[i]) - sum(MoA[i]) for i in range(N)]\n\n # Check if we converged\n if isclose(sum(v), 0, abs_tol=1e-5): \n break\n\n # (c) Find the object X with the greatest optimization potential\n X = np.argmin(v)\n\n # (d) Reassign X to the cluster C where C = argmin(M[X][j]) w.r.t. j\n C = np.argmin(M[X])\n A[X] = np.zeros((k))\n A[X][C] = 1\n\n if _i % 10 == 0: \n progress(_i, max_iter)\n \n if visualize: \n plot(points, A, k) \n\n return A",
"def __optimize_configuration(self):\n index_neighbor = 0\n counter = 0\n while index_neighbor < self.__maxneighbor:\n # get random current medoid that is to be replaced\n current_medoid_index = self.__current[\n random.randint(0, self.__number_clusters - 1)\n ]\n current_medoid_cluster_index = self.__belong[current_medoid_index]\n\n # get new candidate to be medoid\n candidate_medoid_index = random.randint(\n 0, len(self.__pointer_data) - 1\n )\n\n while candidate_medoid_index in self.__current:\n candidate_medoid_index = random.randint(\n 0, len(self.__pointer_data) - 1\n )\n\n candidate_cost = 0.0\n for point_index in range(0, len(self.__pointer_data)):\n if point_index not in self.__current:\n # get non-medoid point and its medoid\n point_cluster_index = self.__belong[point_index]\n point_medoid_index = self.__current[point_cluster_index]\n\n # get other medoid that is nearest to the point (except current and candidate)\n other_medoid_index = self.__find_another_nearest_medoid(\n point_index, current_medoid_index\n )\n other_medoid_cluster_index = self.__belong[\n other_medoid_index\n ]\n\n # for optimization calculate all required distances\n # from the point to current medoid\n distance_current = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[current_medoid_index],\n )\n\n # from the point to candidate median\n distance_candidate = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[candidate_medoid_index],\n )\n\n # from the point to nearest (own) medoid\n distance_nearest = float(\"inf\")\n if (point_medoid_index != candidate_medoid_index) and (\n point_medoid_index != current_medoid_cluster_index\n ):\n distance_nearest = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[point_medoid_index],\n )\n\n # apply rules for cost calculation\n if point_cluster_index == current_medoid_cluster_index:\n # case 1:\n if distance_candidate >= distance_nearest:\n candidate_cost += (\n distance_nearest - distance_current\n )\n\n # case 2:\n else:\n candidate_cost += (\n distance_candidate - distance_current\n )\n\n elif point_cluster_index == other_medoid_cluster_index:\n # case 3 ('nearest medoid' is the representative object of that cluster and object is more\n # similar to 'nearest' than to 'candidate'):\n if distance_candidate > distance_nearest:\n pass\n\n # case 4:\n else:\n candidate_cost += (\n distance_candidate - distance_nearest\n )\n\n if candidate_cost < 0:\n counter += 1\n # set candidate that has won\n self.__current[\n current_medoid_cluster_index\n ] = candidate_medoid_index\n\n # recalculate clusters\n self.__update_clusters(self.__current)\n\n # reset iterations and starts investigation from the begining\n index_neighbor = 0\n\n else:\n\n index_neighbor += 1\n\n print(\"Medoid set changed {0} times\".format(counter))",
"def DBscan_clustering(self,d,s):\r\n print(colored(\"Performing agglomerative clustering\",color = 'yellow', attrs=['bold']))\r\n self.clustering = DBSCAN(eps=d,min_samples=s,metric = 'euclidean').fit(self.X)\r\n self.labels = self.clustering.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The number of cluster centers formed are %d\\n\"%len(np.unique(self.labels)),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels",
"def run(\n self,\n number_of_clusters=None,\n max_K=8,\n method_clustering=\"pam\",\n init_clustering=\"random\",\n max_iter_clustering=100,\n discart_value_JI=0.6,\n bootstraps_JI=100,\n bootstraps_p_value=100,\n n_jobs=1,\n verbose=1,\n ):\n\n if number_of_clusters is None:\n self.k = optimizer.optimizeK(\n self.distance_matrix,\n self.y.to_numpy(),\n self.model_type,\n max_K,\n method_clustering,\n init_clustering,\n max_iter_clustering,\n discart_value_JI,\n bootstraps_JI,\n self.random_state,\n n_jobs,\n verbose,\n )\n\n if self.k == 1:\n warnings.warn(\"No stable clusters were found!\")\n return\n\n print(f\"Optimal number of cluster is: {self.k}\")\n\n else:\n self.k = number_of_clusters\n print(f\"Use {self.k} as number of cluster\")\n\n self.cluster_labels = (\n kmedoids.KMedoids(\n n_clusters=self.k,\n method=method_clustering,\n init=init_clustering,\n metric=\"precomputed\",\n max_iter=max_iter_clustering,\n random_state=self.random_state,\n )\n .fit(self.distance_matrix)\n .labels_\n )\n\n (\n self._data_clustering_ranked,\n self.p_value_of_features,\n ) = stats.calculate_global_feature_importance(\n self.X, self.y, self.cluster_labels, self.model_type\n )\n self._p_value_of_features_per_cluster = (\n stats.calculate_local_feature_importance(\n self._data_clustering_ranked, bootstraps_p_value\n )\n )",
"def __init__(self):\n ## self.clusters[cluster] = list of coordinates\n self.clusters = {}\n ## self.centroids[cluster] = centroid\n self.centroids = {}",
"def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters",
"def __find_nearest_centroids(self, data):\n \n self.clusters = np.array([]) \n for i, d in enumerate(data):\n min_dist = np.inf\n self.clusters = np.concatenate((self.clusters, np.array([-1])))\n for j, c in enumerate(self.centroids):\n dist = self.__compute_distance(d, c)\n if min_dist > dist:\n min_dist = dist\n self.clusters[i] = j",
"def __optimize_configuration(self):\r\n index_neighbor = 0\r\n counter = 0\r\n while (index_neighbor < self.__maxneighbor):\r\n # get random current medoid that is to be replaced\r\n current_medoid_index = self.__current[random.randint(0, self.__number_clusters - 1)]\r\n current_medoid_cluster_index = self.__belong[current_medoid_index]\r\n\r\n # get new candidate to be medoid\r\n candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)\r\n\r\n while candidate_medoid_index in self.__current:\r\n candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)\r\n\r\n candidate_cost = 0.0\r\n for point_index in range(0, len(self.__pointer_data)):\r\n if point_index not in self.__current:\r\n # get non-medoid point and its medoid\r\n point_cluster_index = self.__belong[point_index]\r\n point_medoid_index = self.__current[point_cluster_index]\r\n\r\n # get other medoid that is nearest to the point (except current and candidate)\r\n other_medoid_index = self.__find_another_nearest_medoid(point_index, current_medoid_index)\r\n other_medoid_cluster_index = self.__belong[other_medoid_index]\r\n\r\n # for optimization calculate all required distances\r\n # from the point to current medoid\r\n distance_current = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])\r\n\r\n # from the point to candidate median\r\n distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index])\r\n\r\n # from the point to nearest (own) medoid\r\n distance_nearest = float('inf')\r\n if ( (point_medoid_index != candidate_medoid_index) and (point_medoid_index != current_medoid_cluster_index) ):\r\n distance_nearest = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[point_medoid_index])\r\n\r\n # apply rules for cost calculation\r\n if (point_cluster_index == current_medoid_cluster_index):\r\n # case 1:\r\n if (distance_candidate >= distance_nearest):\r\n candidate_cost += distance_nearest - distance_current\r\n\r\n # case 2:\r\n else:\r\n candidate_cost += distance_candidate - distance_current\r\n\r\n elif (point_cluster_index == other_medoid_cluster_index):\r\n # case 3 ('nearest medoid' is the representative object of that cluster and object is more similar to 'nearest' than to 'candidate'):\r\n if (distance_candidate > distance_nearest):\r\n pass;\r\n\r\n # case 4:\r\n else:\r\n candidate_cost += distance_candidate - distance_nearest\r\n\r\n if (candidate_cost < 0):\r\n counter+=1\r\n # set candidate that has won\r\n self.__current[current_medoid_cluster_index] = candidate_medoid_index\r\n\r\n # recalculate clusters\r\n self.__update_clusters(self.__current)\r\n\r\n # reset iterations and starts investigation from the begining\r\n index_neighbor = 0\r\n\r\n else:\r\n\r\n index_neighbor += 1\r\n\r\n print(\"Medoid set changed {0} times\".format(counter))",
"def __init__(self, k=2, tol=0.001, max_iter=300):\r\n self.k = k\r\n self.tol = tol\r\n self.max_iter = max_iter\r\n\r\n self.centroids = None\r\n self.classifications = None",
"def clustering(cluster_list):\n while len(cluster_list) > 1:\n x = 0\n y = 0\n distance_min = 10\n\n for i in range(0,len(cluster_list)):\n\n for j in range(0,len(cluster_list)):\n\n if i != j:\n distance = cluster_list[i].linkage(cluster_list[j])\n if distance < distance_min:\n x = i\n y = j\n distance_min = distance\n \n \n clusX = cluster_list[x]\n clusY = cluster_list[y]\n cluster_list.pop(cluster_list.index(clusX))\n cluster_list.pop(cluster_list.index(clusY))\n\n cluster_list.append(Cluster(clusX,clusY))\n return cluster_list[0]",
"def cluster(self):\r\n\t\tself.clusterer.fit(self.koopman_feature_array)\r\n\t\tself.labels = self.clusterer.labels_\r\n\t\tfor j in range(max(self.labels)+1):\r\n\t\t\tself.koop_cluster_list.append([self.koop_list[i] for i in range(len(self.labels)) if self.labels[i] == j])\r\n\t\t\tself.koop_cluster_memb_prob_list.append([self.clusterer.probabilities_[i] for i in range(len(self.labels)) if self.labels[i] == j])",
"def cluster(self, verbose=0, sum_ess=False):\n ## if sum_ess and self.linkage.__name__ != \"ward_link\":\n ## raise ValueError(\n ## \"Summing for method other than Ward makes no sense...\")\n clusters = copy.copy(self._dist_matrix)\n #clusters = self._dist_matrix\n summed_ess = 0.0\n\n while len(clusters) > max(self._num_clusters, 1):\n if verbose >= 1:\n print('k=%s' % len(clusters))\n if verbose == 2:\n print(clusters)\n\n best, i, j = self.smallest_distance(clusters)\n # In Ward (1963) ess is summed at each iteration\n # in R's hclust and Python's hcluster and some text books it is not.\n # Here it is optional...\n if sum_ess:\n summed_ess += best\n else:\n summed_ess = best\n clusters = self.update_distmatrix(i, j, clusters)\n self._dendrogram.merge(i,j)\n self._dendrogram[i].distance = summed_ess\n indices = numpy.arange(clusters.shape[0])\n indices = indices[indices!=j]\n clusters = clusters.take(indices, axis=0).take(indices, axis=1)",
"def cluster(\n distance_matrix: np.ndarray, num_clusters: int, min_size: float = None\n) -> Tuple[np.ndarray, np.ndarray]:\n # Default value for min_size\n if min_size is None:\n min_size = num_clusters ** -2\n # Some sanity checks\n assert len(distance_matrix.shape) == 2\n assert distance_matrix.shape[0] == distance_matrix.shape[1]\n assert 0 < num_clusters < distance_matrix.shape[0]\n assert 0 < min_size < distance_matrix.shape[0]\n # Handle minsizes in the for of percentages\n if isinstance(min_size, float) and 0.0 < min_size < 1.0:\n min_size = int(distance_matrix.shape[0] * min_size)\n # Select correct type and run the algorithm\n if distance_matrix.dtype == np.float64:\n return _rslc.rslc_f64(distance_matrix, num_clusters, min_size)\n if distance_matrix.dtype == np.float32:\n return _rslc.rslc_f32(distance_matrix, num_clusters, min_size)\n if distance_matrix.dtype == np.int32:\n return _rslc.rslc_i32(distance_matrix, num_clusters, min_size)\n if distance_matrix.dtype == np.int64:\n return _rslc.rslc_i64(distance_matrix, num_clusters, min_size)\n if distance_matrix.dtype == np.uint32:\n return _rslc.rslc_u32(distance_matrix, num_clusters, min_size)\n if distance_matrix.dtype == np.uint64:\n return _rslc.rslc_u64(distance_matrix, num_clusters, min_size)\n if distance_matrix.dtype == np.int16:\n return _rslc.rslc_i16(distance_matrix, num_clusters, min_size)\n if distance_matrix.dtype == np.uint16:\n return _rslc.rslc_u16(distance_matrix, num_clusters, min_size)\n return _rslc.rslc_f64(distance_matrix.astype(np.float64), num_clusters, min_size)"
]
| [
"0.67506176",
"0.6538149",
"0.6469685",
"0.63442355",
"0.63192666",
"0.6225287",
"0.6143774",
"0.61311984",
"0.6123281",
"0.6097289",
"0.60914516",
"0.6089441",
"0.6071795",
"0.6051894",
"0.6039509",
"0.5990073",
"0.5981506",
"0.5952713",
"0.5925311",
"0.5907183",
"0.5874377",
"0.58721274",
"0.58633226",
"0.5847692",
"0.58393574",
"0.5830766",
"0.58247346",
"0.58018196",
"0.5795439",
"0.57873845"
]
| 0.6568333 | 1 |
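A minimal usage sketch for the constructor in the row above. Only __init__ appears in this dump, so the import path, the class name and the process()/get_clusters() calls are assumptions based on the pyclustering-style interface suggested by the neighbouring rows; treat it as a sketch, not the library's documented API.

from pyclustering.cluster.clarans import clarans  # assumed import path
import random

random.seed(0)
# Two loose 2-D blobs as toy input.
data = [[random.gauss(0.0, 0.5), random.gauss(0.0, 0.5)] for _ in range(20)] \
     + [[random.gauss(5.0, 0.5), random.gauss(5.0, 0.5)] for _ in range(20)]

# 2 clusters, numlocal=4 local searches, maxneighbor=5 non-improving candidates before a search stops.
instance = clarans(data, 2, 4, 5)
instance.process()                  # assumed entry point that runs the search
clusters = instance.get_clusters()  # lists of point indices, one list per cluster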
! Returns the clustering result representation type that indicates how clusters are encoded. Return value: (type_encoding) clustering result representation. See also: get_clusters(). | def get_cluster_encoding(self):
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cluster_encoding(self):\n\n return type_encoding.CLUSTER_INDEX_LIST_SEPARATION",
"def cluster_type(self) -> str:\n return pulumi.get(self, \"cluster_type\")",
"def cluster_types(self):\n raise NotImplementedError",
"def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")",
"def get_clusters(self):\n fields = ['name', ]\n return self.get_data(\"clusters\", fields)",
"def clusters(self):\n raise NotImplementedError",
"def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )",
"def list_cluster_response():\n return {\n \"clusters\": [\n EXAMPLE_NAME\n ]\n }",
"def resource_type(self):\n return 'cluster'",
"def __str__(self):\n return \"Cluster\"",
"def __str__(self):\n return \"Clustering\"",
"def get_clusters(self):\r\n\r\n return self.__clusters",
"def find_cluster_type(self, name):\n raise NotImplementedError",
"def get_clustering_algorithm_class(cls):\n return {\n \"spectral\": SpectralClusteringAlgorithm,\n \"dbscan\": DBSCANAlgorithm,\n \"gromos\": GromosAlgorithm,\n \"kmedoids\": KMedoidsAlgorithm,\n \"random\": RandomClusteringAlgorithm,\n \"hierarchical\": HierarchicalClusteringAlgorithm\n }",
"def generate_clustering_info(self, algorithm_type, clustering_parameters, clusterings = []):\n clustering_info = {}\n for i, running_parameters in enumerate(clustering_parameters):\n\n clustering_id = \"clustering_%04d\"%(self.current_clustering_id)\n self.current_clustering_id += 1\n clustering_info[clustering_id] = {\n \"type\":algorithm_type,\n \"clustering\": None,\n \"parameters\": running_parameters\n }\n\n if clusterings != []:\n clustering_info[clustering_id][\"clustering\"] = clusterings[i]\n\n return clustering_info",
"def cluster_types(self) -> Iterable[dto.ClusterType]:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def matching_clusterization(self):\n result = []\n self.reclustering(self.groups.copy(deep=True), result)\n self.result = pd.DataFrame(result)\n return self.result.sort_values(by=['cluster_size'], ascending=False)",
"def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")",
"def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))",
"def classify_k_cluster(labels, datas):\n classify_k_cluster_to_redis(labels=labels, texts=datas)",
"def __repr__(self):\n return f'<Cluster {len(self.qc_mol)}/{len(self.br_mol)}/{len(self.pc_mol)}>'",
"def get_clusters():\n return objects.ClusterCollection.order_by(\n objects.ClusterCollection.all(),\n 'id'\n )",
"def get_cluster_info(self) -> Dict[str, Any]:\n pass",
"def get_clusters(self):\n return self._clusters",
"def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])",
"def _get_cluster_list(self):\n return self.__cluster_list",
"def to_json(self):\n\n tcluster = {\"clusters\": [], \"matchings\": None}\n if self.matching is not None:\n tcluster[\"matchings\"] = self.matching\n elif self.matched is not None:\n tcluster[\"matchings\"] = self.matched\n\n for tid in self.get_observation_ids():\n ct = self.get_clustering_at(tid)\n partition = {\n \"tid\": tid,\n \"communities\": ct.named_communities,\n \"algorithm\": ct.method_name,\n \"params\": ct.method_parameters,\n \"overlap\": ct.overlap,\n \"coverage\": ct.node_coverage,\n }\n tcluster[\"clusters\"].append(partition)\n\n return json.dumps(tcluster)",
"def get_clusters(ensemble, grouping, clustering):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping or \";\" in clustering:\n\t\treturn None\n\n\tensemble = ensemble.replace('EnsEns','Ens')\n\tdf = None;\n\n\tif grouping in ['annotation','cluster']:\n\t\tgroupingu = ensemble+\".\"+grouping+\"_\"+clustering\n\telif grouping in ['NeuN']:\n\t\tgroupingu = \"CONCAT('NeuN',cells.\"+grouping+\")\"\n\telse:\n\t\tgroupingu = \"cells.\"+grouping\n\n\t# Get methylation info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snmC' as modality, \\\n\t\t%(groupingu)s as groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'groupingu': groupingu,\n\t\t\t\t\t'clustering': clustering}\n\ttry:\n\t\tdf = pd.read_sql(query, db.get_engine(current_app, 'methylation_data'))\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\t\t# return None\n\n\t# Get snATAC info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snATAC' AS modality, %(ensemble)s.cluster_ATAC groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_atac = pd.read_sql(query, db.get_engine(current_app, 'snATAC_data'))\n\t\tdf=df.append(df_atac)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\n\t# Get snRNA info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'RNA' AS modality, %(ensemble)s.cluster_RNA groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_rna = pd.read_sql(query, db.get_engine(current_app, 'RNA_data'))\n\t\tdf=df.append(df_rna)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\treturn df",
"def clustering(self):\n ret_concepts = []\n clusters = []\n for word in self.words:\n clusters.append(WordCluster(None, word))\n while len(clusters) > 1:\n maxi = -1\n maxj = -1\n max = -1\n m = -1\n for i in range(len(clusters)):\n for j in range(len(clusters)):\n if i == j:\n continue\n # print(\"%d cluster compare with %d cluster\" % (i, j))\n # 1: join 21: i absorb j 22: j absorb i 3: collapse\n # l1: join L(Tm) value l21: A absorb B L(Tm)value\n l1, newtags = self.__calculate_ltm(clusters[i], clusters[j], 1)\n if l1 > max:\n m = 1\n maxi = i\n maxj = j\n max = l1\n print(\"max L(Tm) for clustering in current loop: %lf\" % max)\n if max < ClusterAlgorithm.P_threshold:\n return\n Tm = clusters[maxi].join(clusters[maxj])\n Tm_concepts = self.__select_concepts(self.__getword(Tm))\n for tmp_concept in Tm_concepts.items():\n ret_concepts.append(tmp_concept)\n rm1 = clusters[maxi]\n rm2 = clusters[maxj]\n clusters.remove(rm1)\n clusters.remove(rm2)\n if Tm is not None:\n print(\"merged cluster's words:\")\n print(self.__getword(Tm))\n return ret_concepts",
"def __repr__(self):\n rep = \"alg_cluster.Cluster(\"\n rep += str(self._fips_codes) + \", \"\n rep += str(self._horiz_center) + \", \"\n rep += str(self._vert_center) + \", \"\n rep += str(self._total_population) + \", \"\n rep += str(self._averaged_risk) + \")\"\n return rep"
]
| [
"0.69525045",
"0.6760325",
"0.6753326",
"0.651426",
"0.62345177",
"0.61501366",
"0.60817915",
"0.6077545",
"0.6072492",
"0.59875476",
"0.5957499",
"0.5922276",
"0.59057575",
"0.5892965",
"0.58514273",
"0.58257383",
"0.5823855",
"0.58212787",
"0.57745683",
"0.5772822",
"0.57640433",
"0.5760542",
"0.5753053",
"0.5746754",
"0.57349616",
"0.5733465",
"0.5720969",
"0.5700557",
"0.56845796",
"0.56838936"
]
| 0.70536655 | 0 |
! Forms clusters in line with the specified medoids by calculating the distance from each point to the medoids. | def __update_clusters(self, medoids):
    self.__belong = [0] * len(self.__pointer_data)
    self.__clusters = [[] for _ in range(len(medoids))]
    for index_point in range(len(self.__pointer_data)):
        index_optim = -1
        dist_optim = 0.0
        for index in range(len(medoids)):
            dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])
            # The first medoid always initialises the optimum distance.
            if (dist < dist_optim) or (index == 0):
                index_optim = index
                dist_optim = dist
        self.__clusters[index_optim].append(index_point)
        self.__belong[index_point] = index_optim
    # A cluster that captures no object is removed.
    self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > 0]
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __update_clusters(self, medoids):\n\n self.__belong = [0] * len(self.__pointer_data)\n self.__clusters = [[] for _ in range(len(medoids))]\n for index_point in range(len(self.__pointer_data)):\n index_optim = -1\n dist_optim = 0.0\n\n for index in range(len(medoids)):\n dist = euclidean_distance_square(\n self.__pointer_data[index_point],\n self.__pointer_data[medoids[index]],\n )\n\n if (dist < dist_optim) or (index == 0):\n index_optim = index\n dist_optim = dist\n\n self.__clusters[index_optim].append(index_point)\n self.__belong[index_point] = index_optim\n\n # If cluster is not able to capture object it should be removed\n self.__clusters = [\n cluster for cluster in self.__clusters if len(cluster) > 0\n ]",
"def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )",
"def generate_clusters(self, medoids, samples):\n clusters = []\n for i in range(0, medoids.shape[0]):\n clusters.append([])\n for currentSampleIndex in range(0, samples.shape[0]):\n currentSample = samples[currentSampleIndex]\n minDistance = np.inf\n minDistanceIndex = 0\n for currentMedoidIndex in range(0, medoids.shape[0]):\n currentDistance = distance.euclidean(currentSample, medoids[currentMedoidIndex])\n if (currentDistance < minDistance):\n minDistance = currentDistance\n minDistanceIndex = currentMedoidIndex\n clusters[minDistanceIndex].append(currentSample)\n return clusters\n pass",
"def calcClustersMultiprocess(dataset, medoids, number_of_clusters, verbosity=0, class_header=\"Class\"):\n clusters = [pandas.DataFrame(columns=dataset.columns)] * number_of_clusters # create array of clusters\n for _, datum in dataset.iterrows(): # For every datum\n nearest_medoid_index = 0\n nearest_medoid = next(medoids.iterrows())[1]\n shortest_distance = Cluster.calcDistance(datum, nearest_medoid,\n class_header=class_header) # Find nearest medoid\n for medoid_row_index, medoid_tuple in enumerate(medoids.iterrows()):\n medoid_frame_index = medoid_tuple[0] # Find nearest medoid\n medoid = medoid_tuple[1]\n if medoid_row_index is 0: continue\n distance = Cluster.calcDistance(datum, medoid,\n class_header=class_header) # find distance to current medoid\n if verbosity >= 2:\n print(\"DISTANCE TO\", medoid_frame_index)\n print(distance)\n print(\"MEDOID INDEX\")\n print(medoid_row_index)\n\n if distance < shortest_distance: # if current medoid is closer than all previous select it\n shortest_distance = distance\n nearest_medoid_index = medoid_row_index\n\n if verbosity >= 3:\n print(\"ITERROW DATUM\")\n print(datum)\n print(\"DATAFRAME ARRAY CLUSTERS\")\n print(clusters)\n\n # Assign datum to appropriate cluster\n clusters[nearest_medoid_index] = clusters[nearest_medoid_index].append(datum)\n return clusters",
"def byMedoids(dataset, number_of_clusters, class_header=\"Class\", verbosity=0, return_clusters=False):\n medoids = dataset.sample(number_of_clusters) # randomly select medoids from dataset\n\n if verbosity >= 1:\n print(\"INITIAL MEDOIDS\")\n print(medoids)\n if verbosity >= 2:\n print(\"DATAFRAME DATASET\")\n print(dataset)\n\n for iterations in range(MAX_ITERATIONS): # Loop until MAX_ITERATIONS or settled\n if verbosity >= 1:\n print(\"ITERATIONS\")\n print(iterations)\n\n clusters = Cluster.calcClusters(dataset, medoids, number_of_clusters, verbosity=verbosity,\n class_header=class_header) # Assign all points to a cluster\n\n base_distortion = Cluster.calcDistortion(medoids, clusters, class_header=class_header)\n # Find base distortion\n\n set_list = [] # set up multiprocessing structures\n work_list = []\n change_list = []\n\n for medoid_row_index, medoid_tuple in enumerate(medoids.iterrows()): # For each medoid\n medoid_frame_index = medoid_tuple[0]\n for datum_index, datum in clusters[medoid_row_index].iterrows(): # For each point in the medoid cluster\n if medoid_frame_index != datum_index: # Do not try to swap a medoid with itself\n temp = medoids.copy() # Make a copy of the medoids DataFrame\n temp.iloc[medoid_row_index] = datum # Swap the medoid in the copy\n temp.index.values[medoid_row_index] = datum.name\n work_list.append((temp, clusters, class_header)) # add calculation arguments to work list\n change_list.append((medoid_row_index, datum)) # add swap info to change list\n\n multiprocess_count = multiprocessing.cpu_count() # Find cpu count\n partition_size = math.ceil(len(work_list) / multiprocess_count) # find size of work list partitions\n if verbosity >= 1: # optionally print work list length\n print(\"Work list length:\")\n print(len(work_list))\n for i in range(multiprocess_count - 1): # repeat for every subset\n sample = work_list[i * partition_size: (i + 1) * partition_size] # take a subset of the work list\n set_list.append(sample) # add that subset as an item in the set list\n set_list.append((work_list[(multiprocess_count - 1) * partition_size:])) # add tailing subset to set list\n if verbosity > 2: # optionally print entire set list.\n print(\"Set list\")\n print(set_list)\n pool = multiprocessing.Pool(processes=multiprocess_count) # create multiprocessing pool\n distortion_lists = pool.map(Cluster.calcDistortionList, set_list) # map set list to processing pool\n pool.close()\n pool.join()\n #print(distortion_lists)\n distortions = sum(distortion_lists, [])\n #print(distortions)\n\n break_flag = True # set break flag in case there are no good changes\n distortion_index = 0\n for medoid_row_index, _ in enumerate(medoids.iterrows()): # For each medoid\n cluster_size = len(clusters[medoid_row_index])\n distortions_subset = distortions[distortion_index: distortion_index + cluster_size]\n distortion_index += cluster_size # keep track of how far we are through the change list\n if len(distortions_subset) != 0: # did this cluster have any possible changes\n best_distortion = min(distortions_subset) # pick the best distortion\n if best_distortion < base_distortion: # if that distortion is better than our old distortion\n best_dist_index = distortions.index(best_distortion)\n best_change = change_list[best_dist_index] # apply the change for that distortion.\n else:\n best_change = None\n else:\n best_change = None\n if verbosity > 0: # Optionally print best changes\n print(\"MEDOIDS\")\n print(medoids)\n print(\"BEST_CHANGE\")\n print(best_change)\n if best_change is not 
None: # make sure there is a change before trying to make it.\n medoids.iloc[best_change[0]] = best_change[1] # swap best change into medoids list\n medoids.index.values[best_change[0]] = best_change[1].name\n break_flag = False\n\n if break_flag: # if we made no changes then the clustering is settled.\n break\n\n medoids = medoids.drop_duplicates() # make sure we do not duplicate medoids\n if return_clusters is True: # optionally return clusters\n return medoids, clusters\n pass\n else:\n return medoids # return medoids dataframe",
"def calculate_medoids(self, distances, k=6):\n m = distances.shape[0] # number of points\n\n # Pick k random medoids.\n curr_medoids = np.array([-1] * k)\n while not len(np.unique(curr_medoids)) == k:\n curr_medoids = np.array([random.randint(0, m - 1)\n for _ in range(k)])\n # Doesn't matter what we initialize these to.\n old_medoids = np.array([-1] * k)\n new_medoids = np.array([-1] * k)\n\n # Until the medoids stop updating, do the following:\n while not ((old_medoids == curr_medoids).all()):\n # Assign each point to cluster with closest medoid.\n clusters = self.assign_points_to_clusters(curr_medoids, distances)\n\n # Update cluster medoids to be lowest cost point.\n for curr_medoid in curr_medoids:\n cluster = np.where(clusters == curr_medoid)[0]\n new_medoids[curr_medoids == curr_medoid] = self.compute_new_medoid(\n cluster, distances)\n\n old_medoids[:] = curr_medoids[:]\n curr_medoids[:] = new_medoids[:]\n\n return clusters, curr_medoids",
"def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters",
"def distance(point, cluster):\n return np.sqrt((point[0] - cluster[0])**2 + (point[1] - cluster[1])**2)",
"def linkage(self, other):\n \n distance = 0\n list1 = self.get_cluster_elements\n list2 = other.get_cluster_elements\n lenght = 0\n t = 0\n \n #first it \"de-clusterize\" the cluster into a simple list\n #of Sequence.\n done = False\n while done == False:\n done = True\n try:\n for i in range(0,len(list1)):\n if type(list1[i]) != Sequence:\n for j in range(0,len(list1[i])):\n list1.append(list1[i][j])\n list1.pop(i)\n done = False\n except:\n\n pass\n \n try:\n lenght = len(list1)\n except:\n lenght += 1\n empty_list = []\n empty_list.append(list1)\n list1 = empty_list\n\n \n #it do the same for the second cluster\n done = False\n while done == False:\n done = True\n try:\n for i in range(0,len(list2)):\n if type(list2[i]) != Sequence:\n for j in range(0,len(list2[i])):\n list2.append(list2[i][j])\n list2.pop(i)\n done = False\n except:\n pass\n\n try:\n lenght += len(list2)\n except:\n lenght += 1\n empty_list = []\n empty_list.append(list2)\n list2 = empty_list\n\n #then it calculate the total of all the distance...\n for i in range(0,len(list1)):\n for j in range(0,len(list2)):\n t += list1[i].distance_to(list2[j])\n\n #...and get the average distance (lenght is the sum of the lenght of)\n #the two cluster.\n distance = t/lenght \n return distance",
"def plot_MDS():\n lds = {} #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"abc\":5,\"efg\":6...},...}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n \n distances={} #a dictionary of dictionaries that saves the distances between a language and all other languages\n \n for x in lds.keys():\n distances[x]={}\n for y in lds.keys():\n if x == y: distances[x][y]=0.0\n else: distances[x][y]=cosine_dist(lds[x],lds[y])\n\n dst=np.zeros([len(lds.keys()), len(lds.keys())])\n i=0\n j=0\n for x in lds.keys():\n j=0\n for y in lds.keys():\n dst[i,j]=distances[x][y]\n j+=1\n i+=1\n\n X, languages = prepare_data_matrix()\n\n transformer = MDS(n_components=2, dissimilarity='precomputed')\n transformed = transformer.fit_transform(dst)\n\n plt.scatter(transformed [:,0], transformed [:,1])\n for i in range(len(transformed)):\n plt.text(transformed[i,0], transformed[i,1], languages[i][:3])\n plt.show()",
"def calcDistortion(medoids, clusters, class_header=\"Class\"):\n distortion = 0\n for medoid_row_index, medoid_tuple in enumerate(medoids.iterrows()): # For every Medoid\n for _, datum in clusters[medoid_row_index].iterrows(): # For each point in the medoid cluster\n # Add the distance between medoid and data point squared to total distortion\n distortion += (Cluster.calcDistance(medoid_tuple[1], datum, class_header=class_header)) ** 2\n return distortion",
"def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)",
"def distortion_of_kmeans_clustering(data_table):\n num_iritations = 5\n singleton_list = []\n for line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n distortion_list = []\n for num in range(20, 5, -1):\n cluster_list = kmeans_clustering(singleton_list,num, num_iritations)\n distortion = compute_distortion(data_table, cluster_list)\n distortion_list.append(distortion)\n return distortion_list\n\n#####################################################################\n# Code to load cancer data, compute a clustering and\n# visualize the results\n\n\n# def run_example():\n# \"\"\"\n# Load a data table, compute a list of clusters and\n# plot a list of clusters\n#\n# Set DESKTOP = True/False to use either matplotlib or simplegui\n# \"\"\"\n# data_table = load_data_table(DATA_3108_URL)\n# singleton_list = []\n# for line in data_table:\n# singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n num_clusters = 16\n # cluster_list = sequential_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"sequential clusters\")\n #\n # cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"hierarchical clusters\")\n #\n # cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, num_clusters, 5)\n # print(\"Displaying\", len(cluster_list), \"k-means clusters\")\n\n # draw the clusters using matplotlib or simplegui\n #\n # if DESKTOP:\n # # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, False)\n # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\n\n # else:\n # alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers",
"def compute_clusters(self, documents):\n ###TODO\n for d in range(0, len(documents)):\n maxi = 999999999\n for cid in range(0, len(self.means)):\n dist = self.distance(documents[d], self.means[cid], self.norms[cid])\n if dist < maxi:\n maxi = dist\n clust = cid \n self.cluster[d] = clust",
"def distance_dmc(distances, Ks, points):\n doors = []\n for d in distances:\n dmc = []\n for k in Ks:\n print \"==========================\", k, \"==========================\"\n clusters = create_clusters(25, k)\n\n kmeans(points, clusters)\n # print \"Finished creating kmeans algorithm\"\n\n create_backbone_network(GRAPH, clusters, d)\n # print \"Finished creating backbone network\"\n\n find_all_shortest_paths(clusters, SP_TABLE, GRAPH)\n # print \"Finished finding all shortest paths\"\n\n for clst in clusters:\n clst.inter_cost = inter_cost(clst)\n clst.intra_cost = intra_cost(points, clst)\n clst.dm_cost = door_matt_cost(clusters, clst, SP_TABLE)\n\n ret = total_cost(clusters)\n dmc.append(ret[2])\n doors.append(sum(dmc))\n draw_door_matts(map(lambda d: float(format(d, \".4g\")), distances), doors)",
"def Dist_clust(data):\n mask = np.loadtxt('/net/tarea/scratch/Rafael/phd/apogee/python/comb_SkyTel_mask.dat')\n masked = np.where(mask == 1)[0]\n spectra_list = data['fullset']\n clusters = data['clusters']\n clusters = clusters.transpose()\n distance = np.zeros((len(spectra_list), 2))\n min_dist_cl = np.zeros((data['nc'], 2))\n for j_cluster in range(data['nc']):\n dist_cluster= np.zeros((data['nc']))\n for i_cluster in range(data['nc']):\n dist_cluster[i_cluster] = np.nansum((clusters[j_cluster][masked] - clusters[i_cluster][masked])**2)**0.5\n min_dist_cl[j_cluster,0] = np.argmin(dist_cluster)\n dist_cluster[np.argmin(dist_cluster)] = dist_cluster[np.argmax(dist_cluster)]\n if (len(np.where(dist_cluster != 0)[0]) > 0):\n min_dist_cl[j_cluster,1] = np.argmin(dist_cluster[(dist_cluster != 0)])\n for i_spec, name in enumerate(spectra_list):\n vec_temp = np.load(name)\n for i_cluster, j_cluster in enumerate(min_dist_cl[data['assign'][i_spec]]):\n distance[i_spec,i_cluster] = np.nansum((clusters[j_cluster][masked] - vec_temp['norm'][masked])**2)**0.5\n vec_temp.close()\n return distance, min_dist_cl",
"def clustering(cluster_list):\n while len(cluster_list) > 1:\n x = 0\n y = 0\n distance_min = 10\n\n for i in range(0,len(cluster_list)):\n\n for j in range(0,len(cluster_list)):\n\n if i != j:\n distance = cluster_list[i].linkage(cluster_list[j])\n if distance < distance_min:\n x = i\n y = j\n distance_min = distance\n \n \n clusX = cluster_list[x]\n clusY = cluster_list[y]\n cluster_list.pop(cluster_list.index(clusX))\n cluster_list.pop(cluster_list.index(clusY))\n\n cluster_list.append(Cluster(clusX,clusY))\n return cluster_list[0]",
"def formClusters(dists, link, distance):\n # Make distance matrix square\n dists = squareform(dists)\n # Compute linkage\n links = linkage(dists, link)\n\n # import matplotlib.pyplot as plt\n # from scipy.cluster import hierarchy\n # plt.figure(figsize=(15,5))\n # p = hierarchy.dendrogram(links)\n\n # Break into clusters based on cutoff\n clusters = fcluster(links, distance, criterion='distance')\n return clusters",
"def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. \n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist",
"def computeCluster(filename,cop,serie):\n latitudes = []\n longitudes = []\n tempo = []\n cluster3DLatLong =[]\n serieItens =[]\n \n for i in serie:\n if(hasattr(i,'reporting_date')): # é incidentes\n if(cop == 'TODOS' and i.lon and i.lat):\n latitudes.append(float(i.lat))\n longitudes.append(float(i.lon))\n tempo.append(i.reporting_date)\n cluster3DLatLong.append([float(i.lat),float(i.lon)])\n serieItens.append(i)\n \n elif(i.lon and i.lat and haversine(float(latLongCops[cop][1]),float(latLongCops[cop][0]),float(i.lon),float(i.lat))<=50):\n latitudes.append(float(i.lat))\n longitudes.append(float(i.lon))\n tempo.append(i.reporting_date)\n cluster3DLatLong.append([float(i.lat),float(i.lon)])\n serieItens.append(i)\n \n elif(hasattr(i,'data_hora') and hasattr(i.localizacao,'lat') and hasattr(i.localizacao,'lon')): # é relato\n if(cop == 'TODOS' and hasattr(i.localizacao,'lat') and hasattr(i.localizacao,'lon')):\n latitudes.append(float(i.localizacao['lat']))\n longitudes.append(float(i.localizacao['lon']))\n tempo.append(i.data_hora)\n cluster3DLatLong.append([float(i.localizacao['lat']),float(i.localizacao['lon'])])\n serieItens.append(i)\n \n elif(hasattr(i.localizacao,'lat') and hasattr(i.localizacao,'lon') and\n haversine(float(latLongCops[cop][1]),float(latLongCops[cop][0]),float(i.localizacao['lon']),float(i.localizacao['lat']))<=50):\n latitudes.append(float(i.localizacao['lat']))\n longitudes.append(float(i.localizacao['lon']))\n tempo.append(i.data_hora)\n cluster3DLatLong.append([float(i.localizacao['lat']),float(i.localizacao['lon'])])\n serieItens.append(i)\n \n #clusters geograficos\n features = array(zip(latitudes,longitudes))\n # escolhi pegar o maior valor menor q sqrt(n/2)\n #k = int(math.floor(math.sqrt(len(latitudes)/2.0)))\n k = int(math.floor(math.sqrt(len(latitudes)/4.0)))\n if (k==0): \n k = 1\n clusters,distorcao = kmeans(features,k)\n \n #criando um vetor com a qtde de clusters necessarios\n itensClusterizados = []\n for i in range(0,k):\n itensClusterizados.append([])\n #agrupando cada item no seu cluster\n for i in range(0,len(tempo)):\n distancias=[]\n for c in clusters:\n #calcula a distancia o item ao centro de cada cluster\n distancias.append(haversine(float(longitudes[i]),float(latitudes[i]),float(c[1]),float(c[0])))\n #armazena o item no cluster mais proximo\n itensClusterizados[distancias.index(np.min(distancias))].append(serieItens[i])\n\n menorTempo = np.min(tempo)\n #criando os graficos ... cada grafico com uma cor\n \n plt.close('all')\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n colors=['y','b','r']\n for c in range(0,k):\n for i in itensClusterizados[c]:\n if(hasattr(i,'reporting_date') and i.lon and i.lat): # é incidentes\n ax.scatter(float(i.lat), float(i.lon), (i.reporting_date - menorTempo).total_seconds(),c='r',s=100)\n elif(hasattr(i,'data_hora') and hasattr(i.localizacao,'lat') and hasattr(i.localizacao,'lon')): # é relato\n ax.scatter(float(i.localizacao['lat']), float(i.localizacao['lon']), (i.data_hora - menorTempo).total_seconds(),c='g',s=100)\n ax.set_title('Ocorrencias', fontsize=24)\n ax.set_xlabel('Latitude', fontsize=20)\n ax.set_ylabel('Longitude', fontsize=20)\n ax.set_zlabel('Tempo', fontsize=20)\n fig.set_size_inches(18.5,10.5)\n fig.savefig(filename+cop+'.png',dpi=96)\n \n return clusters,itensClusterizados",
"def rmsd_cluster(input, ref, output, clusters):\n ifs = oemolistream()\n if not ifs.open(input):\n OEThrow.Fatal(\"Unable to open %s for reading\" % input)\n poses = list()\n mol = OEMol()\n while OEReadMolecule(ifs, mol):\n mol_copy = OEMol(mol)\n #print(dir(mol_copy))\n #print(mol_copy.NumConfs())\n for conf in mol_copy.GetConfs():\n poses.append(conf)\n ifs.close()\n print(\"%d poses read\" % len(poses))\n\n # Create a list of centroids, starting with first molecule.\n centroids = list()\n\n # Make first pose our first centroid.\n centroids.append(poses.pop(0))\n if int(clusters) < len(poses):\n print(\"Will return %s poses...\" % clusters)\n else:\n print(\"Will return %s poses...\" % (len(poses)+1))\n while len(centroids) < int(clusters) and len(poses)>0:\n print(len(centroids))\n # Compute distance from all poses to closest centroid.\n min_rmsd = numpy.zeros([len(poses)])\n for (pose_index, pose) in enumerate(poses):\n centroids_rmsds = [OERMSD(pose, centroid) for centroid in centroids]\n min_rmsd[pose_index] = min(centroids_rmsds)\n # Find pose that is farthest away from all current centroids.\n farthest_pose_index = min_rmsd.argmax()\n print(\"Farthest pose is %d at %f A away from centroids\" % (farthest_pose_index, min_rmsd[farthest_pose_index]))\n # Move farthest pose to centroids.\n centroids.append(poses.pop(farthest_pose_index))\n # Write out all centroids.\n ofs=oemolostream()\n if not ofs.open(output):\n OEThrow.Fatal(\"Unable to open %s for writing\" % itf.GetString(\"-o\"))\n for mol in centroids:\n #OEWritePDBFile(ofs, mol)\n OEWriteMolecule(ofs, mol)\n\n print(\"Done!\")\n\n return 0",
"def _processing( infile, rchr, dist, outf ):\n\n coords, sizes = build_dict(infile)\n qry_chrs = list(coords.keys())\n\n print(\"Primary\\tHaplotig\\tPrimary_Start\\tPrimary_end\\tHaplotig_Start\\tHaplotig_End\\tHaplotig_Length\", file=outf)\n for qchr in qry_chrs:\n refcoords = coords[qchr][0]\n qrycoords = coords[qchr][1]\n refst, refend, qryst, qryend = \\\n clustering( refcoords, sorted(qrycoords), sizes[qchr], dist )\n\n print(\"%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\" % \\\n (rchr, qchr, refst, refend, qryst, qryend, sizes[qchr]), file=outf)",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def cluster_testing_dist(agg1, agg2, partDiameter):\n agg2_temp = translate_aggregate(agg2, random_point_generator(calculate_LD(agg1), calculate_LD(agg2), calculate_COM(agg1), calculate_COM(agg2), partDiameter))\n agg2_temp = random_rotate_aggregate(agg2_temp)\n\n check = 1\n while check == 1:\n agg2_temp = translate_aggregate(agg2_temp, numpy.array((calculate_COM(agg1)-calculate_COM(agg2_temp))*0.01))\n check, index = test_collision(agg1, agg2_temp, partDiameter)\n \"\"\" Index from this part is not valid! Function returns '99' before collision happens.\n \"\"\"\n if (check == 2):\n # print(index)\n return numpy.linalg.norm(calculate_COM(agg1) - calculate_COM(agg2_temp)), numpy.linalg.norm(calculate_COM(agg1) - agg2_temp[:,index])\n # return numpy.linalg.norm(calculate_COM(agg1) - agg2_temp[0:3,index])\n break",
"def calculate_cost(self, medoids, clusters):\n cost = 0.0\n for i in range(0, len(medoids)):\n for j in range(0, len(clusters[i])):\n cost += distance.sqeuclidean(medoids[i], clusters[i][j])\n return cost\n pass",
"def distances(self):",
"def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)",
"def distance_plot(self, classified_lines):\n for regions, labels in classified_lines:\n for region, label in zip(regions, labels):\n start = self.attacker_position(region[0])\n end = self.attacker_position(region[1])\n plt.plot([start[0], end[0]], [start[1], end[1]],\n color=self.color(int(label)))",
"def partition_clusters(clustering_matrix, args, nr_clusters=5, method='complete', metric='euclidean', plotting=False):\n # clustering with linkage\n fig = plt.figure(figsize=(8,8))\n ax1 = fig.add_axes([0.09,0.1,0.2,0.6])\n # gives back linkage matrix after hierarchical clustering\n Y = sch.linkage(clustering_matrix, method=method,metric=metric)\n # creates dendogram for plotting and flattening\n Z = sch.dendrogram(Y, orientation='left')\n ax1.set_xticks([])\n ax1.set_yticks([])\n # calculate cluster membership\n # fcluster flattens out dendograms to the specified nr_clusters\n cluster_memberships = sch.fcluster(Y, t=nr_clusters, criterion='maxclust') # ith element in this array is the cluster for i\n idx = np.array(Z['leaves']) # idx ordered in cluster\n \n ax2 = fig.add_axes([0.3,0.71,0.6,0.2])\n Z2 = sch.dendrogram(Y)\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])\n\n clustering_matrix = clustering_matrix[idx,:]\n clustering_matrix = clustering_matrix[:,idx]\n im = axmatrix.matshow(clustering_matrix, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu)\n axmatrix.set_xticks([])\n axmatrix.set_yticks([])\n\n # Plot colorbar.\n axcolor = fig.add_axes([0.91,0.1,0.02,0.6])\n plt.colorbar(im, cax=axcolor)\n if plotting:\n fig.savefig(f'{args.results_root_dir}/clust_{args.clustering_method}_nr_users-{args.num_users}_nr_of_partition_clusters_{nr_clusters}_method_{method}_reconstructed.png')\n\n # Plot filtered\n canvas = np.zeros_like(clustering_matrix)\n for i in range(1,nr_clusters+1):\n mask = np.ones_like(clustering_matrix)\n mask[cluster_memberships[idx]!=i,:] = 0\n mask[:,cluster_memberships[idx]!=i] = 0\n canvas+=clustering_matrix*mask\n fig = plt.figure()\n plt.matshow(canvas,origin='lower')\n if plotting:\n fig.savefig(f'{args.results_root_dir}/clust_{args.clustering_method}_nr_users-{args.num_users}_nr_of_partition_clusters_{nr_clusters}_method_{method}_filtered.png')\n\n d_error = np.sum(clustering_matrix-canvas)\n print(f'Decompostion error: {d_error}, {d_error/np.sum(clustering_matrix)}')\n\n # build cluster id to client id user dict\n cluster_user_dict = { i : idx[cluster_memberships==i] for i in range(1,nr_clusters+1)}\n\n # Test overlaps within clusters\n collected = []\n for i, cluster_members_a in cluster_user_dict.items():\n for j, cluster_members_b in cluster_user_dict.items():\n assert np.all(cluster_members_a != cluster_members_b) or set(cluster_members_a).intersection(set(cluster_members_b)) != {}, f'clusters {i} and {j} are not disjoint'\n collected.extend(cluster_members_a)\n assert np.all(np.arange(0,len(clustering_matrix),1) == np.sort(np.array(collected)))\n\n return cluster_user_dict",
"def clustering(dataset, logger):\n all_instances = dataset\n meta_dataset = collections.defaultdict(list)\n for instance in all_instances:\n meta_dataset[instance['label']].append(instance['coordinate'])\n\n tasklist = map(\n lambda item, meta_dataset=meta_dataset, logger=logger: (\n item[0],\n clustering_by_label,\n (item[1], item[0], meta_dataset, logger)), meta_dataset.items())\n\n # pool = multiprocessing.pool.Pool(PROCESS_COUNT)\n # clusters = dict(pool.map(map_generate_tuple, tasklist))\n clusters = dict(map(map_generate_tuple, tasklist))\n # pool.close()\n # pool.join()\n\n return clusters"
]
| [
"0.6737825",
"0.642241",
"0.6357446",
"0.6354768",
"0.6282258",
"0.61936045",
"0.6147",
"0.6068303",
"0.58993065",
"0.58847624",
"0.5868176",
"0.5837447",
"0.57862705",
"0.5782571",
"0.57547903",
"0.57543516",
"0.57309794",
"0.5684379",
"0.5684267",
"0.56757045",
"0.563804",
"0.5632539",
"0.5620934",
"0.5614034",
"0.56038177",
"0.559933",
"0.5584878",
"0.5583949",
"0.558254",
"0.55803144"
]
| 0.6725608 | 1 |
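The row above pairs a k-medoids (CLARANS-style) docstring with its cluster-assignment step: every point joins the cluster of its nearest medoid, and clusters that capture no object are dropped. A minimal standalone sketch of that step, assuming plain Python lists and a local squared-Euclidean helper in place of the library's euclidean_distance_square (the helper, the function name assign_to_medoids, and the sample points are illustrative, not from the source):

def squared_distance(a, b):
    # Squared Euclidean distance between two equal-length sequences.
    return sum((ai - bi) ** 2 for ai, bi in zip(a, b))

def assign_to_medoids(points, medoid_indices):
    # Group point indices by their nearest medoid, mirroring the assignment step above.
    clusters = [[] for _ in medoid_indices]
    belong = [0] * len(points)
    for p_idx, point in enumerate(points):
        best = min(range(len(medoid_indices)),
                   key=lambda m: squared_distance(point, points[medoid_indices[m]]))
        clusters[best].append(p_idx)
        belong[p_idx] = best
    # Drop clusters that captured no points, as the document code does.
    return [c for c in clusters if c], belong

# Example: medoids at indices 0 and 2 split four points into two clusters.
points = [(0.0, 0.0), (0.1, 0.2), (5.0, 5.1), (5.2, 4.9)]
print(assign_to_medoids(points, [0, 2]))  # ([[0, 1], [2, 3]], [0, 0, 1, 1])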
! Finds another nearest medoid for the specified point that is different from the specified medoid. | def __find_another_nearest_medoid(self, point_index, current_medoid_index):
    other_medoid_index = -1
    other_distance_nearest = float('inf')
    for index_medoid in self.__current:
        if index_medoid != current_medoid_index:
            # Distance from the point to this candidate medoid.
            other_distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[index_medoid])
            if other_distance_candidate < other_distance_nearest:
                other_distance_nearest = other_distance_candidate
                other_medoid_index = index_medoid
    return other_medoid_index
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __find_another_nearest_medoid(self, point_index, current_medoid_index):\n other_medoid_index = -1\n other_distance_nearest = float(\"inf\")\n for index_medoid in self.__current:\n if index_medoid != current_medoid_index:\n other_distance_candidate = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[current_medoid_index],\n )\n\n if other_distance_candidate < other_distance_nearest:\n other_distance_nearest = other_distance_candidate\n other_medoid_index = index_medoid\n\n return other_medoid_index",
"def FindClosestPoint(self, ):\n ...",
"def nearest_neighbor_within(others, point, max_distance):\n search_region = point.buffer(max_distance)\n interesting_points = search_region.intersection(MultiPoint(others))\n \n if not interesting_points:\n closest_point = None\n elif isinstance(interesting_points, Point):\n closest_point = interesting_points\n else: \n distances = [point.distance(ip) for ip in interesting_points\n if point.distance(ip) > 0]\n closest_point = interesting_points[distances.index(min(distances))]\n \n return closest_point",
"def find_nearest_d(self, point, layers):\n if isinstance(point, tuple):\n point = geojson.Point(coordinates=[point[0], point[1]])\n if len(point) == 3:\n point = self.transform(point, point[2], self.db_proj)\n gd = self.find_nearest(point, layers)\n if gd:\n return gd, distance(point, gd.data)\n else:\n return None, None",
"def FindClosestInsertedPoint(self, ):\n ...",
"def find_closest_point(point, street, streetvolume):\r\n streetdf = streetvolume[streetvolume['streetname'] == street]\r\n if streetdf.shape[0] == 0:\r\n streetdf = streetvolume\r\n streetdf['pdistance'] = streetdf['geometry'].apply(lambda x: point.distance(x))\r\n streetdf.sort_values(by = 'pdistance', ascending = True, inplace = True)\r\n return streetdf['lineid'].iloc[0]",
"def findNewMedoid(self, group):\n\n each_sum_dists = [sum((self.distMat.dist(m, e) for e in group)) for m in group]\n best_medoid = group[each_sum_dists.index(max(each_sum_dists))]\n\n return best_medoid",
"def closest_object(geometries, point): \n min_dist, min_index = min((point.distance(geom), k) \n for (k, geom) in enumerate(geometries))\n \n return geometries[min_index], min_dist, min_index",
"def nearest(source):\n def mycmp(a,b):\n return -cmp(a[1],b[1])\n dmin = 999.999\n smin = 'Unknown'\n if len(stars_) == 0:\n print \"No stars have been selected, go use 'stars()'\"\n return\n sdlist=[]\n for s in stars_:\n d = distance(s[0],source)\n sdlist.append((s[0],d))\n if d < dmin:\n dmin = d\n smin = s[0]\n sdlist.sort(mycmp)\n for sd in sdlist:\n print \"%s at %g\" % (sd[0],sd[1])\n print \"Nearest object from stars() to %s is %s at %g deg\" % (source,smin,dmin)",
"def closest_point_on_segment(point, segment):\n a, b = segment\n p = closest_point_on_line(point, segment)\n d = distance_point_point_sqrd(a, b)\n d1 = distance_point_point_sqrd(a, p)\n d2 = distance_point_point_sqrd(b, p)\n if d1 > d or d2 > d:\n if d1 < d2:\n return a\n return b\n return p",
"def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))",
"def nearest_neigh(self, atom):\n atoms = self.hutch.get_atoms_in_same_hutch(atom)[:]\n if atom in atoms: atoms.remove(atom)\n\n # This generation of nearby hutches isn't perfect but it will work\n rots = [(1,0,0),(0,1,0),(0,0,1)]\n i = 0\n while len(atoms) == 0:\n hutch = ((hutch[0]+rots[i][0])%self.hutch.nhutchs,(hutch[1]+rots[i][1])%self.hutch.nhutchs,(hutch[2]+rots[i][2])%self.hutch.nhutchs)\n i = (i+1) % 3\n atoms = self.hutch.hutchs[hutch]\n if atom in atoms: atoms.remove(atom)\n start = atoms[0]\n\n atoms = self.get_atoms_in_cutoff(atom,self.dist(atom,start))\n #if atom in atoms: atoms.remove(atom)\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom,atomi)\n if dt < d:\n d = dt\n a = atomi\n return a",
"def pointfind2(plat, plon, lat, lon, pdif=1):\n\n\tdist_min = 1000000.\n\t\n\t\n\tfor i in range(lon.shape[0]):\n\t\tfor j in range(lon.shape[1]):\n\t\t\tdist = Ngl.gc_dist(plat,plon,lat[i,j],lon[i,j])\n\t\t\tif dist_min > dist:\n\t\t\t\tdist_min = dist\n\t\t\t\ti_min = i\n\t\t\t\tj_min = j\n\t\t\t\tlat_min = lat[i,j]\n\t\t\t\tlon_min = lon[i,j]\n\t\n\tprint(i_min,j_min,lat_min,lon_min)\n\tgg1 = i_min, j_min\n\t\n\treturn(gg1, lat_min, lon_min)",
"def getNearestEdge(self, point):\n edge = mm.idx.nearest((point.getPoint().x, point.getPoint().y), objects=True)\n edges = [e.object for e in edge]\n if len(edges) == 1:\n result = edges[0]\n else:\n dist = 99999999999999999999999999999999999999999\n for edge in edges:\n distance = point.getPoint().distance(edge.getGeometry())\n if distance < dist:\n dist = distance\n result = edge\n return result",
"def _closest_opponent_to_object(self, raw_obs, o):\n min_d = None\n closest = None\n for p in raw_obs['right_team']:\n d = self._object_distance(o, p)\n if min_d is None or d < min_d:\n min_d = d\n closest = p\n assert closest is not None\n return closest",
"def closest_point(point, points):\n return points[cdist([point], points).argmin()]",
"def closest_point_in_cloud(point, cloud):\n data = sort_points(point, cloud)\n return data[0]",
"def _nearest_to_point(self, point):\n ptvertex = point.get_vertex(crs=self.crs)\n segments = zip(self.vertices.slice(0, -1), self.vertices.slice(1, 0))\n\n if isinstance(self.crs, CartesianCRS):\n func = _cvectorgeo.pt_nearest_planar\n def func(seg):\n return _cvectorgeo.pt_nearest_planar(ptvertex[0], ptvertex[1],\n seg[0][0], seg[0][1], seg[1][0], seg[1][1])\n else:\n fwd = self.crs.forward\n inv = self.crs.inverse\n def func(seg):\n return _cvectorgeo.pt_nearest_proj(fwd, inv, ptvertex,\n seg[0], seg[1], tol=0.01)\n\n point_dist = map(func, segments)\n min_point = None\n min_dist = -1.0\n for i, (point, dist) in enumerate(point_dist):\n if dist < min_dist or (i == 0):\n min_point = point\n min_dist = dist\n\n return min_dist, min_point",
"def findNearPointOnLine(node1, node2, point):\n p=point[0]\n q=point[1]\n a=node1[0]\n b=node1[1]\n c=node2[0]\n d=node2[1]\n \n x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p\n y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q\n \n return x, y",
"def closest_point(graph, point_3d):\n current_point = (point_3d[0], point_3d[1])\n closest_point = None\n dist = 100000\n for p in graph.nodes:\n d = LA.norm(np.array(p) - np.array(current_point))\n if d < dist:\n closest_point = p\n dist = d\n return closest_point",
"def find_closest_points_gensim(sample, records, excluding, k, gensimdata):\n #vec_bow = dictionary.doc2bow(doc.lower().split())\n #vec_lsi = lsi[vec_bow] # convert the query to LSI space\n vec_representation = gensimdata.transform(sample)\n sims = gensimdata.index[vec_representation] # perform a similarity query against the corpus\n sims = sorted(enumerate(sims), key=lambda item: -item[1])",
"def replace_nearest(word): \n nearest = spellcheck.correction(word)\n #When there is no valid word, the nearest word\n #is the same as the original\n if word == nearest:\n #This implies we need to try splitting it\n return split_word(word)\n return nearest",
"def getNearestNode(self, point):\n nodes = list(mm.nodeidx.nearest((point.getPoint().x, point.getPoint().y)))\n return self.node_counter__node.get(nodes[0])",
"def test_nearest_location_odd():\n assert nearest_location([(3, 6), (9, 13)], 7) == 0\n assert nearest_location([(3, 6), (9, 13)], 7, 1) == 1",
"def k_nearest(node, pt, k, result):\n if node.items:\n visit_k_nearest(node, pt, k, result)\n return 1\n else:\n dx = pt[node.cutdim] - node.cutval\n if dx <= 0:\n near = node.left\n far = node.right\n else:\n near = node.right\n far = node.left\n ct_near = k_nearest(near, pt, k, result)\n # check if we found results, \n # if we have sufficient results and the closest of these\n # is closer than the split line, we do not have to search further\n if result and len(result) >= k and pow(dx, 2) >= result[0][0]:\n return ct_near \n ct_far = k_nearest(far, pt, k, result)\n return ct_near + ct_far",
"def FindClosestPointWithinRadius(self, p_float, , p_float_4):\n ...",
"def _partition_nearest(self, medoids, dists, only_these=set()):\n if len(only_these) == 0:\n allowed_inds = self._not_ignored_inds\n else:\n allowed_inds = self._not_ignored_inds & only_these\n closest_medoid_ind = np.argmin(dists[:,medoids], 1) # If len(medoids)==3, would look like [2,1,1,0,1,2,...].\n clusts = [[] for i in medoids]\n for node_ind, med_ind in enumerate(closest_medoid_ind):\n if node_ind in allowed_inds:\n clusts[med_ind].append(node_ind)\n return clusts",
"def __optimize_configuration(self):\n index_neighbor = 0\n counter = 0\n while index_neighbor < self.__maxneighbor:\n # get random current medoid that is to be replaced\n current_medoid_index = self.__current[\n random.randint(0, self.__number_clusters - 1)\n ]\n current_medoid_cluster_index = self.__belong[current_medoid_index]\n\n # get new candidate to be medoid\n candidate_medoid_index = random.randint(\n 0, len(self.__pointer_data) - 1\n )\n\n while candidate_medoid_index in self.__current:\n candidate_medoid_index = random.randint(\n 0, len(self.__pointer_data) - 1\n )\n\n candidate_cost = 0.0\n for point_index in range(0, len(self.__pointer_data)):\n if point_index not in self.__current:\n # get non-medoid point and its medoid\n point_cluster_index = self.__belong[point_index]\n point_medoid_index = self.__current[point_cluster_index]\n\n # get other medoid that is nearest to the point (except current and candidate)\n other_medoid_index = self.__find_another_nearest_medoid(\n point_index, current_medoid_index\n )\n other_medoid_cluster_index = self.__belong[\n other_medoid_index\n ]\n\n # for optimization calculate all required distances\n # from the point to current medoid\n distance_current = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[current_medoid_index],\n )\n\n # from the point to candidate median\n distance_candidate = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[candidate_medoid_index],\n )\n\n # from the point to nearest (own) medoid\n distance_nearest = float(\"inf\")\n if (point_medoid_index != candidate_medoid_index) and (\n point_medoid_index != current_medoid_cluster_index\n ):\n distance_nearest = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[point_medoid_index],\n )\n\n # apply rules for cost calculation\n if point_cluster_index == current_medoid_cluster_index:\n # case 1:\n if distance_candidate >= distance_nearest:\n candidate_cost += (\n distance_nearest - distance_current\n )\n\n # case 2:\n else:\n candidate_cost += (\n distance_candidate - distance_current\n )\n\n elif point_cluster_index == other_medoid_cluster_index:\n # case 3 ('nearest medoid' is the representative object of that cluster and object is more\n # similar to 'nearest' than to 'candidate'):\n if distance_candidate > distance_nearest:\n pass\n\n # case 4:\n else:\n candidate_cost += (\n distance_candidate - distance_nearest\n )\n\n if candidate_cost < 0:\n counter += 1\n # set candidate that has won\n self.__current[\n current_medoid_cluster_index\n ] = candidate_medoid_index\n\n # recalculate clusters\n self.__update_clusters(self.__current)\n\n # reset iterations and starts investigation from the begining\n index_neighbor = 0\n\n else:\n\n index_neighbor += 1\n\n print(\"Medoid set changed {0} times\".format(counter))",
"def get_nearest(infected_coordinates, uninfected_coordinates, d):\n # Create tree from the GPS coordinates of uninfected users\n tree = BallTree(uninfected_coordinates, leaf_size=15, metric='haversine')\n indices,distances=tree.query_radius(infected_coordinates, r=d,return_distance=True)\n indices=indices.transpose()\n distances=distances.transpose()\n return indices,distances",
"def find_point(self, point: Point):\n for internal_point in self.points:\n if internal_point == point:\n return internal_point\n return None"
]
| [
"0.72442216",
"0.6210788",
"0.61261934",
"0.6079808",
"0.60431045",
"0.6032891",
"0.60166126",
"0.58206975",
"0.5710699",
"0.569829",
"0.56964684",
"0.56876916",
"0.5655259",
"0.5644433",
"0.5593935",
"0.55899715",
"0.558293",
"0.5577618",
"0.55623066",
"0.55480283",
"0.5545868",
"0.5541472",
"0.54990906",
"0.5488135",
"0.5469087",
"0.54644656",
"0.546263",
"0.5459003",
"0.5451504",
"0.5433838"
]
| 0.7339319 | 0 |
Concatenate n frames before and after the current frame | def concatenate_x_frames(x, y, num_of_frames, is_y=True):
if is_y:
items_x = list()
items_y = list()
for item in range(len(x)):
x_concat = []
for i in range(num_of_frames, len(x[item]) - num_of_frames):
tmp_x = None
is_first = True
# before the current frame
for j in range(num_of_frames):
tmp_x = np.concatenate((tmp_x, x[item][i - num_of_frames + j].T)) if not is_first else x[item][
i - num_of_frames + j]
is_first = False
tmp_x = np.concatenate((tmp_x, x[item][i].T))
# after the current frame
for j in range(num_of_frames):
tmp_x = np.concatenate((tmp_x, x[item][i + j + 1].T))
x_concat.append(tmp_x)
items_y.append(y[item][num_of_frames:len(x[item]) - num_of_frames])
items_x.append(x_concat)
return np.array(items_x), np.array(items_y)
else:
items_x = list()
for item in range(len(x)):
x_concat = []
for i in range(num_of_frames, len(x[item]) - num_of_frames):
tmp_x = None
is_first = True
# before the current frame
for j in range(num_of_frames):
tmp_x = np.concatenate((tmp_x, x[item][i - num_of_frames + j].T)) if not is_first else x[item][
i - num_of_frames + j]
is_first = False
tmp_x = np.concatenate((tmp_x, x[item][i].T))
# after the current frame
for j in range(num_of_frames):
tmp_x = np.concatenate((tmp_x, x[item][i + j + 1].T))
x_concat.append(tmp_x)
items_x.append(x_concat)
return np.array(items_x) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def concat_inputs(context, num_frames, adjacent_frames):\n buffer = context[0:num_frames, :]\n for i in range(0, adjacent_frames*2):\n buffer = np.concatenate((buffer, context[i + 1 : num_frames + i + 1, :]), axis=1) \n return buffer",
"def revert_frames(self, n_frames: int = 25) -> None:\n if self.__layout.count() > 0 and self.__trajectory_writer is not None and self.mol_widget is not None:\n self.__trajectory_writer.pause()\n traj = self.__trajectory_writer.get_trajectory()\n n_back = min(n_frames, len(traj) - 1)\n wanted_frame = traj[-n_back]\n atoms = utils.AtomCollection(traj.elements, wanted_frame)\n # delete all frames after the wanted one\n # slices not supported in python bindings of delitem of trajectory when writing this\n for _ in range(n_back):\n del traj[-1]\n self.mol_widget.update_molecule(atoms=atoms)\n self.__trajectory_writer.unpause()",
"def step(self, frame):\n if not self._stack:\n # Fill stack with copies of first frame if empty.\n self._stack.extend([frame] * (self._num_frames - 1))\n self._stack.append(frame)\n # Match BCAgent's stacking along axis 2.\n stacked_frames = np.stack(self._stack, axis=2)\n\n if not self._flatten:\n return stacked_frames\n else:\n new_shape = stacked_frames.shape[:-2] + (-1,)\n return stacked_frames.reshape(*new_shape)",
"def _postprocess_frames(self, frames):\n num_frames = frames.shape[0]\n if num_frames > 0:\n first_frame = self._frames[0]\n pos_start = self.get_frame_root_pos(first_frame)\n\n for f in range(num_frames):\n curr_frame = frames[f]\n\n root_pos = self.get_frame_root_pos(curr_frame)\n root_pos[0] -= pos_start[0]\n root_pos[1] -= pos_start[1]\n\n root_rot = self.get_frame_root_rot(curr_frame)\n root_rot = pose3d.QuaternionNormalize(root_rot)\n root_rot = motion_util.standardize_quaternion(root_rot)\n\n self.set_frame_root_pos(root_pos, curr_frame)\n self.set_frame_root_rot(root_rot, curr_frame)\n\n return",
"def append_frames(self, motion_frames):\n with self.condition:\n motion_frames = self.filter_images(motion_frames)\n for item in motion_frames:\n self.frames.append(item)\n sys.stdout.flush()\n self.condition.notify()",
"def codons(self, frame):\n start = frame\n while start + 3 <= self.size:\n yield self.sequence[start : start + 3], start\n start += 3",
"def _nextAnimFrame(step=0):\n lfp_frame.set_data(timestamps[step:step+frame_size], lfp[step:step+frame_size])\n r_raw_frame.set_data(timestamps[step:step+frame_size], raw_ripple[step:step+frame_size])\n r_pow_frame.set_data(timestamps[step:step+frame_size], ripple_power[step:step+frame_size])\n lfp_measure.set_text(txt_template % timestamps[step])\n # Updating the limits is needed still so that the correct range of data\n # is displayed! It doesn't update the axis labels though - That's a\n # different ballgame!\n plot_axes.set_xlim(timestamps[step], timestamps[step+frame_size])\n return lfp_frame, r_raw_frame, r_pow_frame, lfp_measure",
"def add_empty_frames(frames, start):\n updated = copy.copy(frames)\n for i in range(1, start):\n updated.insert(0, None)\n return updated",
"def _pad_frames_list(self, frames):\n if len(frames) < config.RGB_N_FRAMES:\n n_pad_frames = config.RGB_N_FRAMES - len(frames)\n for _ in range(n_pad_frames):\n blank_frame = np.zeros((config.RGB_FRAME_HEIGHT, config.RGB_FRAME_WIDTH, config.CHANNELS))\n frames.append(blank_frame)\n\n return frames",
"def _restructure_frames(self, frame_list):\n restructured = []\n for index, (start_frame, end_frame) in enumerate(frame_list, 1):\n if index < self._sub:\n restructured.append([start_frame, end_frame])\n elif index == self._sub:\n restructured.append([start_frame, frame_list[len(frame_list) - 1][1]])\n return restructured",
"def make_frames_list(frames):\n aux = \"\".join([f\"eq(n\\\\,{x})+\" for x in frames])[:-1]\n\n return f\"select='{aux}'\"",
"def show_next_frame(self):\n if self.frames:\n self.config(image=next(self.frames))\n self.after(self.delay, self.show_next_frame)",
"def overlap_and_add(signal, frame_step):\n outer_dimensions = signal.size()[:-2]\n frames, frame_length = signal.size()[-2:]\n\n subframe_length = math.gcd(frame_length, frame_step) # gcd=Greatest Common Divisor\n subframe_step = frame_step // subframe_length\n subframes_per_frame = frame_length // subframe_length\n output_size = frame_step * (frames - 1) + frame_length\n output_subframes = output_size // subframe_length\n\n subframe_signal = signal.view(*outer_dimensions, -1, subframe_length)\n\n frame = torch.arange(0, output_subframes, dtype=torch.int64, device=signal.device).unfold(0, subframes_per_frame, subframe_step)\n frame = frame.contiguous().view(-1)\n\n result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length)\n result.index_add_(-2, frame, subframe_signal)\n result = result.view(*outer_dimensions, -1)\n return result",
"def print_frames(frames):\n for i, frame in enumerate(frames):\n clear_output(wait=True)\n print(frame['frame'])\n print(f\"Episode: {frame['episode']}\")\n print(f\"Timestep: {i + 1}\")\n print(f\"State: {frame['state']}\")\n print(f\"Previous action: {frame['action']}\")\n if frame['action'] == 0:\n print(\"Action is: south\")\n if frame['action'] == 1:\n print(\"Action is: north\")\n if frame['action'] == 2:\n print(\"Action is: east\")\n if frame['action'] == 3:\n print(\"Action is: west\")\n if frame['action'] == 4:\n print(\"Action is: pickup passenger 1 \") \n if frame['action'] == 5:\n print(\"Action is: dropoff passenger 1\")\n if frame['action'] == 6:\n print(\"Action is: pickup passenger 2\")\n if frame['action'] == 7:\n print(\"Action is: dropoff passenger 2\")\n print(f\"Reward: {frame['reward']}\")\n print(f\"Total Reward: {frame['total reward']}\")\n time.sleep(.5)",
"def padFrames(frameList, maxHeight):\n \n # writeText('\\n')\n for i, frame in enumerate(frameList):\n extraSpace = maxHeight - frame.shape[0]\n #frameList[i] = np.lib.pad(frame,((int(np.floor(extraSpace/2)),int(np.ceil(extraSpace/2))),(0,0)),'constant', constant_values=(4000,8000))\n frameList[i] = np.lib.pad(frame,((extraSpace,0),(0,0)),'constant', constant_values=0)\n print('\\rPadding Frames: {:.2f} % done'.format((100.0*((i+1)/len(frameList)))),end='',flush=True)\n print('\\n')\n stack = np.stack(frameList, axis=0)\n \n return stack",
"def add_frame(R):\n\n x_frames.append(copy.copy(R[0]))\n y_frames.append(copy.copy(R[1]))\n z_frames.append(copy.copy(R[2]))",
"def add_frames(\n self : \"animation\",\n frameList : \"list[matplotlib.figure.Figure]\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list(frameList, facecolor=facecolor)",
"def up(n=1):\n f = gdb.selected_frame()\n\n for i in range(n):\n o = f.older()\n if o:\n o.select()\n\n bt = pwndbg.commands.context.context_backtrace(with_banner=False)\n print('\\n'.join(bt))\n\n j()",
"def anim_produce_frame(up_to_line, *fargs):\n #unpack *fargs\n axes,running_reward_exists,running_loss_exists,actions_exists,\\\n running_reward_file,running_loss_file,actions_file,actions_to_plot, \\\n actions_per_log,is_tri,actions_ylim = fargs\n #produce the plots for the current frame\n axis_ind = 0\n if running_reward_exists:\n axes[axis_ind].clear()\n plot_running_reward_on_axis(running_reward_file, axes[axis_ind], up_to_line)\n axis_ind += 1\n if running_loss_exists:\n axes[axis_ind].clear()\n axes[axis_ind+1].clear()\n plot_running_loss_on_axis(running_loss_file, axes[axis_ind],axes[axis_ind+1], up_to_line)\n axis_ind += 2\n if actions_exists:\n axes[axis_ind].clear()\n plot_actions_on_axis(actions_file,axes[axis_ind],is_tri,actions_to_plot=actions_to_plot,\n plot_to_file_line=int(up_to_line*actions_per_log),\n actions_ylim=actions_ylim)",
"def _append_frame(self, observation):\n if self._counter % self._record_every == 0:\n self._frames.append(self._render_frame(observation[self.visual_key]))",
"def _insert_smoothed_frames(self : \"animation\",\n cFrame : \"np.ndarray\"\n ):\n if self._prevFrame is not None:\n finalImage = cFrame.copy()\n transitionFrame = np.zeros_like(finalImage)\n for frameID in range(self._smoothingFrames):\n transitionFrame = self._smoothingFunction(\n self._prevFrame,\n finalImage,\n frameID,\n self._smoothingFrames\n )\n self._write_frame(transitionFrame)",
"def overlap_and_add(signal, frame_step):\n outer_dimensions = signal.size()[:-2]\n frames, frame_length = signal.size()[-2:]\n\n subframe_length = math.gcd(frame_length, frame_step) # gcd=Greatest Common Divisor\n subframe_step = frame_step // subframe_length\n subframes_per_frame = frame_length // subframe_length\n output_size = frame_step * (frames - 1) + frame_length\n output_subframes = output_size // subframe_length\n\n subframe_signal = signal.view(*outer_dimensions, -1, subframe_length)\n\n frame = torch.arange(0, output_subframes).unfold(0, subframes_per_frame, subframe_step)\n frame = signal.new_tensor(frame).long() # signal may in GPU or CPU\n frame = frame.contiguous().view(-1)\n\n result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length)\n result.index_add_(-2, frame, subframe_signal)\n result = result.view(*outer_dimensions, -1)\n return result",
"def set_frames(self, frames, index_start, ambulance_id, xs, ys, display=10):\n\n if not xs: return\n\n curr_index = index_start\n start_position = 0\n end_position = 1\n last_position = len(xs)\n\n # Enumerate the historically most recent coordinates.\n while start_position < end_position:\n\n frames[curr_index][ambulance_id][0] += xs[start_position: end_position]\n frames[curr_index][ambulance_id][1] += ys[start_position: end_position]\n\n if end_position < last_position:\n end_position += 1\n\n if end_position - start_position > display or end_position == last_position:\n start_position += 1\n\n curr_index += 1",
"def printframe(frame,endframe):\n line = \"\\r timeframe: {:d} / {:d}\".format(frame, endframe)\n #print(line),\n sys.stdout.write(line)\n sys.stdout.flush()",
"def padFrames(frameList, maxHeight):\n \n print('\\n')\n for i, frame in enumerate(frameList):\n extraSpace = maxHeight - frame.shape[0]\n #frameList[i] = np.lib.pad(frame,((int(np.floor(extraSpace/2)),int(np.ceil(extraSpace/2))),(0,0)),'constant', constant_values=(4000,8000))\n frameList[i] = np.lib.pad(frame,((extraSpace,0),(0,0)),'constant', constant_values=0)\n print('\\rPadding Frames {:.2f}% done'.format(100.0*((i+1)/len(frameList))),end='', flush=True)\n stack = np.stack(frameList, axis=0)\n \n return stack",
"def stack_fbanks(features, nframes=7):\n assert nframes % 2 == 1, 'number of stacked frames must be odd'\n dim = features.shape[1]\n pad = np.zeros((nframes/2, dim), dtype=features.dtype)\n features = np.concatenate((pad, features, pad))\n aux = np.array([features[i:i-nframes+1]\n for i in xrange(nframes-1)] + [features[nframes-1:]])\n return np.reshape(np.swapaxes(aux, 0, 1), (-1, dim * nframes))\n\n # Old version:\n # b_a = (nframes - 1) / 2\n # fbanks_s = np.zeros((fbanks.shape[0], fbanks.shape[1] * nframes),\n # dtype='float32')\n # for i in xrange(b_a + 1):\n # fbanks_s[i] = np.pad(fbanks[max(0, i - b_a):i + b_a + 1].flatten(),\n # (max(0, (b_a - i) * fbanks.shape[1]),\n # max(0, ((i+b_a+1) - fbanks.shape[0]) * fbanks.shape[1])),\n # 'constant', constant_values=(0, 0))\n # for i in xrange(b_a + 1, fbanks.shape[0] - b_a):\n # fbanks_s[i] = fbanks[i - b_a:i + b_a + 1].flatten()\n # for i in xrange(fbanks.shape[0] - b_a - 1, fbanks.shape[0]):\n # fbanks_s[i] = np.pad(fbanks[max(0, i - b_a):i + b_a + 1].flatten(),\n # (max(0, (b_a - i) * fbanks.shape[1]),\n # max(0, ((i+b_a+1) - fbanks.shape[0]) * fbanks.shape[1])),\n # 'constant', constant_values=(0, 0))\n # return fbanks_s",
"def pad_framewise_output(framewise_output: torch.Tensor, frames_num: int):\n output = F.interpolate(\n framewise_output.unsqueeze(1),\n size=(frames_num, framewise_output.size(2)),\n align_corners=True,\n mode=\"bilinear\").squeeze(1)\n\n return output",
"def pad_framewise_output(framewise_output: torch.Tensor, frames_num: int):\n output = F.interpolate(\n framewise_output.unsqueeze(1),\n size=(frames_num, framewise_output.size(2)),\n align_corners=True,\n mode=\"bilinear\").squeeze(1)\n\n return output",
"def on_pre_render(self, event, signal):\n t = ppb.get_time() - self.start_time\n self.frames += 1\n print(f\"Frame {self.frames} rendered at {t}\")",
"def stack_frame(stacked_frames, frame, is_new):\n if is_new:\n stacked_frames = np.stack(arrays=[frame, frame, frame, frame])\n else:\n stacked_frames[:-1] = stacked_frames[1:]\n stacked_frames[-1] = frame\n\n return stacked_frames"
]
| [
"0.6015416",
"0.5966462",
"0.59200066",
"0.57837373",
"0.57320315",
"0.567933",
"0.56246394",
"0.5597678",
"0.55933285",
"0.5558255",
"0.5511402",
"0.5479374",
"0.5469259",
"0.54489094",
"0.5431185",
"0.5431053",
"0.5430827",
"0.54303914",
"0.5425497",
"0.53956336",
"0.5393957",
"0.5372111",
"0.53650385",
"0.53649455",
"0.53525203",
"0.53497434",
"0.5334496",
"0.5334496",
"0.53191394",
"0.530412"
]
| 0.60769045 | 0 |
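Below is a minimal usage sketch for the context-stacking function in the record above. It assumes `concatenate_x_frames` is already defined or imported exactly as shown in that record; the utterance count, frame count, and feature dimension are made-up toy values.

```python
import numpy as np

# Hypothetical toy data: 2 utterances, each with 10 frames of 3 features,
# plus one label per frame.
x = [np.random.rand(10, 3) for _ in range(2)]
y = [np.arange(10) for _ in range(2)]

# Stack 2 frames of context on each side of every frame.
x_ctx, y_ctx = concatenate_x_frames(x, y, num_of_frames=2, is_y=True)

# Each remaining frame now carries (2*2 + 1) * 3 = 15 features; the first and
# last 2 frames of every utterance are dropped because they lack full context.
print(x_ctx.shape)  # (2, 6, 15)
print(y_ctx.shape)  # (2, 6)
```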
Wraps and indents a string ``s``. | def indent_wrap(s, indent=0, wrap=80):
split = wrap - indent
chunks = [indent * " " + s[i:i + split] for i in range(0, len(s), split)]
return "\n".join(chunks) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wrap_string(input_str):\r\n return textwrap.wrap(input_str, 80)",
"def to_string_wrap(s: str) -> str:\n return f\"to_string({s})\"",
"def indent(string, level=1):\n spaces = ' ' * (level * 4)\n return \"%s%s\" % (spaces, string)",
"def wrap(s, width, hyphen_break=False, break_chrs=''):\n assert type(s) == str\n return [pad_str(substr, width) for substr in\n break_to_width(s, width, hyphen_break=hyphen_break,\n break_chrs=break_chrs)]",
"def _indent(s, width=4, skip_first_line=False):\n lines = s.splitlines(1)\n indentstr = ' '*width\n if skip_first_line:\n return indentstr.join(lines)\n else:\n return indentstr + indentstr.join(lines)",
"def wrap_with_span(string, arg):\n words = arg.split(' ')\n\n for word in words:\n if word[-1].lower() == 's':\n word = word[:-1]\n pattern = re.compile(r'\\b({0}[\\w\\d]*)\\b'.format(word), flags=re.I)\n\n for (match) in re.findall(pattern, string):\n string = re.sub(r'{0}'.format(match),\n '<span>{0}</span>'.format(match), string)\n break;\n\n return string.replace('&#x', '&#x')",
"def indent_multiline_string(in_string, spaces):\n if in_string.find(\"\\n\") != -1:\n return \"\\n\".join([(' ' * spaces) + line for line in in_string.split(\"\\n\")])\n else:\n return in_string",
"def _wrap_command_line(s: str) -> str:\n if not _command_pattern.fullmatch(s):\n return s\n indent = \"\".join(itertools.takewhile(lambda c: c.isspace(), s))\n cmd = s[len(indent):]\n return f\"{indent}pass # {cmd}{_command_escape_comment}\"",
"def str_wrap_double(s):\n s = str(s)\n return '\"' + s + '\"'",
"def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)",
"def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)",
"def prettyPrintStringHelper_ (s, stream, indent, pretty_print=True, indent_additive=4):\r\n stream.write(repr(s))",
"def wrap(self, wrap):\n return f\"| {wrap} |\"",
"def WordWrap(cmd, linelen=80):\n indent = cmd.index(\"(\")+1\n out = []\n first = True\n x = re.compile(\"^(.{,%d})\\0\" % (linelen-indent,))\n while True:\n if not first:\n out.append(\" \" * indent)\n first = False\n m = x.search(cmd)\n if not m:\n parts = cmd.split(\"\\0\", 1)\n out.append(parts[0]+\"\\n\")\n if len(parts) == 1:\n break\n else:\n cmd = parts[1]\n continue\n out.append(m.group(1)+\"\\n\")\n cmd = cmd[m.end():]\n\n return \"\".join(out).replace(\"\\0\", \" \").rstrip(\"\\n\")",
"def _display(s):\n if not isinstance(s, unicode):\n s = s.decode(\"utf-8\")\n s = _indent(_escaped_text_from_text(s, \"whitespace\"), 4)\n if not s.endswith('\\n'):\n s += '\\n'\n return s",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def mywrap(text):\n text = text.replace(\"\\n\\n\", \"\\n\").replace(\"\\n\", \"\\n \").rstrip()\n return text",
"def wrap(cls, text, first=0, indent=15, maxwidth=75):\n outstr = []\n sentence = []\n if not text:\n return \"\"\n for word in text.split():\n if len(\" \".join(sentence)) + len(word) + first > maxwidth:\n outstr.append(\" \".join(sentence))\n sentence = [\" \" * indent, word]\n first = 0\n else:\n sentence.append(word.strip())\n outstr.append(\" \".join(sentence))\n return \"\\n\".join(outstr)",
"def wrap(self, ind, text, rhs=0):\n l = 79 - ind * self.indSize - rhs\n return textwrap.wrap(text, l)",
"def spacify(string):\n return \" \" + \" \".join(string.splitlines(True))",
"def wordwrap(value, arg=80):\n\treturn \"\\n\".join(textwrap.wrap(value, int(arg)))",
"def indent(str, level):\n if level == 0: return str\n return \"\\n\".join(\"\\t\" * level + line for line in str.splitlines())",
"def indent(input_string):\n return '\\n '.join(input_string.split('\\n'))",
"def wrappedchars(string,chars):\n index = string.index(chars)\n if index != 0:\n chars = string[index-1] + chars\n else:\n chars = \" \" + chars\n if index + len(chars) + 1 <= len(chars):\n chars += string[index + len(chars) + 1]\n else:\n chars += \" \"\n return chars",
"def wrap(self, text, width=None, indent=None):\n width = width if width is not None else self.options.wrap_length\n indent = indent if indent is not None else self.indentation\n initial_indent = self.initial_indentation\n return textwrap.fill(text, width=width,\n initial_indent=initial_indent,\n subsequent_indent=indent)",
"def wrap_message(message, width=MAX_LINE_LENGTH):\r\n lines = message.split('\\n')\r\n wrapped_lines = [textwrap.fill(\r\n line, width, expand_tabs=False, replace_whitespace=False, drop_whitespace=False, break_on_hyphens=False\r\n ) for line in lines]\r\n wrapped_message = '\\n'.join(wrapped_lines)\r\n\r\n return wrapped_message",
"def indent_lines(s, n):\n return \"\\n\".join(map(lambda line: \" \" * n + line, \n s.split('\\n')))",
"def wrap(cls, color, s):\n if platform.system() == 'Windows':\n # If we want to support colored printing on Windows.\n # The easiest way is https://github.com/tartley/colorama,\n # but this introduces extra dependency.\n # The implementation for Windows is not small enought to self-shipped.\n return s\n\n return \"{}{}{}\".format(color, s, cls.default)",
"def append(self, s):\n self.result += ' ' * self.indent + s + '\\n'",
"def encode(s):\n return ' '.join(encode_to_words(s))"
]
| [
"0.7181232",
"0.69954926",
"0.6415862",
"0.6409179",
"0.6396516",
"0.6327478",
"0.63242805",
"0.6322026",
"0.6222691",
"0.6214404",
"0.6214404",
"0.61937743",
"0.6112519",
"0.60538924",
"0.6023239",
"0.60085034",
"0.59799033",
"0.5958307",
"0.59397006",
"0.5934544",
"0.58830047",
"0.5843594",
"0.5833606",
"0.5759392",
"0.5738587",
"0.5703554",
"0.568123",
"0.56702393",
"0.5665626",
"0.5645951"
]
| 0.7855075 | 0 |
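A short, hedged example of the chunk-wrapping helper in the record above; it assumes `indent_wrap` is in scope and only illustrates the fixed-width splitting, since the function breaks at character boundaries rather than at whitespace.

```python
text = "abcdefghij" * 10  # 100 characters, no spaces

wrapped = indent_wrap(text, indent=4, wrap=20)
for line in wrapped.split("\n"):
    # 4 spaces of indent plus at most wrap - indent = 16 payload characters.
    print(repr(line), len(line))
```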
Recursively traverse through iterable object ``d`` and convert all occurring ndarrays to lists to make it JSON serializable. | def serialize_ndarrays(d):
def dict_handler(d):
return d.items()
handlers = {list: enumerate, tuple: enumerate,
set: enumerate, frozenset: enumerate,
dict: dict_handler}
def serialize(o):
for typ, handler in handlers.items():
if isinstance(o, typ):
for key, val in handler(o):
if isinstance(val, np.ndarray):
o[key] = val.tolist()
else:
o[key] = serialize_ndarrays(o[key])
return o
return serialize(d) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_list_from_dict(d, l):\n\n new_list = []\n\n for val in l:\n subdict = d[val]\n inner_list = []\n for subval in l:\n inner_list.append(subdict[subval])\n new_list.append(inner_list)\n\n return np.array(new_list)",
"def traverse_data(datum, is_numpy=is_numpy, use_numpy=True):\n is_numpy = is_numpy and use_numpy\n if is_numpy and not any(isinstance(el, (list, tuple)) for el in datum):\n return transform_array(np.asarray(datum))\n datum_copy = []\n for item in datum:\n if isinstance(item, (list, tuple)):\n datum_copy.append(traverse_data(item))\n elif isinstance(item, float):\n if np.isnan(item):\n item = 'NaN'\n elif np.isposinf(item):\n item = 'Infinity'\n elif np.isneginf(item):\n item = '-Infinity'\n datum_copy.append(item)\n else:\n datum_copy.append(item)\n return datum_copy",
"def _tolist(ndarray):\n elem_list = []\n for sub_elem in ndarray:\n if isinstance(sub_elem, sio.matlab.mio5_params.mat_struct):\n elem_list.append(_todict(sub_elem))\n elif isinstance(sub_elem, np.ndarray):\n elem_list.append(_tolist(sub_elem))\n else:\n elem_list.append(sub_elem)\n return elem_list",
"def jsonify(data):\n\n for key in data:\n if type(data[key]) == numpy.ndarray:\n data[key] = data[key].tolist()\n\n if isinstance(data[key], list):\n data[key] = [0 if isinstance(x, float) and math.isnan(x) else x for x in data[key]]\n\n return data",
"def _walk(self, d, depth=0):\n\n output = ''\n indent = 3\n header_width = 35 - depth*indent\n\n for k, v in sorted(d.items(), key=lambda x: x[0]):\n if isinstance(v, dict):\n output += \"\".ljust(depth * indent)+k+'\\n'\n output += self._walk(v, depth + 1)\n else:\n if isinstance(v, np.ndarray):\n # np array or matrix\n shape = v.shape\n if len(shape) == 1:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"array (%d)\" % (v.shape[0]) + '\\n'\n\n elif len(shape) == 2:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"matrix (%d,%d)\" % (v.shape[0], v.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], str):\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : \" + str(item) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], np.ndarray):\n # List of arrays\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n if len(item.shape) == 1:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : array (%d)\" % (item.shape[0]) + '\\n'\n\n elif len(item.shape) == 2:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : matrix (%d,%d)\" % (item.shape[0], item.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], dict):\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent) + \"[\"+str(item_id)+\"]\" + '\\n'\n output += self._walk(item, depth + 2)\n\n else:\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : \" + str(v) + '\\n'\n\n return output",
"def _tolist(self, ndarray, parameter_list):\n self.elem_list = parameter_list\n for sub_elem in ndarray:\n if isinstance(sub_elem, scipy.io.matlab.mio5_params.mat_struct):\n self.elem_list.append(self._todict(sub_elem))\n elif isinstance(sub_elem, numpy.ndarray):\n self._tolist(sub_elem, self.elem_list)\n else:\n self.elem_list.append(sub_elem)\n return self.elem_list",
"def _json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n todecode = dct['__ndarray__'].encode(\"ascii\")\n data = base64.b64decode(todecode)\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def _deep_list(array_like):\n if isinstance(array_like, (list, tuple)):\n return list(map(_deep_list, array_like))\n return array_like",
"def deep_flatten(iterable):\n pass",
"def __json_data__(self):\n if schema.is_dense(self.namespace):\n dense_records = dict()\n for field in Observation._fields:\n dense_records[field] = []\n\n for obs in self.data:\n for key, val in six.iteritems(obs._asdict()):\n dense_records[key].append(serialize_obj(val))\n\n return dense_records\n\n else:\n return [serialize_obj(_) for _ in self.data]",
"def flatten(d):\n\n c = {}\n\n def _flatten(parents, items):\n for k, v in items:\n cur = parents + [k]\n if isinstance(v, list):\n _flatten(cur, enumerate(v))\n elif isinstance(v, dict):\n _flatten(cur, v.items())\n else:\n if v is None:\n cur.append('$NULL')\n v = ''\n name = str(cur[0]) + ''.join(['['+str(x)+']' for x in cur[1:]])\n c[name] = v\n \n _flatten([], d.items())\n\n return c",
"def _serialize(self, data):\n data = [np.array(j) for j in data]\n self._data_shape_list = [j.shape for j in data]\n serialized_data = [j.ravel() for j in data]\n serialized_data = np.hstack(serialized_data)\n return serialized_data",
"def dict_to_array(self, d):\n n_fit_p = len(self.fit_parameters)\n n_nui_p = len(self.nuisance_parameters)\n n_wc = len(self.fit_wc_names)\n arr = np.zeros(n_fit_p + n_nui_p + n_wc)\n arr[:n_fit_p] = [d['fit_parameters'][p] for p in self.fit_parameters]\n arr[n_fit_p:n_fit_p+n_nui_p] = [d['nuisance_parameters'][p] for p in self.nuisance_parameters]\n arr[n_fit_p+n_nui_p:] = [d['fit_wc'][c] for c in self.fit_wc_names]\n return arr",
"def flatten_list(l):\n obj = []\n\n def recurse(ll):\n if isinstance(ll, list) or isinstance(ll, np.ndarray):\n for i, _ in enumerate(ll):\n recurse(ll[i])\n else:\n obj.append(ll)\n\n recurse(l)\n return obj",
"def deserialize(self, indv):\n for key in indv:\n if key in self.evolvables and not key in self.scalars:\n if not isinstance(indv[key], np.ndarray):\n indv[key] = np.array(indv[key])\n\n return indv",
"def jsonify_01(data):\n if isinstance(data,dict):\n serialized_summary= dict()\n for key,value in data.items():\n if isinstance(value, list):\n value = [jsonify_01(item) for item in value]\n elif isinstance(value, list):\n value = jsonify_01(value)\n elif type(value).__module__=='numpy':\n value=value.tolist()\n else:\n if isinstance(value, dict):\n for key2,value2 in value.items():\n value[key2]=jsonify_01(value2)\n if isinstance(value,scipy.sparse.coo.coo_matrix):\n value=\"not serializable\"\n serialized_summary[key]=value\n elif type(data).__module__=='numpy':\n serialized_summary=data.tolist()\n else:\n serialized_summary=data\n return serialized_summary",
"def serialize(self, data):\n if isinstance(data, dict):\n return json.dumps(\n {\n key: value.tolist() if isinstance(value, np.ndarray) else value\n for key, value in data.items()\n }\n )\n\n if hasattr(data, \"read\"):\n return data.read()\n\n if isinstance(data, np.ndarray):\n return json.dumps(data.tolist())\n\n return json.dumps(data)",
"def dlist(src):\n if isinstance(src, dict):\n for k in src:\n src[k] = dlist(src[k])\n if set(src) == set([str(k) for k in range(len(src))]):\n src = [src[str(k)] for k in range(len(src))]\n return src",
"def json_ld(self, output, **kwargs):\n raw_json_ld = output.serialize(format='json-ld',\n context=self.context).decode()\n # if there are fields that should be returned as arrays convert all\n # non-array fields to an array\n if not self.array_fields:\n return raw_json_ld\n json_data = json.loads(raw_json_ld)\n for i, item in enumerate(json_data['@graph']):\n if item.get(\"@type\") in self.array_fields:\n test_flds = self.array_fields[item['@type']]\n for key, val in item.items():\n if key in test_flds and not isinstance(val, list):\n json_data['@graph'][i][key] = [val]\n # print(json.dumps(json_data, indent=4))\n return json.dumps(json_data, indent=4)",
"def flatten_list_gen(alist):\n for item in alist:\n if isinstance(item, list) or isinstance(item, np.ndarray):\n for i in flatten_list_gen(item):\n yield i\n else:\n yield item",
"def vectorized_flatten(self, data):\n\n # TODO: finish this.\n flatten_data = data.flatten()\n flatten_data = flatten_data[flatten_data>0]\n return Counter(flatten_data).items()",
"def info(d, return_dict=False, print_=True):\n info_ = {}\n for k,v in d.items():\n if isinstance(v, dict):\n info_.update(info(v))\n else:\n info_[k] = {\n 'size': np.asarray(v).ravel().shape,\n 'shape' :np.asarray(v).shape,\n 'dtype': np.asarray(v).dtype.name\n }\n if print_:\n _v = np.asarray(v)\n print('key -', k)\n print('dtype -', _v.dtype.name)\n print('size -', np.asarray(v).ravel().shape)\n print('shape -', _v.shape)\n print()\n if return_dict:\n return info_",
"def do_flatten(obj):\n if type(obj) == list:\n return np.array(obj).flatten()\n return obj.flatten()",
"def flatten_array(nested, prefix=''):\n result = dict()\n for index in range(len(nested)):\n prefix_key = '__'.join([prefix, str(index)]) if len(prefix) else str(index)\n\n value = nested[index]\n if isinstance(value, (list, np.ndarray)):\n result.update(flatten_array(value, prefix=prefix_key))\n\n elif isinstance(value, dict):\n result.update(flatten_dict(value, prefix=prefix_key))\n\n else:\n result[prefix_key] = value\n\n return result",
"def gen_flatten_data(data):\r\n # python 3 hack to basestr\r\n try:\r\n u = str\r\n except NameError:\r\n # 'unicode' is undefined, must be Python 3\r\n check_type = str\r\n else:\r\n # 'unicode' exists, must be Python 2\r\n\r\n check_type = str\r\n\r\n for el in data:\r\n if isinstance(el, collections.Iterable) and not isinstance(el, check_type):\r\n for sub in gen_flatten_data(el):\r\n yield sub\r\n else:\r\n yield el",
"def flatten(parsed):\r\n parsedList = [parsed] if isinstance(parsed, dict) else parsed\r\n result = []\r\n for dico in parsedList:\r\n #Sort keys between actual values and nested dicts\r\n listKeys = []\r\n standardKeys = []\r\n for key in dico:\r\n if isinstance(dico[key], list):\r\n listKeys.append(key)\r\n else:\r\n standardKeys.append(key)\r\n if not listKeys:\r\n #Terminal condition: no nested dict\r\n result.append(dico)\r\n else:\r\n partialResult = [{x:dico[x] for x in standardKeys}]\r\n for key in listKeys:\r\n #Create a dict with the keys from partialResult and\r\n #from the nested dicts\r\n recurs = Bricks.flatten(dico[key])\r\n partialResult = [{**x, **y} for x in partialResult for y in recurs]\r\n result.extend(partialResult)\r\n return result"
]
| [
"0.5972985",
"0.5779406",
"0.5766659",
"0.5579728",
"0.55737484",
"0.5479306",
"0.5460912",
"0.5404746",
"0.5390378",
"0.5390378",
"0.5390378",
"0.53863674",
"0.53602403",
"0.5325386",
"0.5264046",
"0.52487487",
"0.52456796",
"0.52453446",
"0.5200569",
"0.519804",
"0.5175681",
"0.5150198",
"0.5146969",
"0.5091723",
"0.5065577",
"0.506215",
"0.5030594",
"0.50276506",
"0.5014664",
"0.49632576"
]
| 0.73105025 | 0 |
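The sketch below shows one way the serializer in the record above might be used ahead of `json.dumps`. It assumes `serialize_ndarrays` and NumPy are importable; the payload keys are invented for illustration.

```python
import json
import numpy as np

payload = {
    "weights": np.arange(3),        # ndarray -> becomes [0, 1, 2]
    "nested": {"cov": np.eye(2)},   # ndarrays inside nested containers too
    "tag": "run-1",                 # plain values pass through unchanged
}

clean = serialize_ndarrays(payload)
print(json.dumps(clean))  # would otherwise raise a TypeError on the arrays
```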
Populate dictionary with data from a given dict ``d``, and check if ``d`` has required and optional keys. Set optional keys to their defaults if not present. If input ``d`` is None and ``required_keys`` is empty, just return the defaults in ``opt_keys``. | def fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):
if required_keys is None:
required_keys = []
if opt_keys is None:
opt_keys = {}
if d is None:
if not required_keys:
if opt_keys is None:
raise TypeError("`d` and `opt_keys` are both None.")
return opt_keys.copy()
else:
raise ValueError("`d` is None, but `required_keys` is not empty.")
d = d.copy()
out = {}
# Set required keys
for key in required_keys:
if key in d:
out[key] = d.pop(key)
else:
raise KeyError("Dict is missing required key '{}'.".format(key))
# Set optional values, if key not given
for key, val in opt_keys.items():
out[key] = d.pop(key, val)
# Complain when extra keys are left and noleft is True
if d and noleft:
raise KeyError("Leftover keys ['{}'].".format(
"', '".join(list(d.keys()))))
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_dict(data, required=None, defaults=None):\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n defaults.update(data)\n return defaults",
"def setup_dict(data, required=None, defaults=None):\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n defaults.update(data)\n return defaults",
"def all_keys_not_none(d: dict, required: list):\n passed = 0\n for r in required:\n v = d.get(r)\n if v is not None:\n passed += 1\n\n return len(required) == passed",
"def from_dict(cls, d):\n kwargs = {k: d[k] for k in cls.required()} # required\n kwargs.update({k: d[k] for k in cls.optional() if k in d}) # optional\n return cls(**kwargs)",
"def dict_get_first_of(d: dict, key, *opt_keys, return_key: bool = False, **kw):\n knf = KeyNotFound()\n k = key\n v = d.get(key, knf)\n n = len(opt_keys)\n i = 0\n while isinstance(v, KeyNotFound) and i < n:\n k = opt_keys[i]\n v = d.get(k, knf)\n i += 1\n\n if isinstance(v, KeyNotFound):\n if 'default' in kw:\n _def = dict_default(kw['default'])\n if return_key:\n return None, _def\n else:\n return _def\n else:\n raise KeyError('none of the provided keys found in the dict')\n if return_key:\n return k, v\n else:\n return v",
"def subdict(d: dict, *args, keys: list = None, default=None) -> dict:\n keys = set(keys or []) | set(args)\n # TODO: way to not provide default and raise KeyError on missing keys\n # TODO: move to openeo-python-driver?\n return {k: d.get(k, default) for k in keys}",
"def underride(d, **options):\n if d is None:\n d = {}\n\n for key, val in options.items():\n d.setdefault(key, val)\n\n return d",
"def _dict_sanity_check(data, mandatory_keys, optional_keys, obj=None):\n # Both mandatory and optional key lists contain tuples, with key name being the first\n # element in each tuple. The validator is not needed for the mandatory / optional key\n # check, so we can ignore it here.\n missing_keys = [k for k, _ in mandatory_keys if k not in data]\n if missing_keys:\n raise ValueError(f\"missing mandatory key(s): {', '.join(missing_keys)}\")\n\n # Create a lookup of key name to validator function. The lookup will have an entry for all\n # possible keys, so we can also use it to check if we have any invalid keys.\n keys_to_validators = dict(mandatory_keys + optional_keys)\n always_allowed = [\"id\", \"_created\", \"_updated\", \"_links\", \"_embedded\"]\n invalid_keys = [k for k in data\n if k not in keys_to_validators and k not in always_allowed]\n if invalid_keys:\n raise ValueError(f\"invalid key(s) in request body: {', '.join(invalid_keys)}\")\n\n # Now we individually call the validators on all values in the data, producing a useful\n # error message if possible\n res = {}\n for key, value in data.items():\n try:\n res[key] = keys_to_validators[key](value)\n except ValueError as ex:\n raise ValueError(f\"key '{key}': {str(ex)}\")\n\n now_time_string = datetime.datetime.now().isoformat()\n if obj is None:\n res['_created'] = now_time_string\n res['_updated'] = now_time_string\n return res",
"def do_attrs(d, *ks, **kwargs):\n if not isinstance(d, dict):\n raise TypeError(\"expected dict as first argument, \"\n \"got {}\".format(type(d)))\n if len(ks) == 1 and isinstance(ks[0], collections.Iterable):\n ks = ks[0]\n if \"default\" in kwargs:\n default = kwargs['default']\n ret_val = map(lambda kw: d.get(kw, default), ks)\n else:\n try:\n ret_val = map(lambda kw: d[kw], ks)\n except KeyError:\n t, v, tb = sys.exc_info()\n msg = \"{} not found in {}\".format(v, d.keys())\n raise t, msg, tb\n return ret_val",
"def any_keys_not_none(d: dict, required: list):\n passed = 0\n for r in required:\n v = d.get(r)\n if v is not None:\n passed += 1\n\n if len(required) == 1 and passed == len(required): # Exclusion for sequence with 1 element\n return True\n\n return 0 < passed < len(required)",
"def setup_dict(self, keys=None):\n keys = keys or []\n return {key: True for key in keys}",
"def validate_required_keys(input_dict, filename, required_keys):\n passed = True\n for req_key in required_keys:\n if not input_dict.get(req_key):\n print(\"{}: missing required key {}\".format(filename, req_key))\n passed = False\n return passed",
"def setdefault(*dicts):\n param_complete = dict(dicts[0])\n for d in dicts[1:]:\n for k,v in d.items():\n param_complete.setdefault(k, v)\n\n return param_complete",
"def _get_option_level_dict(self, start_dict, options, is_in_reverse, is_assign):\n present_dict = start_dict\n has_default, default_N = self._check_global_dict_empty(present_dict)\n if len(options) == 0 and is_assign and has_default:\n # for default_N option, False will not be used.\n present_dict[\"option\"][\"default_\" + default_N] = {\n True: {\"defined\": [], \"undefined\": [], \"option\": {}, \"is_replace\": True},\n False: {\"defined\": [], \"undefined\": [], \"option\": {}, \"is_replace\": False},\n }\n present_dict = present_dict[\"option\"][\"default_%d\" % default_N][True]\n for option, reverse_stat in zip(options, is_in_reverse):\n if option not in present_dict[\"option\"]:\n present_dict[\"option\"][option] = {\n True: {\"defined\": [], \"undefined\": [], \"option\": {}, \"is_replace\": False},\n False: {\"defined\": [], \"undefined\": [], \"option\": {}, \"is_replace\": False},\n }\n present_dict = present_dict[\"option\"][option][not reverse_stat]\n return present_dict",
"def required_dict_validator(self, dict_fields, model_name, erp_required=[]):\n required_fields = self.env['settings.field'].sudo().search([('model_id.model', '=', model_name)])\n\n if required_fields:\n erp_required.extend(required_fields.required_field_ids.filtered(lambda x: x.id not in [er.id for er in erp_required]))\n\n for field in erp_required:\n if field.name in dict_fields and 'required' not in dict_fields[field.name]:\n dict_fields[field.name]['required'] = True\n dict_fields[field.name]['empty'] = False\n\n return dict_fields",
"def _pick(d, keys):\n return {k: v for k, v in d.items() if k in keys}",
"def _config_helper(config_key, required_dict, config, filename, defaults):\n this_type = config.get(config_key)\n if this_type not in required_dict:\n config[config_key] = defaults.get(config_key)\n print(f\"{config_key} {this_type} does not exist. Using default of \"\n f\"{defaults.get(config_key)} instead.\")\n\n new_type = config.get(config_key)\n for c in required_dict.get(new_type):\n if c not in config:\n config[c] = defaults.get(c)\n print(f\"{config_key} is {new_type} however {c} not specified \"\n f\"in {filename}. Default is {defaults.get(c)}.\")",
"def _check_required_opts(self, namespace=None):\n for info, group in self._all_opt_infos():\n opt = info['opt']\n\n if opt.required:\n if 'default' in info or 'override' in info:\n continue\n\n if self._get(opt.dest, group, namespace) is None:\n raise RequiredOptError(opt.name, group)",
"def check_required(self, required):\n for k in required:\n if self.__dict__.get(k) is None:\n raise ValueError(\n \"Required argument: '{0}' not provided\".format(k))",
"def _get_default(ddict, key, default):\n if ddict is None or key not in ddict or ddict[key] is None:\n return default\n return ddict[key]",
"def dict2arr(d, keys, default=0):\n return [d.get(k, default) for k in keys]",
"def defaulted_values(source_dict, defaults):\n return {\n k: v if v is not None else defaults[k] for k, v in source_dict.items()\n }.values()",
"def _dict_keys(typingctx, d):\n resty = types.DictKeysIterableType(d)\n sig = resty(d)\n codegen = _iterator_codegen(resty)\n return sig, codegen",
"def validate_dict(in_dict, **kwargs):\n\n if not isinstance(in_dict, dict):\n raise ValueError('requires a dictionary')\n\n for key, value in iteritems(kwargs):\n\n if key == 'required':\n for required_key in value:\n if required_key not in in_dict:\n return False\n\n elif key not in in_dict:\n continue\n\n elif value == bool:\n\n in_dict[key] = (True\n if str(in_dict[key]).lower() == 'true'\n else False)\n\n else:\n\n if (isinstance(in_dict[key], list) and\n len(in_dict[key]) == 1 and\n value != list):\n in_dict[key] = in_dict[key][0]\n\n try:\n if key in in_dict:\n in_dict[key] = value(in_dict[key])\n except ValueError:\n return False\n\n return True",
"def default_to_regular(d):\n if isinstance(d, defaultdict):\n d = {k: default_to_regular(v) for k, v in d.items()}\n return d",
"def _default_to_regular(d):\n if isinstance(d, defaultdict):\n d = {k: _default_to_regular(v) for k, v in d.items()}\n return d",
"def do_get(d, *ks, **kwargs):\n try:\n res = reduce (lambda acc, k: acc[k], ks, d)\n except (KeyError, TypeError):\n if \"default\" in kwargs:\n return kwargs[\"default\"]\n else:\n t, v, tb = sys.exc_info()\n if t == KeyError:\n msg = \"nested keys {} not found in {}\".format(ks, d)\n else:\n msg = \"nesting of keys {} too is too deep for {}\".format(ks, d)\n raise KeyError, msg, tb\n else:\n return res",
"def _populate_always_present_fields(self, field):\n defaults = [\n (\"label\", \"\"),\n (\"instructions\", \"\"),\n (\"placeholder\", \"\"),\n (\"defaultValue\", \"\"),\n (\"restrictions\", {}),\n (\"errorMessages\", {}),\n ]\n field.update({\n key: value\n for key, value in defaults if key not in field\n })",
"def apply_dict_overrides(dictionary: dict, **overrides) -> dict:\n # I'm not entirely sure the treatment of None is the right thing. Need to look into that.\n # Then again, if None were stored, then apply_dict_overrides(d, var1=1, var2=2, var3=None)\n # would be no different than (dict(d, var1=1, var2=2, var3=None). It might be more useful\n # and/or interesting if it would actually remove the key instead. -kmp 18-Jul-2020\n for k, v in overrides.items():\n if v is not None:\n dictionary[k] = v\n # This function works by side effect, but getting back the changed dict may be sometimes useful.\n return dictionary",
"def non_none_dict(dikt: List[Tuple[str, Any]]) -> Dict[Any, Any]:\n return {k: v for k, v in dikt if v is not None}"
]
| [
"0.6829249",
"0.6829249",
"0.61653477",
"0.60751134",
"0.5929873",
"0.5821945",
"0.5796474",
"0.5693985",
"0.56603634",
"0.54797363",
"0.5449738",
"0.54321885",
"0.5375609",
"0.53658336",
"0.533189",
"0.5303772",
"0.521909",
"0.52113986",
"0.5090213",
"0.5082031",
"0.5071563",
"0.5035086",
"0.5005346",
"0.49985978",
"0.4964264",
"0.4948857",
"0.49293604",
"0.49101254",
"0.49058977",
"0.49028635"
]
| 0.7746933 | 0 |
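A hedged usage sketch for the defaults helper in the record above, assuming `fill_dict_defaults` is in scope; the configuration keys are made up.

```python
cfg = {"nbins": 50, "range": (0.0, 1.0)}

out = fill_dict_defaults(
    cfg,
    required_keys=["nbins"],
    opt_keys={"range": None, "density": False},
)
print(out)  # {'nbins': 50, 'range': (0.0, 1.0), 'density': False}

# Unknown leftover keys raise unless noleft=False is passed.
try:
    fill_dict_defaults({"nbins": 50, "typo": 1}, required_keys=["nbins"])
except KeyError as err:
    print(err)  # Leftover keys ['typo'].
```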
Given a list of dicts/objects, return a dict mapping item[key_name] -> item | def list_to_map(item_list, key_name):
return {x.pop(key_name): x for x in item_list} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def items_dict(slist, key=None):\n fields = slist.fields()\n items = [collections.OrderedDict((k, f) for k, f in zip(fields[0], item))\n for item in fields[1:]]\n if key:\n return collections.OrderedDict((i[key], i) for i in items)\n else:\n return items",
"def list_to_dict(list_of_dicts):\n output = defaultdict(list)\n for dict_ in list_of_dicts:\n for key, value in dict_.items():\n dict_[key].append(value)\n return dict(output)",
"def pair_keys(list_of_dicts, first_key, second_key):\n return [{ dictionary[first_key]: dictionary[second_key] } \n for dictionary in list_of_dicts]",
"def _coalesce_dicts(self, list_of_dicts):\n coalesced_list_of_dicts = [{}]\n for item in list_of_dicts:\n found = False\n for dict_items in coalesced_list_of_dicts:\n if list(item.keys())[0] not in dict_items:\n dict_items.update(item)\n found = True\n break\n if not found:\n coalesced_list_of_dicts.append(item)\n return coalesced_list_of_dicts",
"def _prefixed_items_from_list(items: List[namedtuple], item_prefix, prefix, tag_names: Set[str] = set([])):\n result = {}\n for index, nt in enumerate(items):\n result[\"%s%d\" % (item_prefix, index)] = _parse(nt, prefix, tag_names)\n return result",
"def list_flattened_to_dict(self, listH, defaultItem={}):\n dictList = defaultItem\n for name in reversed(listH):\n dictList = {name: dictList}\n return dictList",
"def merge_dicts(listDicts) :\n return dict(itertools.chain.from_iterable([x.items() for x in listDicts]))",
"def merge_animal_dict_list(dict_list):\n full = dict_list[0]\n for d in dict_list:\n full.update(d)\n return full",
"def remove_duplicates(input_list):\n return list(dict.fromkeys(input_list))",
"def get_uniquely_named_objects_by_name(object_list):\n if not object_list:\n return dict()\n\n result = dict()\n for obj in object_list:\n name = obj.name.value\n if name in result:\n raise GraphQLCompilationError(u'Found duplicate object key: '\n u'{} {}'.format(name, object_list))\n result[name] = obj\n\n return result",
"def fusion_api_get_dict_from_list(self, listofDict, key, value):\n for d in listofDict:\n if d[key] == value:\n return d\n return {}",
"def transform_list_to_dict(list):\n\n ret = {}\n\n for value in list:\n if isinstance(value, dict):\n ret.update(value)\n else:\n ret[str(value)] = True\n\n return ret",
"def mapList(results, key):\n newResult = results.map(lambda x: ee.Dictionary(x).get(key))\n return newResult",
"def combine_dicts(dicts_list):\n\n # Combine all dictionarys keys into a single\n # list and find the unique set of them.\n all_keys = []\n for freq_dict in dicts_list:\n all_keys += list(freq_dict.keys())\n keys = set(all_keys)\n\n # Generate the new dictionary with all keys\n combined_dict = {key: 0 for key in keys}\n\n # Iterate over the list of keys so that\n # the memory access pattern to the combined_dict\n # avoids jumping around. If key is not found in\n # a given fdict, just pass over.\n for key in keys:\n for fdict in dicts_list:\n try:\n combined_dict[key] += fdict[key]\n except:\n pass\n\n return combined_dict",
"def group_list_dict(matches, keys):\n target = collections.OrderedDict((key, []) for key in keys)\n for entry in matches:\n if entry is None:\n continue\n for key, value in entry.items():\n target[key].append(value)\n return target",
"def dict_by_attr(collection, attrname, value_attrname=None):\n mapping = {}\n for item in collection:\n if callable(attrname):\n key = attrname(item)\n else:\n key = extended_getattr(item, attrname)\n if value_attrname:\n item = extended_getattr(item, value_attrname)\n mapping[key] = mapping.get(key, []) + [item]\n return mapping",
"def diff_list_of_dict(old_list, new_list):\n new_set = set([dict2str(i) for i in new_list])\n old_set = set([dict2str(i) for i in old_list])\n added = new_set - old_set\n removed = old_set - new_set\n return [str2dict(a) for a in added], [str2dict(r) for r in removed]",
"def fuse_dicts(ds):\n ks = set((k for d in ds for k in d.keys()))\n fused_d = {k: [] for k in ks}\n for d in ds:\n for k in ks:\n fused_d[k].append(d[k] if k in d else None)\n return fused_d",
"def _assoc_list_to_map(lst):\n d = {}\n for run_id, metric in lst:\n d[run_id] = d[run_id] + [metric] if run_id in d else [metric]\n return d",
"def dict_from_items_with_values(*dictionaries, **items):\n dict_list = list(dictionaries)\n dict_list.append(items)\n result = {}\n for d in dict_list:\n for key, value in d.items():\n if value is not None:\n result[key] = value\n return result",
"def remove_duplicate_dicts(data: List[dict]) -> List[dict]:\n return [dict(y) for y in set(tuple(x.items()) for x in data)]",
"def removeDups(lst):\n\n return list(dict.fromkeys(lst) )",
"def return_dictionary_list(lst_of_tuples):\r\n orDict = defaultdict(list)\r\n # iterating over list of tuples\r\n for key, val in lst_of_tuples:\r\n orDict[key].append(val)\r\n return orDict",
"def groupby(f, coll):\n d = dict()\n for item in coll:\n key = f(item)\n if key not in d:\n d[key] = []\n d[key].append(item)\n return d",
"def convert_list2map(variable_lists):\n return_dict = dict()\n for single_variable in variable_lists:\n return_dict[single_variable[\"name\"]] = single_variable[\"value\"]\n return return_dict",
"def remove_duplicates_in_items(items: list, id_key: str) -> list:\n ids = {}\n new_items = []\n for item in items:\n item_id = item.get(id_key)\n if item_id not in ids:\n ids[item_id] = True\n new_items.append(item)\n\n return new_items",
"def convertValueNameListToDict(valueList):\r\n return {k[\"name\"]: k[\"value\"] for k in valueList}",
"def to_listdict(list_list):\n # Fastest way to this is cycle through every row but the first\n # and use zip with the first row and subsequent rows\n return [dict(zip(list_list[0], i)) for i in list_list[1:]]",
"def dict_filter(indict, key_list):\n \n return dict((key, value) for key, value in list(indict.items()) if key in key_list)",
"def multimap(f: Callable, *xs: Any) -> Any:\n first = xs[0]\n if isinstance(first, dict) or isinstance(first, DictType):\n assert all(isinstance(x, dict) or isinstance(x, DictType) for x in xs)\n assert all(sorted(x.keys()) == sorted(first.keys()) for x in xs)\n return {k: multimap(f, *(x[k] for x in xs)) for k in sorted(first.keys())}\n else:\n return f(*xs)"
]
| [
"0.668586",
"0.6637244",
"0.63673234",
"0.6252041",
"0.60777247",
"0.59447753",
"0.59235644",
"0.59206873",
"0.58975065",
"0.58687013",
"0.58588576",
"0.58409035",
"0.57967824",
"0.5757043",
"0.5666372",
"0.5651253",
"0.56499344",
"0.56259024",
"0.56049645",
"0.55956835",
"0.55873156",
"0.5573879",
"0.55643624",
"0.5528581",
"0.55248654",
"0.5511118",
"0.5493228",
"0.54603934",
"0.545923",
"0.54512954"
]
| 0.7173263 | 0 |
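A tiny sketch of the key-indexing helper in the record above, assuming `list_to_map` is available; the row fields are illustrative only.

```python
rows = [
    {"name": "alpha", "x": 1},
    {"name": "beta", "x": 2},
]

by_name = list_to_map(rows, "name")
print(by_name)  # {'alpha': {'x': 1}, 'beta': {'x': 2}}
```

Because the key is removed with `pop`, the dicts in `rows` are mutated in place, and duplicate key values silently keep only the last item.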
Run a command from a list with optional environment and return a tuple (rc, stdout_str, stderr_str) | def run_command_list(cmd_list, env=None):
rc = -1
sout = serr = None
cmd_list = run_sanitize(cmd_list)
try:
if env:
pipes = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
else:
pipes = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sout, serr = pipes.communicate()
rc = pipes.returncode
except Exception as err:
raise err
#sout = ensure_str(sout)
#serr = ensure_str(serr)
return rc, sout, serr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_command(lst, decode_output = True):\n if is_verbose():\n print(\"Executing command: %s\" % (\" \".join(lst)))\n proc = subprocess.Popen(lst, stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n (proc_stdout, proc_stderr) = proc.communicate()\n if decode_output and not isinstance(proc_stdout, str):\n proc_stdout = proc_stdout.decode()\n if decode_output and not isinstance(proc_stderr, str):\n proc_stderr = proc_stderr.decode()\n returncode = proc.returncode\n if 0 != proc.returncode:\n raise RuntimeError(\"command failed: %i, stderr output:\\n%s\" % (proc.returncode, proc_stderr))\n return (proc_stdout, proc_stderr)",
"def exec_command_args_list_one_string(args_list):\n outputlogMessage(output_commandString_from_args_list(args_list))\n ps = subprocess.Popen(args_list,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n # returncode = ps.wait()\n out, err = ps.communicate()\n returncode = ps.returncode\n if returncode != 0:\n outputlogMessage(err.decode())\n return False\n\n if len(out) > 0:\n return out\n else:\n outputlogMessage('return codes: ' + str(returncode))\n return False",
"def exec_command_args_list(args_list):\n outputlogMessage(output_commandString_from_args_list(args_list))\n ps = subprocess.Popen(args_list)\n returncode = ps.wait()\n outputlogMessage('return codes: '+ str(returncode))\n return returncode",
"def run(command_list: List[str]) -> str:\n r = subprocess.run(command_list, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, encoding='utf-8')\n logging.debug(f\"subprocess.run({command_list}) got {r.returncode}.\")\n if r.returncode != 0:\n logging.error(f\"subprocess.run({command_list}) failed with code {r.returncode}.\")\n return f\"Error {r.returncode} trying to run({command_list})\"\n return r.stdout",
"def execute(self, cmd_list, stdin = \"\") :\n\n try :\n self.logger.info(\"executing command: '%s'\" % ' '.join(cmd_list))\n except :\n raise RuntimeError(\"command_list %s contains non-string elements.\" % cmd_list)\n \n if stdin == \"\" :\n pipo = subprocess.Popen(cmd_list, stdout = subprocess.PIPE, \\\n stderr = subprocess.PIPE)\n _output, _outerr = pipo.communicate()\n else :\n pipo = subprocess.Popen(cmd_list, stdin = subprocess.PIPE, \\\n stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n _output, _outerr = pipo.communicate(stdin)\n\n if pipo.returncode != 0 :\n raise RuntimeError(\"cmd: \\\"%s\\\" failed with ret=%d, stdout=%s and stderr=%s\" % \\\n (' '.join(cmd_list), pipo.returncode, _output, _outerr))\n\n for line in _output + _outerr :\n if \"not authorized\" in line.lower() :\n raise RuntimeError(\"cmd: \\\"%s\\\" failed with ret=%d, stdout=%s and stderr=%s\" % \\\n (' '.join(cmd_list), pipo.returncode, _output, _outerr))\n \n # get rid of whitespace\n output = []\n for line in _output.split(\"\\n\") :\n line = line.strip()\n if line == \"\" : \n continue\n output.append(line)\n \n outerr = []\n for line in _outerr.split(\"\\n\") :\n line = line.strip()\n if line == \"\" : \n continue\n outerr.append(line)\n\n self.logger.debug(\\\n \"returning (ret, output[:10], outerr[:10] to 10 lines ) %s\" %\\\n ((pipo.returncode, output[:10], outerr[:10]), ))\n\n return pipo.returncode, output, outerr",
"def run(str_or_list):\n\tsubprocess.run(str_or_list, check=True, shell=True)",
"def exec_command_args_list_one_file(args_list,output):\n outputlogMessage(output_commandString_from_args_list(args_list))\n ps = subprocess.Popen(args_list)\n returncode = ps.wait()\n if os.path.isfile(output):\n return output\n else:\n outputlogMessage('return codes: '+ str(returncode))\n return False",
"def get_cmd_args(\n cmd_args: List[str],\n env_names: Iterable[str],\n kwargs: Dict[str, bool],\n) -> Union[List[str], str]:\n for env_name in env_names:\n env_value = os.environ.get(env_name)\n if env_value is not None:\n settings = f'{env_value}/settings64.sh'\n if os.path.isfile(settings):\n kwargs['shell'] = True\n kwargs['executable'] = 'bash'\n return ' '.join([\n 'source',\n shlex.quote(settings),\n ';',\n 'exec',\n *map(shlex.quote, cmd_args),\n ])\n return cmd_args",
"def exec_bash(self, cmd_list):\n for cmd in cmd_list:\n _, result = self.run_cmd(cmd)\n result = result.decode('utf-8').strip()\n return result",
"def run_command(\n commands: List[str],\n args: List[str],\n cwd: Optional[str] = None,\n verbose: bool = False,\n hide_stderr: bool = False,\n env: Optional[Dict[str, str]] = None,\n) -> Tuple[Optional[str], Optional[int]]:\n assert isinstance(commands, list)\n process = None\n\n popen_kwargs: Dict[str, Any] = {}\n if sys.platform == \"win32\":\n # This hides the console window if pythonw.exe is used\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n popen_kwargs[\"startupinfo\"] = startupinfo\n\n for command in commands:\n try:\n dispcmd = str([command] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n process = subprocess.Popen([command] + args, cwd=cwd, env=env,\n stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr\n else None), **popen_kwargs)\n break\n except OSError as e:\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None, None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None, None\n stdout = process.communicate()[0].strip().decode()\n if process.returncode != 0:\n if verbose:\n print(\"unable to run %s (error)\" % dispcmd)\n print(\"stdout was %s\" % stdout)\n return None, process.returncode\n return stdout, process.returncode",
"def shell ( cmd ):\n p = subprocess.Popen( cmd, shell=True, stderr=subprocess.PIPE,\n stdout=subprocess.PIPE )\n x = p.communicate()\n p.stdout.close()\n p.stderr.close()\n if x[1] == '':\n status = True\n else:\n status = False\n \n return [ status, x[0].split( '\\n' ), x[1].split( '\\n' ) ]",
"def run_command(cmd):\n\n return filter(lambda x: x, Popen(cmd.split(), stdout = PIPE).communicate()[0].split(\"\\n\"))",
"def pipe_open(commands: list):\n process = Popen(commands, stdout=PIPE, stderr=PIPE)\n output, error = process.communicate()\n return output, error",
"def run_module_command(cmd_list, env_vars, exit_on_error=True):\n if exit_on_error:\n try:\n subprocess.check_call(cmd_list, env=env_vars)\n except subprocess.CalledProcessError as shelloutexc:\n sys.exit(shelloutexc.returncode)\n else:\n subprocess.check_call(cmd_list, env=env_vars)",
"def run_commands(*commands: str, **kwargs) -> Tuple[Optional[str], Optional[str], int]:\n command = ' ; '.join(commands)\n # Indirectly waits for a return code.\n process = subprocess.run(command, **kwargs)\n stdout = process.stdout\n stderr = process.stderr\n # Decode stdout and stderr to strings if needed.\n if isinstance(stdout, bytes):\n stdout = str(stdout, 'utf-8').strip()\n if isinstance(stderr, bytes):\n stderr = str(stderr, 'utf-8').strip()\n return stdout, stderr, process.returncode",
"def system(cmds):\n if isinstance(cmds, six.string_types):\n cmds = [cmds]\n\n output = None\n if isinstance(cmds, (tuple, list)):\n for cmd in cmds:\n logger.debug(cmd)\n\n try:\n output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n if output:\n logger.info(output.decode('utf-8'))\n\n\n except subprocess.CalledProcessError as e:\n if e.returncode != 2:\n msg = \"Command failed: \\n {} \\n \\n Return code: {} \".format(cmd, e.returncode)\n logger.error(msg)\n logger.error(e.output.decode(\"utf-8\"))\n\n sys.exit(1)\n\n else:\n raise TypeError(\"cmd argument is wrong type\")\n\n return output",
"def call_command(command, env=None, cwd=None):\n\n try:\n LOG.debug('Run %s', ' '.join(command))\n out = subprocess.check_output(command,\n bufsize=-1,\n env=env,\n stderr=subprocess.STDOUT,\n cwd=cwd)\n LOG.debug(out)\n return out, 0\n except subprocess.CalledProcessError as ex:\n LOG.debug('Running command \"%s\" Failed.', ' '.join(command))\n LOG.debug(str(ex.returncode))\n LOG.debug(ex.output)\n return ex.output, ex.returncode\n except OSError as oerr:\n LOG.warning(oerr.strerror)\n return oerr.strerror, oerr.errno",
"def exec_command_string(command_str):\n print(command_str)\n (status, result) = getstatusoutput(command_str)\n return status, result",
"def exec_command_all(*cmdargs, **kwargs):\n proc = subprocess.Popen(cmdargs, bufsize=-1, # Default OS buffer size.\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n # Waits for subprocess to complete.\n out, err = proc.communicate()\n # Python 3 returns stdout/stderr as a byte array NOT as string.\n # Thus we need to convert that to proper encoding.\n if is_py3:\n encoding = kwargs.get('encoding')\n if encoding:\n out = out.decode(encoding)\n err = err.decode(encoding)\n else:\n # If no encoding is given, assume we're reading filenames from stdout\n # only because it's the common case.\n out = os.fsdecode(out)\n err = os.fsdecode(err)\n\n\n return proc.returncode, out, err",
"def _run(args: List[str], check: bool = False) -> Tuple[int, str]:\n result = subprocess.run(args=args, stdout=subprocess.PIPE)\n if check and result.returncode != 0:\n raise subprocess.CalledProcessError(result.returncode, args)\n return result.returncode, result.stdout.decode('utf-8', 'strict')",
"def _callOnCommandLine(self, cmd=[]):\n\t\tp = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n\t\tstdout, stderr = p.communicate()\n\t\treturn stdout, stderr",
"def run_cmd(cmd, chdir=None, env=None):\n l = logging.getLogger('screpper.util')\n cmd_l = shlex.split(cmd)\n l.debug('exec command %s' % (cmd))\n l.debug('as list %s' % (cmd_l))\n\n # set directory to current if not defined\n if not chdir:\n chdir = os.getcwd()\n\n # prepare environemtn\n if env:\n new_env = dict(os.environ.items() + env.items())\n else:\n new_env = os.environ\n p = subprocess.Popen(cmd_l, cwd=chdir, stdout=subprocess.PIPE, \n stderr=subprocess.PIPE, env=new_env)\n \n try:\n out, err = p.communicate()\n l.debug('stdout: ' + str(out))\n l.debug('stderr: ' + str(err))\n except Exception, e:\n l.error('failed to run process: %s' % (str(e)))\n pass\n\n l.debug('process finished, retcode %d' % (p.returncode))\n return p.returncode, out, err",
"def RunShellWithReturnCodeAndStderr(command, print_output=False,\r\n universal_newlines=True,\r\n env=os.environ):\r\n logging.info(\"Running %s\", command)\r\n env = env.copy()\r\n env['LC_MESSAGES'] = 'C'\r\n p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\r\n shell=use_shell, universal_newlines=universal_newlines,\r\n env=env)\r\n if print_output:\r\n output_array = []\r\n while True:\r\n line = p.stdout.readline()\r\n if not line:\r\n break\r\n print line.strip(\"\\n\")\r\n output_array.append(line)\r\n output = \"\".join(output_array)\r\n else:\r\n output = p.stdout.read()\r\n p.wait()\r\n errout = p.stderr.read()\r\n if print_output and errout:\r\n print >>sys.stderr, errout\r\n p.stdout.close()\r\n p.stderr.close()\r\n return output, errout, p.returncode",
"def get_output_error(cmd, **kwargs):\n if not isinstance(cmd, list):\n cmd = [cmd]\n logging.debug(\"Running: %s\", ' '.join(map(quote, cmd)))\n try:\n result = Popen(cmd, stdout=PIPE, stderr=PIPE, **kwargs)\n except OSError as e:\n return -1, '', f'Failed to run {cmd!r}: {e!r}'\n so, se = result.communicate()\n # unicode:\n so = so.decode('utf8', 'replace')\n se = se.decode('utf8', 'replace')\n\n return result.returncode, so, se",
"def GetCmdStatusOutputAndError(args,\n cwd=None,\n shell=False,\n env=None,\n merge_stderr=False):\n _ValidateAndLogCommand(args, cwd, shell)\n stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE\n pipe = Popen(\n args,\n stdout=subprocess.PIPE,\n stderr=stderr,\n shell=shell,\n cwd=cwd,\n env=env)\n stdout, stderr = pipe.communicate()\n return (pipe.returncode, stdout, stderr)",
"def run_command_popen(args, shell=False, log_errors=False, logger_func=None):\n # type: (List[str], bool, bool, Callable) -> Tuple[int, str, str]\n logger_func = logger_func if logger_func else print\n\n process = subprocess.Popen(\n args, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE # nosec\n )\n stdout, stderr = process.communicate()\n\n if process.returncode != 0 and log_errors:\n command = \" \".join(args)\n logger_func(\n 'Command \"%s\" returned exit code %s.' % (command, process.returncode)\n )\n logger_func(\"Stdout: %s\" % (stdout.decode(\"utf-8\")))\n logger_func(\"Stderr: %s\" % (stderr.decode(\"utf-8\")))\n\n return (process.returncode, stdout.decode(\"utf-8\"), stderr.decode(\"utf-8\"))",
"def cli(self, cmd):\n p1 = Popen(cmd,stdout=PIPE, shell=True)\n output = p1.communicate()\n if p1.returncode != 0 :\n print('error returned from shell command: %s was %s'%(cmd,output[0]))\n return output[0],p1.returncode",
"def cmd_gather(cmd):\n\n if not isinstance(cmd, list):\n cmd_list = shlex.split(cmd)\n else:\n cmd_list = cmd\n\n cwd = pushd.Dir.getcwd()\n cmd_info = '[cwd={}]: {}'.format(cwd, cmd_list)\n\n logger.debug(\"Executing:cmd_gather {}\".format(cmd_info))\n proc = subprocess.Popen(\n cmd_list, cwd=cwd,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rc = proc.returncode\n logger.debug(\n \"Process {}: exited with: {}\\nstdout>>{}<<\\nstderr>>{}<<\\n\".\n format(cmd_info, rc, out, err))\n return rc, out, err",
"def checked_subprocess_run(command):\n args = shlex.split(command)\n completed = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out = completed.stdout.decode()\n err = completed.stderr.decode()\n\n # Print the subprocess output to include in the test output\n print(out, file=sys.stdout)\n print(err, file=sys.stderr)\n\n # After printing the output, raise an exception on a non-zero exit status.\n completed.check_returncode()\n\n return out, err",
"def proc_select(exec_list):\n\n # if the user didn't select any particular\n # executable for a command, it'll be selected from\n # exec_list\n for prog in exec_list:\n r = envoy.run(\"which {}\".format(prog))\n if r.status_code == 0:\n return r.std_out[:-1]\n return None"
]
| [
"0.71225667",
"0.6955543",
"0.6849619",
"0.6824012",
"0.6708122",
"0.644725",
"0.63306963",
"0.629695",
"0.62924445",
"0.62071675",
"0.6191846",
"0.61890286",
"0.614715",
"0.6129071",
"0.6083207",
"0.59565616",
"0.593073",
"0.5911763",
"0.5898761",
"0.5893061",
"0.585685",
"0.584717",
"0.58383286",
"0.5835596",
"0.5825846",
"0.580807",
"0.5807218",
"0.5712426",
"0.5702762",
"0.57001996"
]
| 0.8288501 | 0 |
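The negative snippets in the record above all wrap the same pattern: spawn a process, capture stdout/stderr, decode the bytes, and check the return code. Below is a minimal, hedged sketch of that pattern using only the standard library; the function name and the command run are illustrative, not taken from the record.

```python
import subprocess

def run_command(cmd_list):
    # Capture stdout/stderr as bytes, decode them, and raise on a non-zero exit code.
    proc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    out, err = out.decode(), err.decode()
    if proc.returncode != 0:
        raise RuntimeError('command failed (%d): %s' % (proc.returncode, err))
    return out, err

stdout, _ = run_command(['echo', 'hello'])  # illustrative POSIX command
print(stdout.strip())  # hello
```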
Returns a string for use with acquire() calls, optionally guaranteeing uniqueness. Constructs a consistent id from the platform node, process_id and thread_id | def get_threadbased_id(guarantee_uniq=False):
return '{}:{}:{}:{}'.format(platform.node(), os.getpid(), str(threading.get_ident()),uuid.uuid4().hex if guarantee_uniq else '') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pid_tid():\n # noinspection PyBroadException\n try:\n return \"(pid=%s) (tid=%s)\" % (\n six.text_type(os.getpid()),\n six.text_type(six.moves._thread.get_ident()),\n )\n except Exception:\n return \"(pid=%s) (tid=Unknown)\" % (six.text_type(os.getpid()))",
"def _get_thread_id() -> int:\n # NOTICE:\n # we do not use threading.get_ident() to identify a thread, as Python recycles these identifiers\n return id(threading.current_thread())",
"def generate_fwan_process_id() -> str:\n return str(uuid.uuid4())",
"def establish_id(self):\n if self.config.node_id is None:\n self.config.node_id = str(uuid4()).replace('-', '')\n return self.config.node_id",
"def get_identity():\n identity = multiprocessing.current_process()._identity\n identity = 0 if not identity else identity[0]\n\n identity = (identity, threading.current_thread().ident)\n return identity",
"def get_generator_id() -> str:\n res = os.name + str(os.getpid()) + str(random.randint(-1000, 1000))\n res = hashlib.sha224(res.encode('utf-8')).digest()\n res = base64.b64encode(res).decode('utf-8')\n return res",
"def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)",
"def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id,\n f\"{self._process_type}_cpu_usage\",\n self._cam_name,\n )",
"def _get_cache_identifier():\n return '{}:{}'.format(os.getpid(), threading.get_ident())",
"def create_task_id():\n return str(int(round(time.time() * 10**9)))",
"def GenDistinctId(self):\t\n \"\"\"4 bits to unique a machine \\\n\t5 bits for processes\"\"\"\n\tmachineId = format(self.mid, 4)\n processId = format(self.pid) \n \treturn machineId + processId",
"def __gettid():\r\n try:\r\n import platform\r\n if not platform.system().startswith('Linux'):\r\n raise ValueError\r\n syscalls = {\r\n 'i386': 224, # unistd_32.h: #define __NR_gettid 224\r\n 'x86_64': 186, # unistd_64.h: #define __NR_gettid 186\r\n }\r\n import ctypes\r\n tid = ctypes.CDLL('libc.so.6').syscall(syscalls[platform.machine()])\r\n except:\r\n tid = -1\r\n return tid",
"def hardware_id(self):\n return uuid.uuid4()",
"def get_ticket_id():\n return str(time.time()) + str(uuid.uuid4())",
"def get_id():\n global UNIQUE_GAME_ID\n with threadLock:\n UNIQUE_GAME_ID += 1\n return str(UNIQUE_GAME_ID)",
"def unique_id(self) -> str:\n return f\"{self._device.mac}_{self._router.config_entry.entry_id}\"",
"def get_full_id(self, module_name, file_name):\n\n host_id = self.hostname\n process_id = self.os_pid\n thread_id = self.get_thread_id()\n\n file_basename = os.path.basename(file_name) # remove the path for better readability\n full_id = \"host={0} filename={1} module={2} process={3} thread={4}\".format(host_id, file_basename, module_name,\n process_id, thread_id)\n\n return full_id",
"def unique_id(self) -> str:\n return f\"{self._host}_{self._name}_{self._unique_id}\"",
"def unique_id(self):\n return f\"{self.device.id}-{self.key}\"",
"def detect_own_id() -> str:\n\n pod = os.environ.get('POD_ID', None)\n if pod is not None:\n return pod\n\n user = getpass.getuser()\n host = socket.getfqdn()\n now = datetime.datetime.utcnow().isoformat()\n rnd = ''.join(random.choices('abcdefhijklmnopqrstuvwxyz0123456789', k=6))\n return f'{user}@{host}/{now}/{rnd}'",
"def platform_num(self) -> str:\n return pulumi.get(self, \"platform_num\")",
"def unique_id() -> str:",
"def unique_id(self):\n return f\"{self._mac_address}:{self._device_id}:{self._zone_id}:switch\"",
"def unique_id(self):\n return self._device.serial",
"def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id, \"gpu_load\", self._gpu_name\n )",
"def unique_id(self) -> str:\n return f\"{self._inst.lower()}-{self._sid_data['sid']}_switch-{self._data[self._sid_data['sid_ref']]}\"",
"def load_thread_id() -> str:\n with open('post_id.txt', 'r') as files:\n thread_id = files.read()\n\n return thread_id",
"def _get_device_id(api: Mobileclient) -> str:\n\n try:\n _get_device_id_from_environment()\n except KeyError:\n pass\n\n return _get_device_id_from_registered(api)",
"def zend_thread_id():\n raise NotImplementedError()",
"def unique_id(self) -> Optional[str]:\n return self._device.device_id"
]
| [
"0.6859263",
"0.67202514",
"0.6645354",
"0.6566175",
"0.65168583",
"0.6495521",
"0.64781684",
"0.645064",
"0.6420917",
"0.64113784",
"0.63737696",
"0.63713926",
"0.63129073",
"0.6301101",
"0.62372375",
"0.6235125",
"0.6218468",
"0.61973315",
"0.61922956",
"0.61841655",
"0.6172348",
"0.61702955",
"0.6164359",
"0.613064",
"0.61240953",
"0.61237305",
"0.6110151",
"0.61060333",
"0.6073755",
"0.60637605"
]
| 0.7741454 | 0 |
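A short usage sketch of the document function in the record above, reproduced so the example is self-contained; whether the id repeats or is unique per call depends on the guarantee_uniq flag.

```python
import os
import platform
import threading
import uuid

def get_threadbased_id(guarantee_uniq=False):
    # node:pid:thread_ident:[uuid4 hex only when uniqueness is requested]
    return '{}:{}:{}:{}'.format(platform.node(), os.getpid(), str(threading.get_ident()),
                                uuid.uuid4().hex if guarantee_uniq else '')

print(get_threadbased_id())       # stable for a given thread of a given process
print(get_threadbased_id(True))   # differs on every call
```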
Convert the rfc3339 formatted string (UTC only) to a datetime object with tzinfo explicitly set to UTC. Raises an exception if the parsing fails. | def rfc3339str_to_datetime(rfc3339_str):
ret = None
for fmt in rfc3339_date_input_fmts:
try:
ret = datetime.datetime.strptime(rfc3339_str, fmt)
# Force this since the formats we support are all utc formats, to support non-utc
if ret.tzinfo is None:
ret = ret.replace(tzinfo=datetime.timezone.utc)
continue
except:
pass
if ret is None:
raise Exception("could not convert input created_at value ({}) into datetime using formats in {}".format(rfc3339_str, rfc3339_date_input_fmts))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_date(s):\n return parse(s).astimezone(pytz.utc)",
"def datetime_parser(s):\n try:\n ts = arrow.get(s)\n # Convert UTC to local, result of get is UTC unless it specifies\n # timezone, bonfire assumes all time to be machine local\n if ts.tzinfo == arrow.get().tzinfo:\n ts = ts.replace(tzinfo='local')\n except:\n c = pdt.Calendar()\n result, what = c.parse(s)\n\n ts = None\n if what in (1, 2, 3):\n ts = datetime.datetime(*result[:6])\n\n ts = arrow.get(ts)\n ts = ts.replace(tzinfo='local')\n return ts\n\n if ts is None:\n raise ValueError(\"Cannot parse timestamp '\" + s + \"'\")\n\n return ts",
"def _parse_time(time_string: str) -> datetime:\n\n # Strings with timezone (+01:00) in v2 are not easily parsed. But time\n # zones are not important here, so we just omit them.\n time_string = time_string.rsplit('+')[0]\n\n time_formats = [\n '%Y-%m-%dT%H:%M:%S.%fZ', # Default\n '%Y-%m-%dT%H:%M:%SZ', # Imported UNCCD data\n '%Y-%m-%dT%H:%M:%S.%f', # Stripped timezone format (v2)\n ]\n for t_format in time_formats:\n try:\n return datetime.strptime(time_string, t_format)\n except ValueError:\n continue",
"def rfc3339_to_datetime(data):\n try:\n ts = time.strptime(data, '%Y-%m-%d')\n return date(*ts[:3])\n except ValueError as error:\n pass\n\n try:\n dt, _, tz = data.partition('Z')\n if tz:\n tz = offset(tz)\n else:\n tz = offset('00:00')\n ts = time.strptime(dt, '%Y-%m-%dT%H:%M:%S.%f')\n return datetime(*ts[:6], tzinfo=tz)\n except ValueError:\n raise ValueError('date-time {!r} is not a valid rfc3339 date representation'.format(data)) # noqa",
"def _parse_datetime(s):\r\n if s:\r\n return datetime.strptime(s, ISO8601)\r\n else:\r\n return datetime.fromtimestamp(0)",
"def _parse_datetime(value):\n return parse(value).replace(tzinfo=pytz.utc) if value else None",
"def parsedate(s):\n dt = dateutil.parser.parse(s)\n if dt.tzinfo:\n dt = dt.astimezone(dateutil.tz.tzlocal()).replace(tzinfo=None)\n return dt",
"def parse_datetime(dt_str, format):\n t = time.strptime(dt_str, format)\n return datetime(t[0], t[1], t[2], t[3], t[4], t[5], t[6], pytz.UTC)",
"def from_string(representation: str) -> datetime:\n return parse(representation).replace(tzinfo=timezone.utc)",
"def _parse_iso_datetime(string):\n try:\n string = string.split('.', 1)[0] # strip out microseconds\n return calendar.timegm(time.strptime(string, '%Y-%m-%dT%H:%M:%S'))\n except ValueError, e:\n raise ValueError('Invalid ISO date/time %r' % string)",
"def fromisoformat(string):\n string = string.replace(\"T\", \" \")\n if \".\" in string:\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S.%f\")\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S\")",
"def __parse_date_time(date_time_string: str) -> DateTime:\n return datetime.datetime.strptime(date_time_string, \"%Y-%m-%dT%H:%M:%S.%fZ\")",
"def parse_utc_string(self, utc_string: str) -> dt.datetime:\n return dt.datetime(*map(int, re.split(r\"[^\\d]\", utc_string)[:-1])).timestamp() + self.get_tz_offset() * 60",
"def parse_time_string(time_str, tz=\"US/Pacific\"):\n\n # parsedatetime doesn't handle ISO-8601 time strings (YYYY-MM-DDThh:mm:ss+zz) so\n # try to parse it with arrow first and then use parsedatetime as a fallback (grumble)\n t = None\n try:\n t = arrow.get(time_str)\n # If the input string didn't specify a timezone, fill in the default\n if len(time_str.split(\"+\")) == 1:\n t = t.replace(tzinfo=tz)\n except arrow.parser.ParserError:\n cal = parsedatetime.Calendar()\n parse_result = cal.parse(time_str)\n if parse_result[1] == 0:\n raise ValueError(\"Could not understand time {time}\".format(time=time_str))\n t = arrow.get(parse_result[0]).replace(tzinfo=tz)\n return t.to(\"utc\")",
"def parse_datetime(datestr):\r\n try:\r\n return dateutil.parser.parse(datestr).replace(tzinfo=utc)\r\n except ValueError:\r\n raise DashboardError(_(\"Unable to parse date: \") + datestr)",
"def parse_utc_datetime(dt_str: str) -> datetime:\n if dt_str is None:\n return None\n\n db_datetime_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n dt_utc = datetime.strptime(dt_str, db_datetime_format)\n dt_utc = dt_utc.replace(tzinfo=timezone.utc)\n return dt_utc",
"def parse_time(text):\n try:\n if len(text) == 17:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%MZ')\n elif len(text) == 20:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%M:%SZ')\n else:\n date = datetime.datetime.utcnow()\n except Exception as _:\n date = datetime.datetime.utcnow()\n return date",
"def parse_datetime(date_string):\n try:\n return datetime.datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S%z')\n except ValueError:\n result = datetime.datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S')\n # Workaround for https://github.com/pyvec/naucse.python.cz/issues/525\n return pytz.timezone('Europe/Prague').localize(result)",
"def parse(date_string: str):\n # parse the date string\n date = dateutil.parser.parse(date_string)\n # convert to UTC if containing time-zone information\n # then drop the timezone information to prevent unsupported errors\n if date.tzinfo:\n date = date.astimezone(dateutil.tz.UTC).replace(tzinfo=None)\n # return the datetime object\n return date",
"def parse(date_string: str):\n # parse the date string\n date = dateutil.parser.parse(date_string)\n # convert to UTC if containing time-zone information\n # then drop the timezone information to prevent unsupported errors\n if date.tzinfo:\n date = date.astimezone(dateutil.tz.UTC).replace(tzinfo=None)\n # return the datetime object\n return date",
"def parse_rfc3339(datetime_string, report_precision=False):\n timezone = \"+0000\"\n if datetime_string.endswith(\"Z\"):\n datetime_string = datetime_string[0:-1]\n _, _, datetime_time_string = datetime_string.partition(\"T\")\n zone_sep = \"\"\n if \"+\" in datetime_time_string:\n zone_sep = \"+\"\n elif \"-\" in datetime_time_string:\n zone_sep = \"-\"\n if not zone_sep == \"\":\n datetime_string, _, datetime_offset = datetime_string.partition(zone_sep)\n timezone = zone_sep+datetime_offset.replace(\":\", \"\")\n microseconds = 0\n precision = 0\n if \".\" in datetime_time_string:\n datetime_string, _, datetime_ns = datetime_string.partition(\".\")\n microseconds = int(float(\"0.\"+datetime_ns)*1000)\n precision = len(datetime_ns)\n time_3339 = datetime.datetime.strptime(datetime_string, \"%Y-%m-%dT%H:%M:%S\")\n hours = int(timezone[1:3])\n minutes = int(timezone[3:5])\n timezone_delta = datetime.timedelta(hours=hours, minutes=minutes)\n if timezone[0] == '+':\n time_3339 -= timezone_delta\n else:\n time_3339 += timezone_delta\n time_3339 = time_3339.replace(microsecond=microseconds)\n if report_precision:\n return (time_3339, precision)\n else:\n return time_3339",
"def make_datetime_from_string(string):\n return datetime.datetime.strptime(string, \"%Y-%m-%dT%H:%M:%S%z\")",
"def cvt_time(dt_str):\n # Note, these timestamps don't include time zones\n return datetime.strptime(dt_str, '%Y-%m-%dT%H:%M:%S.%fZ')",
"def decode_datetime(self, string):\n if isinstance(string, str):\n if 'T' in string:\n return datetime.strptime(string, \"%Y%m%dT%H%M%S\")\n else:\n return datetime.strptime(string, \"%Y%m%d\")\n else:\n return string",
"def parse_datetime(date_string: str) -> datetime:\n # Tries out any given format of DatetimeFormat enumeration\n return dateparser.parse(\n date_string, date_formats=[dt_format.value for dt_format in DatetimeFormat]\n ).replace(tzinfo=pytz.UTC)",
"def time_string2dt(time_string: str)-> datetime:\n return parse(time_string, fuzzy=True)",
"def deserialize_datetime(string):\n try:\n from dateutil.parser import parse\n return parse(string)\n except ImportError:\n return string",
"def parse_time(time_string):\n return calendar.timegm(time.strptime(time_string, \"%Y%m%dT%H%M%SZ\"))",
"def convert_utc(utc) -> dt.datetime:\n return iso8601.parse_date(utc)",
"def datetime_from(text):\n eastern = pytz.timezone(\"US/Eastern\")\n if text.endswith(\"T00:00:00\"):\n text = text[:-len(\"T00:00:00\")]\n time = datetime.strptime(text, \"%Y-%m-%d\")\n time = time.replace(hour=23, minute=59, second=59)\n time = eastern.localize(time)\n return time.astimezone(pytz.utc)"
]
| [
"0.70798886",
"0.70414937",
"0.6984492",
"0.69358456",
"0.678945",
"0.67742926",
"0.6738815",
"0.66999805",
"0.6650111",
"0.661826",
"0.65997696",
"0.6588446",
"0.65588635",
"0.6555089",
"0.6531995",
"0.6470953",
"0.6461898",
"0.64420307",
"0.63758594",
"0.63758594",
"0.6373666",
"0.6370552",
"0.6366406",
"0.63254803",
"0.6316967",
"0.6286941",
"0.6280608",
"0.6265936",
"0.62649554",
"0.6263641"
]
| 0.75160867 | 0 |
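A compact sketch of the parsing strategy used in the record's document: try a list of strptime formats and pin tzinfo to UTC on success. The format list here is an assumption — the record references rfc3339_date_input_fmts without defining it.

```python
import datetime

# Assumed candidate formats; rfc3339_date_input_fmts is not shown in the record.
RFC3339_FORMATS = ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%d')

def parse_rfc3339_utc(value):
    for fmt in RFC3339_FORMATS:
        try:
            # Force UTC since all supported formats represent UTC timestamps.
            return datetime.datetime.strptime(value, fmt).replace(tzinfo=datetime.timezone.utc)
        except ValueError:
            continue
    raise ValueError('could not parse {!r} with formats {}'.format(value, RFC3339_FORMATS))

dt = parse_rfc3339_utc('2021-06-01T12:30:00Z')
print(dt.isoformat())  # 2021-06-01T12:30:00+00:00
```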
Convert an epoch int value to a RFC3339 datetime string | def epoch_to_rfc3339(epoch_int):
return datetime_to_rfc3339(datetime.datetime.utcfromtimestamp(epoch_int)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def epoch_to_str(epoch: int) -> str:\n return datetime_to_str(datetime.fromtimestamp(epoch, tz=timezone.utc))",
"def epoch_to_format(epoch, format='%Y-%m-%dT%H:%M:%SZ'):\n\n return datetime.fromtimestamp(int(epoch[:10]), tz=timezone.utc).strftime(format)",
"def epoch_to_format(format, epoch):\n\n return datetime.fromtimestamp(int(epoch[:10]), tz=timezone.utc).strftime(format)",
"def epoch(value):\n if isinstance(value, datetime.datetime):\n return int(calendar.timegm(value.timetuple())*1000)\n return '' #fails silently for non-datetime objects",
"def epoch2UTCstr(timestamp=time(), fmat=\"%Y-%m-%d %H:%M:%S\"):\n return strftime(fmat, gmtime(timestamp))",
"def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)",
"def datetime_to_epoch(datetime_obj):\n return int(datetime_obj.strftime(\"%s\")) * 1000",
"def convert_epoch(aepoch):\n\tprint \"time given: \" + aepoch\n\tepoch = time.strftime(\"%a %d %b %Y %H:%M:%S +0000\", time.gmtime(float(aepoch)))\n\tprint \"converted time: \" + epoch",
"def epoch2datetime(epoch):\n return datetime.fromtimestamp(epoch)",
"def epoch():\n return datetime2epoch(datetime.now())",
"def convert_epoch_to_date(epoch_time):\n local_time = datetime.datetime.fromtimestamp(epoch_time).strftime(TIME_FORMAT_YSI) \n\n return local_time",
"def epoch2time(time):\n\tvalue = datetime.datetime.fromtimestamp(time)\n\tNormal = value.strftime('%Y-%m-%d %H:%M:%S')\n\tprint(normal)\n\treturn normal",
"def epoch2localstr(timestamp=time(), fmat=\"%Y-%m-%d %H:%M:%S\"):\n return strftime(fmat, localtime(timestamp))",
"def format_datetime(epoch):\n return strftime(\"%a %b %d %Y %I:%M:%S%p\", localtime(epoch))",
"def datetime2epoch(dt):\n return int(mktime(dt.timetuple())*1000)",
"def get_datetime(epoch):\n\n t = time.gmtime(epoch)\n dt = datetime.datetime(*t[:6])\n\n return dt",
"def convert_epoch_to_timestamp(cls, timestamp, tsformat):\n return time.strftime(tsformat, time.gmtime(timestamp))",
"def format_date(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y%m%d')",
"def dt_epoch_msecs(value):\n return long(calendar.timegm(value.timetuple())) * 1000",
"def datetime_to_epoch(datetime):\n return datetime.astype('int64') // 1e9",
"def epoch_time_standardization(epoch_time):\n epoch_time_string = str(epoch_time)\n # if the given epoch time appears to include milliseconds (or some other level of precision)...\n # and does not have a decimal in it, add a decimal point\n if len(epoch_time_string) > 10 and '.' not in epoch_time_string:\n epoch_time = f'{epoch_time_string[:10]}.{epoch_time_string[10:]}'\n return epoch_time",
"def epoch_to_dt(epoch):\n if type(epoch) in (str, unicode):\n epoch = float(epoch)\n return datetime.fromtimestamp(epoch)",
"def to_epoch(datetime_obj):\n if sys.version_info[0:2] < (3, 3):\n import calendar\n\n return (\n calendar.timegm(datetime_obj.timetuple())\n + datetime_obj.microsecond / 1000000\n )\n else:\n return datetime_obj.timestamp()",
"def date_to_epoch(date):\n epoch_time = date.strftime('%s')\n return int(epoch_time)",
"def format_time(value: int) -> str:\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y%m%d%H%M%S')",
"def http_date(epoch_seconds=None):\n return formatdate(epoch_seconds, usegmt=True)",
"def epoch_to_date(epoch_time):\n epoch_time = float(epoch_time_standardization(epoch_time))\n return datetime.datetime.fromtimestamp(epoch_time)",
"def human_readable_time_from_epoch_time(epoch_time: int, utc_time: bool = False):\n result = datetime.fromtimestamp(epoch_time / 1e6).isoformat() if epoch_time else None\n if result:\n result += 'Z' if utc_time else ''\n return result",
"def _hx_time_to_epoch(self, timestr: str) -> int: # pragma: no cover\n\n time_obj = datetime.datetime.strptime(timestr, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n return int(time_obj.strftime(\"%s\"))",
"def rfc3339(self):\n if self._nanosecond == 0:\n return _to_rfc3339(self)\n nanos = str(self._nanosecond).rjust(9, \"0\").rstrip(\"0\")\n return \"{}.{}Z\".format(self.strftime(_RFC3339_NO_FRACTION), nanos)"
]
| [
"0.75380003",
"0.74161756",
"0.7317242",
"0.71895885",
"0.71866465",
"0.68813205",
"0.68426865",
"0.6692456",
"0.6676125",
"0.6659056",
"0.6645829",
"0.6564905",
"0.65551674",
"0.6492377",
"0.6463422",
"0.64625967",
"0.64381254",
"0.6437414",
"0.6381773",
"0.63762414",
"0.6369685",
"0.63510257",
"0.63415384",
"0.63413215",
"0.6291371",
"0.6290818",
"0.62631357",
"0.62518364",
"0.6230341",
"0.61919385"
]
| 0.7752254 | 0 |
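The record's document delegates to a datetime_to_rfc3339 helper that is not shown; the following is a self-contained sketch of the same conversion, assuming a 'Z'-suffixed UTC output format.

```python
import datetime

def epoch_to_rfc3339(epoch_int):
    # Seconds since the epoch -> UTC RFC 3339 string (the exact output format is an assumption).
    return datetime.datetime.utcfromtimestamp(epoch_int).strftime('%Y-%m-%dT%H:%M:%SZ')

print(epoch_to_rfc3339(0))           # 1970-01-01T00:00:00Z
print(epoch_to_rfc3339(1609459200))  # 2021-01-01T00:00:00Z
```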
Takes a CPE 2.3 formatted string and returns a CPE object. This is the only supported method to create an instance of this class. This is not entirely true to the spec: it does not unbind all the elements, as wfn representation is not used. All of the unbinding logic is concentrated in the conversion from wfn to uri format in as_cpe22_uri() | def from_cpe23_fs(cpe23_fs):
cpe_parts = cpe23_fs.split(':')
if cpe_parts and len(cpe_parts) == 13:
return CPE(
part=cpe_parts[2],
vendor=cpe_parts[3],
product=cpe_parts[4],
version=cpe_parts[5],
update=cpe_parts[6],
edition=cpe_parts[7],
language=cpe_parts[8],
sw_edition=cpe_parts[9],
target_sw=cpe_parts[10],
target_hw=cpe_parts[11],
other=cpe_parts[12]
)
elif len(cpe_parts) > 13:
# logger.debug('{} did not split nicely into 13 parts'.format(cpe23_fs))
adjusted_cpe_parts = []
counter = 1
# start from the third element in the list and iterate through the penultimate element
while counter < len(cpe_parts) - 1:
counter += 1
part = cpe_parts[counter]
# if the element ends with a '\', good chance its an escape for ':', concatenate the elements together
if part.endswith('\\'):
new_part = part
while counter < len(cpe_parts) - 1:
counter += 1
part = cpe_parts[counter]
new_part += ':' + part
if part.endswith('\\'):
continue
else:
break
adjusted_cpe_parts.append(new_part)
else:
adjusted_cpe_parts.append(part)
if len(adjusted_cpe_parts) == 11:
# logger.debug('Adjusted cpe components: {}'.format(adjusted_cpe_parts))
return CPE(
part=adjusted_cpe_parts[0],
vendor=adjusted_cpe_parts[1],
product=adjusted_cpe_parts[2],
version=adjusted_cpe_parts[3],
update=adjusted_cpe_parts[4],
edition=adjusted_cpe_parts[5],
language=adjusted_cpe_parts[6],
sw_edition=adjusted_cpe_parts[7],
target_sw=adjusted_cpe_parts[8],
target_hw=adjusted_cpe_parts[9],
other=adjusted_cpe_parts[10]
)
else:
raise Exception('Cannot convert cpe 2.3 formatted string {} into wfn'.format(cpe23_fs))
else:
raise Exception(
'Invalid cpe 2.3 formatted string {} Splitting with : delimiter resulted in less than 13 elements'.format(
cpe23_fs)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_cpe(cpe_uri):\n parts = CPE_REGEX.match(cpe_uri)\n return parts.group(\"vendor\"), parts.group(\"package\"), parts.group(\"version\")",
"def parser(text, utcnow=None, ugc_provider=None, nwsli_provider=None):\n # Careful here, see if we have two CLIs in one product!\n return CLIProduct(text, utcnow, ugc_provider, nwsli_provider)",
"def _get_clipping_object(strlist):\n if len(strlist) < 4:\n raise SyntaxError('Insufficient strings to constitute a clipping')\n # zeroth line is title and author\n title,author = _get_book(strlist[0])\n # next line is the clip metadata\n clip_type,page,location_range,datetime = _get_clip_meta(strlist[1])\n # clip metadata is followed by a line that seems to be always blank and is not part of clip text. \n # To be safe, if it does happen to be non-empty, preserve it in the clip_text.\n text_start = 3 \n if strlist[2].strip() != '':\n text_start = 2 # ensure this non-blank line becomes part of the clip_text\n clip_text = ''.join(strlist[text_start::])\n\n return Clipping(title, author, clip_type, page, location_range, datetime, clip_text)",
"def _decompose(cls,\n s = '',\n element = False):\n\n s = s.strip()\n\n x = cls._html.findall(s)\n if len(x) > 0:\n s = ''.join(x[0][::-1])\n\n s = cls._translate.get(s.lower(), s)\n\n name = s.strip()\n n = len(name)\n el = ''\n a = ''\n e = ''\n\n # get numbers\n n = re.findall(\"\\d+\", name)\n\n # get strings\n cx = re.findall(\"\\D+\", name)\n\n c = []\n for x in cx:\n xx = x.split('-')\n cy = [y for y in xx if y != '']\n c += cy\n if len(c) == 2:\n if c[0] in ('m', 'g'):\n c = c[::-1]\n if c[0][0] == '*':\n c = c[::-1]\n if len(n) > 0: a = n[0]\n if len(n) > 1: e = n[1]\n if len(n) > 2: raise ValueError(\"Can't understand isotope '{}'.\".format(s))\n if len(c) > 0: el = c[0]\n if len(el) > 0:\n if el[-1] in cls.EXCITE and len(c) == 1 and len(n) == 2:\n c.append(el[-1])\n el = el[:-1]\n if len(c) == 2 and c == ['(', ')']:\n if len(n) == 1:\n a = n[0]\n el = 'Z='\n e = ''\n c = []\n n = []\n else:\n return (s,) + ('',)*3\n if len(c) == 2:\n if c[1] in ('g', 'G'):\n e = '0'\n if len(n) > 1:\n return (s,) + ('',)*3\n elif c[1] in ('m', 'M') and len(n) == 1:\n e = '1'\n elif c[1][0] == '*' and len(n) == 1:\n e = str(len(c[1]))\n assert c[1].count('*') == len(c[1])\n if e == '1':\n e = str(cls.EANY)\n if not c[1] in ('m', 'g', 'M', 'G') and not c[1][0] == '*':\n return (s,) + ('',)*3\n\n if len(c) == 1 and c[0][-1] == '*':\n e = 0\n while c[0][-1] == '*':\n c[0] = c[0][:-1]\n e += 1\n assert e == 1\n e = str(e)\n el = c[0]\n\n if len(c) == 1 and c[0][0] == '*':\n e = 0\n while c[0][0] == '*':\n c[0] = c[0][1:]\n e += 1\n assert e == 1\n e = str(e)\n el = c[0]\n\n if s == 'a' and a == '':\n el = 'He'\n a = '4'\n # this is a possible conflict with potassium\n elif (element) and s == 'p':\n el = 'P'\n elif s == 'p':\n el = 'H'\n a = '1'\n elif el in ('p', 'pn') and a == '1':\n el = 'H'\n elif s == 'pn':\n el = 'H'\n a = ''\n elif el in ('d', 'D'):\n el = 'H'\n if not a in ('', '2'):\n raise AttributeError('\"d\" already implies mass; if supplied needs to be \"2\".')\n a = '2'\n elif el in ('t','T'):\n el = 'H'\n if not a in ('', '3'):\n raise AttributeError('\"t\" already implies mass; if supplied needs to be \"3\"')\n a = '3'\n elif (element) and s == 'n':\n el = 'N'\n elif s == 'n':\n el = 'nt'\n a = '1'\n elif el in ('n', 'nt') and a == '1':\n el = 'nt'\n elif s in ('g', 'G'):\n el = ''\n a = ''\n e = '1'\n elif (s.lower() in ('e-', 'b-', 'bd', 'pc')):\n s = el = 'e-'\n elif ((s.lower() in ('e+', 'b+', 'ec'))\n or ((not element) and (s.lower() == 'pd'))):\n s = el = 'e+'\n elif ((not element) and (s.lower() == 'ps')):\n s = 'h1'\n a = '1'\n el = 'h'\n elif ((not element) and (s.lower() == 'ns')):\n s = 'nt1'\n a = '1'\n el = 'nt'\n el = el.strip()\n# if len(el) == 2 and el(2)\n a = a.strip()\n e = e.strip()\n return s, el, a, e",
"def PPString(inp, mol, i, n, outFile):\n alchemy = re.compile('^\\w*2\\w*_\\d\\d\\d$')\n ppstr = re.sub('\\*', '', mol.string[i])\n if ppstr:\n PPStr = ppstr\n pp_root, pp_ext = os.path.split(ppstr)\n else:\n if inp.setting['pp_type'] == 'geodecker':\n element = mol.type_list[i].title()\n if 'd_shell' in inp.setting:\n if type(inp.setting['d_shell']) is not list:\n inp.setting['d_shell'] = [inp.setting['d_shell']]\n if qtk.n2ve(mol.type_list[i].title()) > 10:\n shell = '-d'\n elif 'd_shell' in inp.setting \\\n and element in inp.setting['d_shell']:\n shell = '-d'\n else:\n element = qtk.element[mol.type_list[i].title()]\n if element.group < 3 and mol.Z[i] > 1:\n if mol.Z[i] != 3:\n shell = '-sp'\n else:\n shell = '-s'\n else:\n shell = ''\n pp_xc_dict = {\n 'lda': 'pz',\n 'pbe0': 'pbe',\n 'b3lyp': 'blyp',\n }\n pp_xc = inp.setting['pp_theory'].lower()\n if pp_xc in pp_xc_dict:\n pp_xc = pp_xc_dict[pp_xc]\n PPStr = ''.join([c for c in mol.type_list[i] if not c.isdigit()])\\\n + '.' + pp_xc + shell + '-hgh.UPF'\n elif inp.setting['pp_type'] == 'cpmd':\n PPStr = PPName(inp, mol, i, n)\n xc = inp.setting['pp_theory'].lower()\n if not mol.string[i]:\n if inp.setting['pp_type'] == 'geodecker':\n PPCheck(pp_xc, mol.type_list[i].title(), PPStr)\n elif inp.setting['pp_type'] == 'cpmd':\n saved_pp = PPCheck_cpmd(pp_xc, mol.type_list[i].title(), PPStr)\n new_pp1 = saved_pp + '.UPF'\n conv_pp = sp.Popen(\"%s %s\" % \\\n (qtk.setting.espresso_cpmd2upf_exe, saved_pp),\n shell=True)\n conv_pp.wait()\n new_pp1_file = os.path.split(new_pp1)[1]\n new_pp1_trg = os.path.join(qtk.setting.espresso_pp, new_pp1_file)\n if not os.path.exists(new_pp1_trg):\n shutil.copy(new_pp1, qtk.setting.espresso_pp)\n PPStr = PPStr + '.UPF'\n\n elif alchemy.match(mol.string[i]):\n cpmd_pp = alchemyPP(xc, PPStr)\n new_pp1 = cpmd_pp + '.UPF'\n if not os.path.exists(new_pp1):\n qtk.report('espresso', \"rewrite Goedecker's PP to UPF\")\n conv_pp = sp.Popen(\"%s %s\" % \\\n (qtk.setting.espresso_cpmd2upf_exe, cpmd_pp),\n shell=True)\n conv_pp.wait()\n if conv_pp.returncode != 0:\n # dirty fix for espresso alchemy conversion routine\n qtk.warning('conversion failed..., trying path end points')\n root, _ = os.path.splitext(PPStr)\n element_str = re.sub('_.*', '', root)\n element1 = re.sub('2.*', '', element_str)\n element2 = re.sub('.*2', '', element_str)\n fraction = float(re.sub('.*_', '', root))/100\n if fraction == 0.0:\n strpp = element1 + \"_q\" + str(qtk.n2ve(element1)) +\\\n \"_\" + xc + '.psp'\n elif fraction == 1.0:\n strpp = element2 + \"_q\" + str(qtk.n2ve(element2)) +\\\n \"_\" + xc + '.psp'\n else:\n qtk.exit(\"PP conversion failed for intermediate lambda\")\n strpp = os.path.join(qtk.setting.cpmd_pp, strpp)\n conv_pp = sp.Popen(\"%s %s\" % \\\n (qtk.setting.espresso_cpmd2upf_exe, strpp),\n shell=True)\n conv_pp.wait()\n os.rename(strpp + '.UPF', new_pp1)\n new_pp1_file = os.path.split(new_pp1)[1]\n new_pp1_trg = os.path.join(qtk.setting.espresso_pp, new_pp1_file)\n if not os.path.exists(new_pp1_trg):\n shutil.copy(new_pp1, qtk.setting.espresso_pp)\n PPStr = PPStr + '.UPF'\n\n return PPStr",
"def parse_contract(data: bytes):\n res = parse_address(data[:22])\n if len(data) > 22:\n res += f'%{data[22:].decode()}'\n return res",
"def test_s2():\n vc = vtec.parse(EX1)\n assert vc[0].s2() == \"TO.W\"",
"def from_spec(cls, string_spec):\n old_ns, new_ns = string_spec.split('=')\n return cls(old_ns, new_ns)",
"def decode(ew):\n #XXX: We could perhaps do some heuristic recovery here.\n _, charset, cte, cte_string, _ = ew.split('?')\n cte = cte.lower()\n # Recover the original bytes and do CTE decoding.\n bstring = cte_string.encode('ascii', 'surrogateescape')\n bstring, defects = cte_decoders[cte](bstring)\n # Turn the CTE decoded bytes into unicode.\n try:\n string = bstring.decode(charset)\n except UnicodeError:\n defects.append(errors.UndecodableBytesDefect())\n string = bstring.decode(charset, 'replace')\n # XXX: more code to handle malformed ews?\n return string, defects",
"def parse(self, s):\r\n\r\n bytes = dePem(s, \"CERTIFICATE\")\r\n self.parseBinary(bytes)\r\n return self",
"def cpe22(self, param):\n data = self._http_get(\"cpe2.2\", query=param)\n return data",
"def parse_string(cstr):\n ret = ''\n if _RUNNING_PYTHON3 and ULog._disable_str_exceptions:\n ret = _parse_string(cstr, 'ignore')\n else:\n ret = _parse_string(cstr)\n return ret",
"def from_filename(cls, filename_str, convert=False, compact=False):\n\n return super().from_filename(filename_str, BMonFilename.fields_def, pad=BMonFilename.pad,\n delimiter=BMonFilename.delimiter, convert=convert, compact=compact)",
"def parse(cls, to_parse):\n\n payload, checksum, x, y = _parse_bcur_helper(bcur_string=to_parse)\n\n if x != 1 or y != 1:\n raise BCURStringFormatError(\n f\"BCURSingle must have x=1 and y=1, instead got x={x} and y={y}\"\n )\n\n # will throw an error if checksum is incorrect\n enc = bcur_decode(data=payload, checksum=checksum)\n return cls(\n text_b64=b2a_base64(enc).strip().decode(),\n encoded=payload,\n checksum=checksum,\n )",
"def cpe23(self, param):\n data = self._http_get(\"cpe2.3\", query=param)\n return data",
"def from_str(cls, value: str) -> \"ELBScheme\":\n value = value.replace(\"-\", \"_\")\n return cls[value]",
"def test_parses_map_3(self):\n p = GPBEC()\n p.parse(\"GPBEC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM,X*11\")\n\n self.assertEquals(\"GPBEC\", p.sen_type)\n self.assertEquals(\"220516\", p.timestamp)\n self.assertEquals(\"5130.02\", p.waypoint_lat)\n self.assertEquals(\"N\", p.waypoint_lat_dir)\n self.assertEquals(\"00046.34\", p.waypoint_lon)\n self.assertEquals(\"W\", p.waypoint_lon_dir)\n self.assertEquals(\"213.8\", p.bearing_true)\n self.assertEquals(\"T\", p.bearing_true_sym)\n self.assertEquals(\"218.0\", p.bearing_mag)\n self.assertEquals(\"M\", p.bearing_mag_sym)\n self.assertEquals(\"0004.6\", p.nautical_miles)\n self.assertEquals(\"N\", p.nautical_miles_sym)\n self.assertEquals(\"EGLM\", p.waypoint_id)\n self.assertEquals(\"X\", p.faa_mode)\n self.assertEquals(\"11\", p.checksum)",
"def get_struc_from__parser(p):\n from pymatgen.util.coord import find_in_coord_list_pbc\n from collections import OrderedDict\n from pymatgen.core.periodic_table import get_el_sp\n from pymatgen.io.cif import str2float\n import numpy as np\n\n def get_matching_coord(coord, ops, atol=1e-4):\n keys = list(coord_to_species.keys())\n coords = np.array(keys)\n for op in ops:\n c = op.operate(coord)\n inds = find_in_coord_list_pbc(coords, c, atol=atol)\n if len(inds):\n return keys[inds[0]]\n return False\n\n\n for i, d in enumerate(p._cif.data.values()):\n ops = p.get_symops(d)\n coord_to_species = OrderedDict()\n d0 = {\"_atom_site_label\": [], \n \"_atom_site_fract_x\": [],\n \"_atom_site_fract_y\": [],\n \"_atom_site_fract_z\": [],\n }\n for i in range(len(d[\"_atom_site_label\"])):\n try:\n symbol = p._parse_symbol(d[\"_atom_site_type_symbol\"][i])\n except KeyError:\n symbol = p._parse_symbol(d[\"_atom_site_label\"][i])\n \n el = get_el_sp(symbol)\n x = str2float(d[\"_atom_site_fract_x\"][i])\n y = str2float(d[\"_atom_site_fract_y\"][i])\n z = str2float(d[\"_atom_site_fract_z\"][i])\n \n coord = (x, y, z)\n match = get_matching_coord(coord, ops)\n if not match:\n d0['_atom_site_label'].append(el)\n d0[\"_atom_site_fract_x\"].append(str(x))\n d0[\"_atom_site_fract_y\"].append(str(y))\n d0[\"_atom_site_fract_z\"].append(str(z))\n coord_to_species[coord] = el\n d.data['_atom_site_label'] = d0['_atom_site_label']\n d.data['_atom_site_fract_x'] = d0['_atom_site_fract_x']\n d.data['_atom_site_fract_y'] = d0['_atom_site_fract_y']\n d.data['_atom_site_fract_z'] = d0['_atom_site_fract_z']\n \n s = p._get_structure(d, primitive=False, symmetrized=False)\n return s",
"def test_psi4_efp_5c():\n subject = subject5 + '\\nno_com\\nfix_orientation\\nsymmetry c1'\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)",
"def from_str ( cls, s, strict=False ):\n rwx_user = RWX.from_str ( s[0:3], strict=strict )\n rwx_group = RWX.from_str ( s[3:6], strict=strict )\n rwx_others = RWX.from_str ( s[6:9], strict=strict )\n return cls ( rwx_user, rwx_group, rwx_others )",
"def create_plaquette(self, baseStr, fidpairs=None):\n if fidpairs is None:\n fidpairs = list(_itertools.product(range(len(self.prepStrs)),\n range(len(self.effectStrs))))\n\n elements = [(j, i, self.prepStrs[i] + baseStr + self.effectStrs[j])\n for i, j in fidpairs] # note preps are *cols* not rows\n real_fidpairs = [(self.prepStrs[i], self.effectStrs[j]) for i, j in fidpairs] # strings, not just indices\n\n return CircuitPlaquette(baseStr, len(self.effectStrs),\n len(self.prepStrs), elements,\n self.aliases, real_fidpairs)",
"def deserialize(self, str):\n try:\n if self.icon is None:\n self.icon = rocon_std_msgs.msg.Icon()\n if self.remappings is None:\n self.remappings = None\n if self.pairing is None:\n self.pairing = rocon_interaction_msgs.msg.Pairing()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.compatibility = str[start:end].decode('utf-8')\n else:\n self.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.display_name = str[start:end].decode('utf-8')\n else:\n self.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.description = str[start:end].decode('utf-8')\n else:\n self.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.namespace = str[start:end].decode('utf-8')\n else:\n self.namespace = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.resource_name = str[start:end].decode('utf-8')\n else:\n self.icon.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.format = str[start:end].decode('utf-8')\n else:\n self.icon.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.icon.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parameters = str[start:end].decode('utf-8')\n else:\n self.parameters = str[start:end]\n start = end\n end += 4\n (self.max,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.pairing.rapp = str[start:end].decode('utf-8')\n else:\n self.pairing.rapp = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = 
str[start:end]\n self.pairing.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.parameters = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.key = str[start:end].decode('utf-8')\n else:\n val1.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.value = str[start:end].decode('utf-8')\n else:\n val1.value = str[start:end]\n self.pairing.parameters.append(val1)\n start = end\n end += 4\n (self.hash,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.role = str[start:end].decode('utf-8')\n else:\n self.role = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def get_string2(self):\n pass",
"def __init__(self, format_string):\r\n if not isinstance(format_string, Compatibility.string):\r\n raise TypeError('format_string should be a string, instead got %s' % type(format_string))\r\n self._re_pattern, self._applicators = self._preprocess_format_string(format_string)\r\n self._re = re.compile(self._re_pattern)",
"def create_com_from_pdb_str(pdb_str, gau_tpl_content, com_fname):\n coord_list = []\n pdb_str_list = pdb_str.split(\"\\n\")\n for line in pdb_str_list:\n if line.startswith('ATOM') or line.startswith('HETATM'):\n element = line[PDB_BEFORE_ELE_LAST_CHAR:PDB_ELE_LAST_CHAR].strip()\n pdb_xyz = line[PDB_MOL_NUM_LAST_CHAR:PDB_Z_LAST_CHAR]\n coord_list.append([\"{:6}\".format(element), pdb_xyz])\n elif line.startswith('CONECT') or line.startswith('END'):\n break\n list_to_file(gau_tpl_content[SEC_HEAD] + coord_list + gau_tpl_content[SEC_TAIL], com_fname, print_message=False)",
"def from_bytes(cls, bytes):\n construct = _constructs.CertificateURL.parse(bytes)\n return cls(\n type=construct.type,\n url_and_hash_list=[\n URLAndHash(\n url=url_and_hash.url,\n padding=url_and_hash.padding,\n sha1_hash=url_and_hash.sha1_hash,\n )\n for url_and_hash in construct.url_and_hash_list],\n )",
"def loadTFC(contactString):\n protocol = tfcProtocol(contactString)\n catalog = tfcFilename(contactString)\n instance = readTFC(catalog)\n instance.preferredProtocol = protocol\n return instance",
"def make_filter_specification(cls, filter_string):\n try:\n return parse_filter(filter_string)\n except ParseException as err:\n raise ValueError('Expression parameters have errors. %s' % err)",
"def parse_noclue1(self, data):\n\n model = [\n data[9], data[10],\n data[11], data[12], data[13],\n data[14], data[15], data[16],\n ]\n model_name = \"\".join(map(chr, model))\n self.model_name = model_name.strip()\n\n self.cfg_sig = f\"{data[18]:x}{data[19]:x}{data[20]:x}{data[21]:x}\"\n self.sw_vers = f\"{str(data[7])}.{str(data[8])}\"\n self.setup = data[17]\n self.ssid = f\"M{str(data[5])}_{str(data[6])} V{self.sw_vers}\"",
"def __new__(cls, format):\n self = super(SF_Pattern, cls).__new__(cls)\n\n if isinstance(format, bytes):\n uni_str = format.decode('ISO-8859-1') # decode to unicode\n trans_str = translate(uni_str) # translate only works with unicode\n re_fmt = trans_str.encode('ISO-8859-1') # encode back to bytes\n self._spec = _gbspec\n else:\n re_fmt = translate(format)\n self._spec = _gspec\n\n self._format = format\n self._re = cre = re.compile(re_fmt)\n\n if cre.groupindex and len(cre.groupindex) != cre.groups:\n raise RuntimeError('cannot mix mapped and unmapped specifiers')\n elif not cre.groupindex:\n self._retfunc = self._return_tuple\n self._type = tuple\n else:\n self._retfunc = self._return_dict\n self._type = dict\n\n self._casts = self._get_types()\n\n return self"
]
| [
"0.5067945",
"0.47294074",
"0.46910343",
"0.4539366",
"0.45285475",
"0.45284045",
"0.4522765",
"0.44751477",
"0.44448897",
"0.44306594",
"0.4416067",
"0.44121942",
"0.4408843",
"0.43579754",
"0.43081334",
"0.43073845",
"0.4301337",
"0.43006054",
"0.42711237",
"0.42651075",
"0.4261146",
"0.424162",
"0.4232954",
"0.42294905",
"0.42230695",
"0.4217875",
"0.4215208",
"0.42131823",
"0.42083794",
"0.42076433"
]
| 0.63144124 | 0 |
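A sketch of the simple path in the document above — splitting a CPE 2.3 formatted string on ':' into its eleven WFN attributes — without the escaped-colon recovery branch; the example string and helper name are illustrative.

```python
CPE23_FIELDS = ('part', 'vendor', 'product', 'version', 'update', 'edition',
                'language', 'sw_edition', 'target_sw', 'target_hw', 'other')

def split_cpe23(cpe23_fs):
    # Simple path only: exactly 13 ':'-separated parts, no handling of escaped ':'.
    parts = cpe23_fs.split(':')
    if len(parts) != 13 or parts[0] != 'cpe' or parts[1] != '2.3':
        raise ValueError('not a simple CPE 2.3 formatted string: {!r}'.format(cpe23_fs))
    return dict(zip(CPE23_FIELDS, parts[2:]))

print(split_cpe23('cpe:2.3:a:apache:log4j:2.14.1:*:*:*:*:*:*:*')['version'])  # 2.14.1
```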
Helper method for escaping the affected version data in nvd json data, which is usually unescaped. Converts the supplied version and ensures that the resulting version is CPE 2.3 formatted string compliant; this is necessary for as_cpe22_uri() to do its thing. | def update_version(self, version):
self.version = CPE.escape_for_cpe23_fs(version) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __format_golang_version(self, version):\n if '.' in version and version[0].isdigit():\n version = 'v' + version\n return version",
"def format_version(version):\n\n return \"v%03d\" % version",
"def _canonicalize_version(_version: str) -> str:\n\n try:\n version = PythonVersion(_version)\n except InvalidVersion:\n return _version\n\n parts = []\n\n # Epoch\n if version.epoch != 0:\n parts.append(\"{0}!\".format(version.epoch))\n\n # Release segment\n # NB: This strips trailing '.0's to normalize\n parts.append(re.sub(r\"(\\.0)+$\", \"\", \".\".join(\n str(x) for x in version.release\n )))\n\n # Pre-release\n if version.pre is not None:\n parts.append(\"\".join(str(x) for x in version.pre))\n\n # Post-release\n if version.post is not None:\n parts.append(\".post{0}\".format(version.post))\n\n # Development release\n if version.dev is not None:\n parts.append(\".dev{0}\".format(version.dev))\n\n # Local version segment\n if version.local is not None:\n parts.append(\"+{0}\".format(version.local))\n\n return \"\".join(parts)",
"def get_vspk_version(cls, version):\n return (\"v%s\" % version).replace(\".\", \"_\")",
"def to_pep440(version: str) -> str:\n return str(parse_version(version))",
"def _version_to_shorthand(version):\n parts = version.split('.')\n if len(parts) != 2 and len(parts) != 3:\n tmpl = 'Version string must be like X.Y or X.Y.Z, not `{}`'\n raise ValueError(tmpl.format(version))\n return parts[0] + parts[1]",
"def _parse_version_string(cmd_result: str) -> str:\n lines = cmd_result.splitlines()\n split_lines = [line.split(\" \") for line in lines]\n version_line = [\n line for line in split_lines if len(line) > 0 and line[1] == \"version\"\n ][0]\n version_string = version_line[2].replace('\"', \"\")\n return version_string",
"def convert_version_from_depsolver(semantic_version):\n return str(semantic_version)",
"def get_version_string():\n vl = TopicTreeExtractCVS.get_version_number()\n\n return '''TopicTreeExtractCVS {0}.{1}.{2}\nNew BSD License.\nCopyright (C) 2017 Hitoshi Yamauchi\n'''.format(vl[0], vl[1], vl[2])",
"def normalize_version_number(version):\n return _discover.normalize_version_number(version)",
"def safe_version(version):\n\n return version.replace(\".\", \"_\")",
"def _parse_version(version):\n return version.split(\".\")[0]",
"def check_version_str(version):\n if not version.startswith('v') and version != 'current':\n version = 'v%s' % version\n return version",
"def get_version(version=None):\n if version is None:\n version = VERSION\n assert len(version) == 5\n assert version[3] in (\"alpha\", \"beta\", \"rc\", \"final\")\n\n parts = 2 if version[2] == 0 else 3\n main = \".\".join(str(digit) for digit in version[:parts])\n\n sub = \"\"\n if version[3] != \"final\":\n mapping = {\"alpha\": \"a\", \"beta\": \"b\", \"rc\": \"rc\"}\n sub = mapping[version[3]] + str(version[4])\n\n return main + sub",
"def unsafe_version(version):\n\n return version.replace(\"_\", \".\")",
"def coerce_version(value):\n if not isinstance(value, Version):\n value = Version.from_string(value)\n return value",
"def repr2to3 (v):\n if isinstance(v, six.string_types):\n qu = QuotedEscaped(v)\n if 'u' == qu[0]:\n return qu[1:]\n return qu\n if isinstance(v, six.integer_types):\n vs = repr(v)\n if vs.endswith('L'):\n return vs[:-1]\n return vs\n return repr(v)",
"def _fix_version(name, version):\n version = sanitise_utf8(version)\n if version.lower().startswith(name.lower()):\n version = version[len(name):].lstrip()\n # Some engines unfortunately include usage instructions in the version\n # string (apparently for the sake of kgsGTP); try to clean this up.\n if len(version) > 64:\n # MoGo\n a, b, c = version.partition(\". Please read http:\")\n if b:\n return a\n # Pachi\n a, b, c = version.partition(\": I'm playing\")\n if b:\n return a\n # Other\n return version.split()[0]\n return version",
"def _convert_version(tup):\n ret_val = str(tup[0]) # first is always digit\n next_sep = \".\" # separator for next extension, can be \"\" or \".\"\n for x in tup[1:]:\n if isinstance(x, int):\n ret_val += next_sep + str(x)\n next_sep = '.'\n continue\n first_letter = x[0].lower()\n next_sep = ''\n if first_letter in 'abcr':\n ret_val += 'rc' if first_letter == 'r' else first_letter\n elif first_letter in 'pd':\n ret_val += '.post' if first_letter == 'p' else '.dev'\n return ret_val",
"def test_parse_version():\n version = VersionUtils.parse_version('9.5.3')\n assert version == VersionInfo(9, 5, 3)\n\n # Test #.# style versions\n v10_2 = VersionUtils.parse_version('10.2')\n assert v10_2 == VersionInfo(10, 2, 0)\n\n v11 = VersionUtils.parse_version('11')\n assert v11 == VersionInfo(11, 0, 0)\n\n # Test #beta# style versions\n beta11 = VersionUtils.parse_version('11beta3')\n assert beta11 == VersionInfo(11, 0, 0, prerelease='beta.3')\n\n assert v10_2 < beta11\n assert v11 > beta11\n\n # Test #rc# style versions\n version = VersionUtils.parse_version('11rc1')\n assert version == VersionInfo(11, 0, 0, prerelease='rc.1')\n\n # Test #nightly# style versions\n version = VersionUtils.parse_version('11nightly3')\n assert version == VersionInfo(11, 0, 0, 'nightly.3')\n\n v12_3_tde = VersionUtils.parse_version('12.3_TDE_1.0')\n assert v12_3_tde == VersionInfo(12, 3, 0)",
"def sanitize_version(version):\n if version[0].isalpha():\n version = version[1:]\n\n return version.split(\"-\", 1)[0]",
"def set_solc_version(version: Union[str, Version]) -> str:\n if not isinstance(version, Version):\n version = Version(version.lstrip(\"v\"))\n if version < Version(\"0.4.22\"):\n raise IncompatibleSolcVersion(\"Brownie only supports Solidity versions >=0.4.22\")\n try:\n solcx.set_solc_version(version, silent=True)\n except solcx.exceptions.SolcNotInstalled:\n if version not in _get_solc_version_list()[0]:\n raise IncompatibleSolcVersion(\n f\"Cannot install Solidity v{version} on this OS. You may be able to \"\n f\"manually compile from source with `solcx.compile_solc('{version}')`\"\n )\n install_solc(version)\n solcx.set_solc_version(version, silent=True)\n return str(solcx.get_solc_version())",
"def format_version(epoch, version, release):\n full_version = \"{}:{}\".format(epoch, version) if epoch else version\n if release:\n full_version += \"-{}\".format(release)\n return full_version",
"def get_version():\r\n return '.'.join((str(each) for each in VERSION[:3]))",
"def test_new_style_with_version(self):\n self.assertIsNotNone(parse_arxiv_id('1202.1234v1'))\n self.assertIsNotNone(parse_arxiv_id('1203.12345v1'))\n self.assertIsNotNone(parse_arxiv_id('1203.12345v12'))",
"def GetVersionStr(version):\n if isinstance(version, str):\n version = int(version, 0)\n major = ((version>>24) & 0xff)\n minor = ((version>>16) & 0xff)\n release = ((version>> 8) & 0xff)\n subrelease = (version & 0xff)\n if major >= 6:\n if subrelease > 0:\n subreleasestr = str(subrelease)\n else:\n subreleasestr = ''\n else:\n if subrelease > 0:\n subreleasestr = str(chr(subrelease+ord('a')-1))\n else:\n subreleasestr = ''\n return \"{:d}.{:d}.{:d}{}{}\".format( major, minor, release, '.' if (major >= 6 and subreleasestr != '') else '', subreleasestr)",
"def convert(self, value, param, ctx):\n converted_ver = Version(value)\n if converted_ver < Version('2') or converted_ver >= Version('4'):\n self.fail(\n \"Pulp Smash can test Pulp version 2.y and 3.y. It can't test \"\n 'Pulp version {}.'.format(converted_ver),\n param,\n ctx\n )\n return converted_ver",
"def versionstr():\n return \"%d.%d.%d%s\" % (version[0], version[1], version[2],\n '-' + gitstr() if gitstr() else '')",
"def versioned(filename, version, force_version=False, full_path=True):\n if not '.' in filename:\n return None\n\n if USE_VERSIONING or force_version:\n dotindex = filename.rindex('.')\n filename = u'%s.%s%s' % (filename[:dotindex], version, filename[dotindex:])\n\n if full_path:\n return static(filename)\n\n return filename",
"def calculate_new_release_version(version):\n split_version_string = version.split(\".\")\n part_zero = split_version_string[0]\n part_one = int(split_version_string[1]) + 1\n part_two = split_version_string[2]\n new_version = f\"{part_zero}.{part_one}.{part_two}\"\n return new_version"
]
| [
"0.63605845",
"0.6071471",
"0.59519637",
"0.58833855",
"0.58813864",
"0.5815081",
"0.57764536",
"0.5725392",
"0.567802",
"0.56723124",
"0.56363887",
"0.5627636",
"0.5595905",
"0.5481817",
"0.5464057",
"0.5450415",
"0.54397607",
"0.5431349",
"0.5430239",
"0.54297805",
"0.54265136",
"0.5399847",
"0.5304876",
"0.5294812",
"0.5289327",
"0.5287388",
"0.5279081",
"0.5263653",
"0.522289",
"0.5206139"
]
| 0.60912997 | 1 |
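
The snippets listed just above all revolve around turning a raw version string into some canonical or comparable form; the first one appears to lean on the `packaging` library (aliased as `PythonVersion`). Below is a minimal sketch of that normalization idea, assuming `packaging` is installed — it is an illustration of the pattern, not the code from the row.

    from packaging.version import InvalidVersion, Version

    def normalize_version(raw: str) -> str:
        # Return the PEP 440 canonical spelling, or the input unchanged when it
        # is not a valid PEP 440 version (mirroring the try/except fallback above).
        try:
            return str(Version(raw))
        except InvalidVersion:
            return raw

    print(normalize_version("v1.0"))           # "1.0"  (leading "v" is dropped)
    print(normalize_version("1.0.0-alpha1"))   # "1.0.0a1"
    print(normalize_version("not-a-version"))  # returned unchanged

Falling back to the raw string keeps the helper safe to call on vendor-specific version schemes that PEP 440 does not cover.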
This is a very limited implementation of CPE matching. other_cpe is a wildcard-ridden base CPE used by range descriptors; other_cpe is checked against this CPE for an exact match of part and vendor. For all the remaining components, the match is positive if the other CPE's component is an exact match or is the wildcard character. | def is_match(self, other_cpe):
if not isinstance(other_cpe, CPE):
return False
if self.part == other_cpe.part and self.vendor == other_cpe.vendor:
if other_cpe.product not in ['*', self.product]:
return False
if other_cpe.version not in ['*', self.version]:
return False
if other_cpe.update not in ['*', self.update]:
return False
if other_cpe.edition not in ['*', self.edition]:
return False
if other_cpe.language not in ['*', self.language]:
return False
if other_cpe.sw_edition not in ['*', self.sw_edition]:
return False
if other_cpe.target_sw not in ['*', self.target_sw]:
return False
if other_cpe.target_hw not in ['*', self.target_hw]:
return False
if other_cpe.other not in ['*', self.other]:
return False
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compare_cpes(lhs: ImageCpe, rhs: ImageCpe):\n vendor_cmp = compare_fields(lhs.vendor, rhs.vendor)\n if vendor_cmp != 0:\n return vendor_cmp\n\n name_cmp = compare_fields(lhs.name, rhs.name)\n if name_cmp != 0:\n return name_cmp\n\n version_cmp = compare_fields(lhs.version, rhs.version)\n if version_cmp != 0:\n return version_cmp\n\n update_cmp = compare_fields(lhs.update, rhs.update)\n if update_cmp != 0:\n return update_cmp\n\n meta_cmp = compare_fields(lhs.meta, rhs.meta)\n if meta_cmp != 0:\n return meta_cmp\n\n # all avenues of comparison have been depleted, the two cpes are same for all practical purposes\n return 0",
"def test_coden(self):\n inv_search = \"journal:aphys\"\n spi_search = \"find coden aphys\"\n self._compare_searches(inv_search, spi_search)",
"def parse_cpe(cpe_uri):\n parts = CPE_REGEX.match(cpe_uri)\n return parts.group(\"vendor\"), parts.group(\"package\"), parts.group(\"version\")",
"def test_partial_charge_resolution(self, inputs):\n toolkit_wrapper_class = inputs[\"toolkit\"]\n if not (toolkit_wrapper_class.is_available()):\n pytest.skip(f\"{toolkit_wrapper_class} is not available.\")\n toolkit_wrapper = toolkit_wrapper_class()\n partial_charge_method = inputs[\"partial_charge_method\"]\n expected_exception = inputs[\"exception\"]\n expected_exception_match = inputs[\"exception_match\"]\n ethanol = create_ethanol()\n ethanol.generate_conformers()\n if expected_exception is None:\n ethanol.assign_partial_charges(\n partial_charge_method=partial_charge_method,\n toolkit_registry=toolkit_wrapper,\n )\n abs_charge_sum = 0.0 * unit.elementary_charge\n\n # Ensure that nonzero charges were assigned\n for pc in ethanol.partial_charges:\n abs_charge_sum += abs(pc)\n assert abs_charge_sum > 0.5 * unit.elementary_charge\n\n else:\n with pytest.raises(expected_exception, match=expected_exception_match):\n ethanol.assign_partial_charges(\n partial_charge_method=partial_charge_method,\n toolkit_registry=toolkit_wrapper,\n )",
"def engel_filter(self,\n stock1: str,\n stock2: str,\n critical_pvalue: float = 0.01) -> bool:\n\n if coint(self.__price_data[stock1], self.__price_data[stock2])[1] < critical_pvalue:\n return True\n elif coint(self.__price_data[stock2], self.__price_data[stock1])[1] < critical_pvalue:\n return True\n else:\n return False",
"def match(self, other):",
"def partial_match(self, other_product: Product):\n if self.product_id and other_product.product_id and self.product_id == other_product.product_id:\n return True\n if self.quote_currency and other_product.quote_currency and self.quote_currency == other_product.quote_currency:\n return True\n if self.base_currency and other_product.base_currency and self.base_currency == other_product.base_currency:\n return True\n return False",
"def __eq__(self, other):\n if isinstance(other, CPF):\n return self.cpf == other.cpf\n return False",
"def match(self, dc):\n raise NotImplemented",
"def test_charge_increment_model_partially_overlapping_matches_both_apply(self):\n file_path = get_data_file_path(\"test_forcefields/test_forcefield.offxml\")\n ff = ForceField(file_path, xml_charge_increment_model_ff_both_apply)\n del ff._parameter_handlers[\"ToolkitAM1BCC\"]\n\n ethanol = create_ethanol()\n top = ethanol.to_topology()\n sys = ff.create_openmm_system(top)\n nonbonded_force = [\n force\n for force in sys.getForces()\n if isinstance(force, openmm.NonbondedForce)\n ][0]\n expected_charges = [\n 0.35,\n -0.05,\n 0,\n -0.1,\n -0.1,\n -0.1,\n 0.0,\n 0.0,\n 0.0,\n ] * openmm_unit.elementary_charge\n for idx, expected_charge in enumerate(expected_charges):\n charge, _, _ = nonbonded_force.getParticleParameters(idx)\n assert (\n abs(charge - expected_charge) < 1.0e-6 * openmm_unit.elementary_charge\n )",
"def pick_winner(self):\n cpe_dicts = self._get_cpe_dicts(self._cve.configurations)\n cpe_versions = self._get_cpe_versions(cpe_dicts)\n\n if cpe_versions:\n hit = False\n for candidate in self._candidates:\n package = candidate['package']\n\n # check if at least one version mentioned in the CVE exists\n # for given package name; if not, this is a false positive\n upstream_versions = self._get_upstream_versions(package)\n\n if cpe_versions & set(upstream_versions):\n # exact match, great!\n hit = True\n else:\n # upstream versions sometime contain suffixes like '.Final', '.RELEASE', etc.,\n # but those are ignored by NVD. try to detect such cases here.\n # TODO: refactoring needed\n for cpe_version in cpe_versions:\n for upstream_version in upstream_versions:\n if upstream_version.startswith(cpe_version):\n if len(upstream_version) > len(cpe_version):\n version_suffix = upstream_version[len(cpe_version):]\n version_suffix = version_suffix.lstrip('.-_')\n if version_suffix and not version_suffix[0].isdigit():\n hit = True\n break\n\n if hit:\n return candidate",
"def cpe22(self, param):\n data = self._http_get(\"cpe2.2\", query=param)\n return data",
"def test_charge_increment_model_completely_overlapping_matches_override(self):\n file_path = get_data_file_path(\"test_forcefields/test_forcefield.offxml\")\n ff = ForceField(file_path, xml_charge_increment_model_ff_override)\n del ff._parameter_handlers[\"ToolkitAM1BCC\"]\n\n ethanol = create_ethanol()\n top = ethanol.to_topology()\n sys = ff.create_openmm_system(top)\n nonbonded_force = [\n force\n for force in sys.getForces()\n if isinstance(force, openmm.NonbondedForce)\n ][0]\n expected_charges = [\n 0.3,\n 0,\n 0,\n -0.1,\n -0.1,\n -0.1,\n 0.0,\n 0.0,\n 0.0,\n ] * openmm_unit.elementary_charge\n for idx, expected_charge in enumerate(expected_charges):\n charge, _, _ = nonbonded_force.getParticleParameters(idx)\n assert (\n abs(charge - expected_charge) < 1.0e-6 * openmm_unit.elementary_charge\n )",
"def testMatch(self):\n\n self.inv._literals_filter['fruit'] = ['pear', 'apple']\n self.inv._literals_filter['xfruit'] = None\n self.inv._compiled_filter['shape'] = None\n self.inv._compiled_filter['xshape'] = None\n self.assertTrue(self.inv._Match('fruit', 'apple'))\n\n self.inv._literals_filter['fruit'] = None\n self.inv._compiled_filter['fruit'] = [re.compile('^apple$')]\n self.assertTrue(self.inv._Match('fruit', 'apple'))",
"def from_cpe23_fs(cpe23_fs):\n\n cpe_parts = cpe23_fs.split(':')\n\n if cpe_parts and len(cpe_parts) == 13:\n return CPE(\n part=cpe_parts[2],\n vendor=cpe_parts[3],\n product=cpe_parts[4],\n version=cpe_parts[5],\n update=cpe_parts[6],\n edition=cpe_parts[7],\n language=cpe_parts[8],\n sw_edition=cpe_parts[9],\n target_sw=cpe_parts[10],\n target_hw=cpe_parts[11],\n other=cpe_parts[12]\n )\n elif len(cpe_parts) > 13:\n # logger.debug('{} did not split nicely into 13 parts'.format(cpe23_fs))\n\n adjusted_cpe_parts = []\n counter = 1\n\n # start from the third element in the list and iterate through the penultimate element\n while counter < len(cpe_parts) - 1:\n counter += 1\n part = cpe_parts[counter]\n\n # if the element ends with a '\\', good chance its an escape for ':', concatenate the elements together\n if part.endswith('\\\\'):\n new_part = part\n\n while counter < len(cpe_parts) - 1:\n counter += 1\n part = cpe_parts[counter]\n new_part += ':' + part\n\n if part.endswith('\\\\'):\n continue\n else:\n break\n\n adjusted_cpe_parts.append(new_part)\n else:\n adjusted_cpe_parts.append(part)\n\n if len(adjusted_cpe_parts) == 11:\n # logger.debug('Adjusted cpe components: {}'.format(adjusted_cpe_parts))\n return CPE(\n part=adjusted_cpe_parts[0],\n vendor=adjusted_cpe_parts[1],\n product=adjusted_cpe_parts[2],\n version=adjusted_cpe_parts[3],\n update=adjusted_cpe_parts[4],\n edition=adjusted_cpe_parts[5],\n language=adjusted_cpe_parts[6],\n sw_edition=adjusted_cpe_parts[7],\n target_sw=adjusted_cpe_parts[8],\n target_hw=adjusted_cpe_parts[9],\n other=adjusted_cpe_parts[10]\n )\n else:\n raise Exception('Cannot convert cpe 2.3 formatted string {} into wfn'.format(cpe23_fs))\n else:\n raise Exception(\n 'Invalid cpe 2.3 formatted string {} Splitting with : delimiter resulted in less than 13 elements'.format(\n cpe23_fs))",
"def crossmatch_other_catalog(candid, ra, dec, catalog_name, radius_arcsec=None):\n pdf = pd.DataFrame(\n {\n 'ra': ra.values,\n 'dec': dec.values,\n 'candid': range(len(ra))\n }\n )\n\n curdir = os.path.dirname(os.path.abspath(__file__))\n if catalog_name.values[0] == 'gcvs':\n catalog = curdir + '/data/catalogs/gcvs.parquet'\n ra2, dec2, type2 = extract_gcvs(catalog)\n elif catalog_name.values[0] == 'vsx':\n catalog = curdir + '/data/catalogs/vsx.parquet'\n ra2, dec2, type2 = extract_vsx(catalog)\n elif catalog_name.values[0] == '3hsp':\n catalog = curdir + '/data/catalogs/3hsp.csv'\n ra2, dec2, type2 = extract_3hsp(catalog)\n elif catalog_name.values[0] == '4lac':\n catalog_h = curdir + '/data/catalogs/table-4LAC-DR3-h.fits'\n catalog_l = curdir + '/data/catalogs/table-4LAC-DR3-l.fits'\n ra2, dec2, type2 = extract_4lac(catalog_h, catalog_l)\n\n # create catalogs\n catalog_ztf = SkyCoord(\n ra=np.array(ra.values, dtype=float) * u.degree,\n dec=np.array(dec.values, dtype=float) * u.degree\n )\n\n catalog_other = SkyCoord(\n ra=np.array(ra2.values, dtype=float) * u.degree,\n dec=np.array(dec2.values, dtype=float) * u.degree\n )\n\n pdf_merge, mask, idx2 = cross_match_astropy(\n pdf, catalog_ztf, catalog_other, radius_arcsec=radius_arcsec\n )\n\n pdf_merge['Type'] = 'Unknown'\n pdf_merge.loc[mask, 'Type'] = [\n str(i).strip() for i in type2.astype(str).values[idx2]\n ]\n\n return pdf_merge['Type']",
"def test_combine_multiple(self):\n inv_search = 'author:\"gattringer, c*\" keyword:chiral keyword:symmetry -title:chiral'\n spi_search = \"find a c gattringer and k chiral symmetry and not title chiral\"\n self._compare_searches(inv_search, spi_search)",
"def isComrade(self, other): # are the pieces comrades ?\r\n \r\n if self.name == other.name: \r\n return True\r\n else:\r\n return False",
"def match_units(self, other):\n for key in self.photons.colnames:\n if key in other.photons.colnames:\n if other[key].unit:\n unit = other[key].unit\n if str(unit).lower() == 'none' and str(self[key].unit).lower() == 'none':\n continue\n self[key].convert_unit_to(unit)",
"def cpe23(self, param):\n data = self._http_get(\"cpe2.3\", query=param)\n return data",
"def match(self, *args):\n return _ida_hexrays.udc_filter_t_match(self, *args)",
"def _get_extended_candidate(self, old_cand, new_char, new_char_index):\n new_text_state, new_word = old_cand.text_state.extended(new_char, new_char_index, sep=self.sep)\n if self.allowed_prefixes is not None and (new_word or new_text_state.last_word) not in self.allowed_prefixes:\n return None, None\n new_cand = self.text_to_candidate.get(new_text_state.text, None)\n if new_cand is None:\n new_cand = CtcBeamSearchCandidate(old_cand)\n self.text_to_candidate[new_text_state.text] = new_cand\n new_cand.text_state = new_text_state\n new_cand.new_logp_blank = -np.inf\n new_cand.new_logp_non_blank = -np.inf\n return new_cand, new_word",
"def match(self, other: 'layout.Encoding') -> bool:\n return (\n '*' not in other.kind\n and fnmatch.fnmatch(other.kind, self.kind)\n and all(other.options.get(k) == v for k, v in self.options.items())\n )",
"def test_prefilter_exact_matches(self):\r\n seqs = [('s1 comment1', 'ACCTTGTTACTTT'), # three copies\r\n ('s2 comment2', 'ACCTTGTTACTTTC'), # one copy\r\n ('s3 comment3', 'ACCTTGTTACTTTCC'), # two copies\r\n ('s4 comment4', 'ACCTTGTTACTTT'),\r\n ('s5 comment5', 'ACCTTGTTACTTTCC'),\r\n ('s6 comment6', 'ACCTTGTTACTTT')]\r\n expected0 = [('QiimeExactMatch.s1', 'ACCTTGTTACTTT'),\r\n ('QiimeExactMatch.s2', 'ACCTTGTTACTTTC'),\r\n ('QiimeExactMatch.s3', 'ACCTTGTTACTTTCC')]\r\n expected1 = {'QiimeExactMatch.s1': ['s1', 's4', 's6'],\r\n 'QiimeExactMatch.s2': ['s2'],\r\n 'QiimeExactMatch.s3': ['s3', 's5']}\r\n expected = (expected0, expected1)\r\n p = OtuPicker({})\r\n actual = p._prefilter_exact_matches(seqs)\r\n self.assertEqual(actual, expected)",
"def component_similar ( same ) :\n if same is Ellipsis : return True\n elif same is NotImplemented : return True\n elif isinstance ( same , str ) \\\n and same.strip().lower() in ( 'ditto' , 'similar' ) : return True\n return False",
"def test_matches_both_exist_and_match(self):\n eq_(None,grepit(\"foobar\",[\"oo\"],[\"fo\"]))",
"def __cmp__(self, other):\n\n if self is None and other is None:\n return 0\n elif self is None:\n return -1\n elif other is None:\n return 1\n elif isinstance(other, str):\n return cmp(self.get_DER(), other)\n else:\n return cmp(self.get_DER(), other.get_DER())",
"def test_get_exact_bc_matches_no_hit(self):\r\n\r\n curr_bc = \"AAAA\"\r\n all_bcs = [\"AAAAA\", \"TTTT\", \"CC\"]\r\n actual_bc = get_exact_bc_matches(curr_bc, all_bcs)\r\n\r\n expected_bc = None\r\n self.assertEqual(actual_bc, expected_bc)",
"def _cp_embeds_into(cp1, cp2):\n # Check that any state in cp2 is matched in cp1\n # If the thing we're matching to is just a monomer pattern, that makes\n # things easier--we just need to find the corresponding monomer pattern\n # in cp1\n if cp1 is None or cp2 is None:\n return False\n cp1 = as_complex_pattern(cp1)\n cp2 = as_complex_pattern(cp2)\n if len(cp2.monomer_patterns) == 1:\n mp2 = cp2.monomer_patterns[0]\n # Iterate over the monomer patterns in cp1 and see if there is one\n # that has the same name\n for mp1 in cp1.monomer_patterns:\n if _mp_embeds_into(mp1, mp2):\n return True\n return False",
"def test_get_exact_bc_matches_hit(self):\r\n\r\n curr_bc = \"AAAA\"\r\n all_bcs = [\"AAAAA\", \"TTTT\", \"CC\", \"AAAA\"]\r\n actual_bc = get_exact_bc_matches(curr_bc, all_bcs)\r\n\r\n expected_bc = \"AAAA\"\r\n self.assertEqual(actual_bc, expected_bc)"
]
| [
"0.5382316",
"0.5212851",
"0.49537984",
"0.48303866",
"0.48274323",
"0.48095724",
"0.4796859",
"0.47573984",
"0.47336668",
"0.47266325",
"0.4703803",
"0.46701133",
"0.4657172",
"0.46518388",
"0.4650754",
"0.46407726",
"0.45443553",
"0.4544047",
"0.45435515",
"0.4538998",
"0.4525268",
"0.45178515",
"0.45175448",
"0.44834673",
"0.44684994",
"0.44561702",
"0.4430676",
"0.4419508",
"0.44173703",
"0.44160655"
]
| 0.70947486 | 0 |
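
A self-contained sketch of the wildcard rule that `is_match` applies in the row above: `part` and `vendor` must match exactly, while every other component of the base CPE matches when it is either the `*` wildcard or equal to the concrete value. The dict-based representation here is purely illustrative; the original operates on `CPE` objects.

    def component_matches(base: str, concrete: str) -> bool:
        # A base component matches when it is the wildcard or an exact match.
        return base in ('*', concrete)

    def cpe_matches(base: dict, concrete: dict) -> bool:
        # part and vendor must agree exactly; the rest may be wildcarded.
        if base['part'] != concrete['part'] or base['vendor'] != concrete['vendor']:
            return False
        rest = ('product', 'version', 'update', 'edition', 'language',
                'sw_edition', 'target_sw', 'target_hw', 'other')
        return all(component_matches(base[k], concrete[k]) for k in rest)

    base = dict(part='a', vendor='apache', product='struts', version='*', update='*',
                edition='*', language='*', sw_edition='*', target_sw='*',
                target_hw='*', other='*')
    concrete = dict(base, version='2.3.1', update='')
    print(cpe_matches(base, concrete))  # True: only wildcarded fields differ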
Event handler for use with ijson parsers to output floats instead of Decimals for better json serializability downstream. | def ijson_decimal_to_float(event):
if event[1] == 'number' and isinstance(event[2], decimal.Decimal):
return event[0], event[1], float(event[2])
else:
return event | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def float_format(self):\n ...",
"def _serialize_decimal(val):\n return float(val)",
"def add_support_for_floats_to_dynamodb():\n\n # Ignore loss of precision rather than raising exception\n DYNAMODB_CONTEXT.clear_traps()\n\n # Keep a reference to the original serialization methods\n boto3_serialize_orig = TypeSerializer.serialize\n boto3_deserialize_orig = TypeDeserializer.deserialize\n\n # Wrap serialization methods to support floats\n def boto3_serialize(self, value):\n if isinstance(value, float):\n value = Decimal(value)\n return boto3_serialize_orig(self, value)\n\n def boto3_deserialize(self, value):\n value = boto3_deserialize_orig(self, value)\n if isinstance(value, Decimal):\n value = float(value)\n return value\n\n # Replace the serialization methods with wrapped versions\n TypeSerializer.serialize = boto3_serialize\n TypeDeserializer.deserialize = boto3_deserialize",
"def default(self, o):\n if isinstance(o, Decimal):\n return float(o)\n return super(DecimalJSONEncoder, self).default(o)",
"def _float_handler(self, values, style, caller, *args):\n\n if style == 'values':\n return values\n else:\n behaviors = {\"over\": operator.ge,\n \"under\": operator.lt}\n\n return behaviors[caller](values, self.margin)",
"def __float__(self):\n return float(self.number)",
"def visit_number(self, node, children):\n if self.debug:\n print(\"Converting {}.\".format(node.value))\n return float(node.value)",
"def test_deserialize_float_value(self):\n from petstore_api.model import banana\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=banana.Banana),\n },\n )\n data = {\n 'lengthCm': 3.1415\n }\n response = self.__response(data)\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, banana.Banana))\n self.assertTrue(isinstance(body.lengthCm, Decimal))\n self.assertEqual(body.lengthCm, 3.1415)\n\n \"\"\"\n Float value is serialized without decimal point\n The client receive it as an integer, which work because Banana.lengthCm is type number without format\n Which accepts int AND float\n \"\"\"\n data = {\n 'lengthCm': 3\n }\n response = self.__response(data)\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, banana.Banana))\n self.assertTrue(isinstance(body.lengthCm, Decimal))\n self.assertEqual(body.lengthCm, 3)",
"def test_normal_decimal_input(self):\r\n ws_leader = \"S. O'Neal (14.9)\"\r\n res = treat_input(ws_leader, type=\"float\")\r\n assert res == 14.9",
"def process(self, value):\n return float(value)",
"async def put_float( # pylint: disable=inconsistent-return-statements\n self, complex_body: JSON, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:",
"def value(self) -> float:\n ...",
"def value(self) -> float:\n ...",
"def value(self) -> float:\n ...",
"def setFloat(self, key, value, prec):\n #value = (\"%(format)s\" % {'format': \"%%.%df\" % prec}) % value\n self.__config.setValue(key, QtCore.QVariant(value))\n self.__saved = False",
"def value(self) -> float:",
"def custom_result(result):\n for (k, v) in result.items():\n if pd.isna(v):\n result[k] = None\n elif re.match(r\"^[-+]?[0-9]+\\.[0-9]+$\", str(v)) is not None:\n result[k] = int(round(v))\n\n return json.dumps(result, ensure_ascii=False)",
"def parse_float(val, fn):\n return float(val)",
"def format_value(self, value: float) -> str:\r\n ...",
"def format_value(self, value: float) -> str:\r\n ...",
"async def put_float( # pylint: disable=inconsistent-return-statements\n self, complex_body: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:",
"def __str__(self):\n # NOTE: as this may be used in f-strings just return the value as a str\n return str(float(self))",
"def __float__(self):\n return self.num / self.denom # result of / is of type float",
"def traverse(self, traverser, **kwargs):\n return traverser.float(self, **kwargs)",
"def __float__(self) -> float:\n return self._translate_in_type(float, self.integer)",
"def decode(val):\n if isinstance(val, Decimal):\n return float(val)\n return val",
"def getfloat(self, option):\n return getfloat(self.name, option)",
"def nodata_handler(ctx, param, value):\n retval = from_like_context(ctx, param, value)\n if retval is None and value is not None:\n try:\n retval = float(value)\n except:\n raise click.BadParameter(\n \"%s is not a number.\" % repr(value),\n param=param, param_hint='nodata')\n return retval",
"def testFloatValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'[email protected]')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, 42.1))",
"def give_me_a_float():\n return 5.8\n pass"
]
| [
"0.6859712",
"0.62191844",
"0.61126083",
"0.60944045",
"0.6072339",
"0.60572433",
"0.60548705",
"0.60541445",
"0.5945829",
"0.5910709",
"0.5849566",
"0.5847714",
"0.5847714",
"0.5847714",
"0.5756914",
"0.57321376",
"0.5715654",
"0.5713692",
"0.5697113",
"0.5697113",
"0.56542146",
"0.5634823",
"0.56037825",
"0.55707556",
"0.5566289",
"0.5566261",
"0.55408967",
"0.55360067",
"0.55252767",
"0.549643"
]
| 0.7431174 | 0 |
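
A short usage sketch for a mapper like the one in the row above, assuming the `ijson` streaming parser: `ijson.parse` yields `(prefix, event, value)` tuples, and non-integer JSON numbers typically arrive as `decimal.Decimal`, so mapping them to `float` keeps the event stream friendly to `json.dumps` downstream.

    import decimal
    import io

    import ijson  # assumption: the ijson package is installed

    def decimal_to_float(evt):
        # Map Decimal-valued 'number' events to float; pass everything else through.
        prefix, event, value = evt
        if event == 'number' and isinstance(value, decimal.Decimal):
            return prefix, event, float(value)
        return evt

    data = io.BytesIO(b'{"price": 1.25, "qty": 3}')
    for prefix, event, value in map(decimal_to_float, ijson.parse(data)):
        print(prefix, event, type(value).__name__)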
Sync the provided roles and permissions. | def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:
existing_roles = self._get_all_roles_with_permissions()
non_dag_perms = self._get_all_non_dag_permissions()
for config in roles:
role_name = config["role"]
perms = config["perms"]
role = existing_roles.get(role_name) or self.add_role(role_name)
for action_name, resource_name in perms:
perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(
action_name, resource_name
)
if perm not in role.permissions:
self.add_permission_to_role(role, perm) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sync_roles(self) -> None:\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()",
"def sync_role_definitions(self) -> None:\n\n logger.info(\"Syncing role definition\")\n\n self.create_custom_permissions()\n\n # Creating default roles\n self.set_role(\"Admin\", self._is_admin_pvm)\n self.set_role(\"Alpha\", self._is_alpha_pvm)\n self.set_role(\"Gamma\", self._is_gamma_pvm)\n self.set_role(\"granter\", self._is_granter_pvm)\n self.set_role(\"sql_lab\", self._is_sql_lab_pvm)\n\n # Configure public role\n if current_app.config[\"PUBLIC_ROLE_LIKE\"]:\n self.copy_role(\n current_app.config[\"PUBLIC_ROLE_LIKE\"],\n self.auth_role_public,\n merge=True,\n )\n\n self.create_missing_perms()\n\n # commit role and view menu updates\n self.get_session.commit()\n self.clean_perms()",
"def _update(self, uuid, name, permissions):\n data = {\"name\": name, \"permissions\": permissions, \"uuid\": uuid}\n path = self.router.roles_by_uuid.format(uuid=uuid)\n return self.request(\n method=\"post\", path=path, json=data, error_json_invalid=False\n )",
"def _updateRoles(self, obj, updateRoles={}, deleteRoles=[], cancelRoles=[]):\n #user_role_delta = {}\n pending = obj.getPendingCollaborations()\n\n collabs = obj.getCollaborators()\n\n user_role_delta = obj.generateCollaborationRequests(\n newUser=False, newRoles=updateRoles, deleteRoles=deleteRoles)\n \n for p in user_role_delta.keys():\n if p in pending.keys():\n new_changes = pending[p].roles.copy()\n for role in user_role_delta[p]:\n delta = user_role_delta[p][role]\n if role in new_changes:\n if new_changes[role] != delta:\n new_changes.pop(role)\n elif new_changes[role] == delta:\n #Shouldn't happen\n pass\n else:\n new_changes[role] = delta\n if not new_changes:\n obj.manage_delObjects(pending[p].id)\n else:\n obj.editCollaborationRequest(pending[p].id, new_changes)\n else:\n obj.addCollaborator(p)\n obj.requestCollaboration(p, user_role_delta[p])\n\n for u in cancelRoles:\n if u in obj.getPendingCollaborations():\n # Revert the new roles back to the published version\n obj.reverseCollaborationRequest(pending[u].id)\n # Delete the collaboration request\n obj.manage_delObjects(pending[u].id)\n\n #Get the collaborators again if they have changed\n all_roles = {}\n for rolename in obj.default_roles + getattr(obj, 'optional_roles', {}).keys():\n for r in getattr(obj,rolename.lower()+'s',[]):\n all_roles[r]=None\n for r in getattr(obj, 'pub_'+rolename.lower()+'s', []):\n all_roles[r]=None\n \n collabs = obj.getCollaborators()\n for c in collabs:\n if c not in all_roles.keys():\n obj.removeCollaborator(c)",
"def _sync_all(cursor):\n _print_info('Syncing all privileges.')\n\n all_namespace_permissions = _fetch_all_namespace_permissions(cursor)\n\n for namespace_permission in all_namespace_permissions:\n namespace = namespace_permission['namespace']\n users = namespace_permission['users']\n\n _print_info('Working on namespace: \\'{}\\''.format(namespace))\n for user in users:\n _grant_select_privilege(cursor, user, namespace)",
"def save(self):\n body = {}\n body[\"permissions\"] = dict(self.permissions)\n body[\"name\"] = self.name\n body[\"description\"] = self.description\n _, role = self._requestor.patch('/roles/' + self._id, json=body)\n self._data = role\n self.name = role[\"name\"]\n self.description = role[\"description\"]\n self.system = role[\"system\"]\n self.permissions = dict(role[\"permissions\"])",
"def roles(self, roles):\n\n self._roles = roles",
"def roles(self, roles):\n\n self._roles = roles",
"def roles(self, roles):\n\n self._roles = roles",
"def updateRoles(self, obj, dom):\n domRoles = self.validateRoles(self.getRolesFromDOM(dom))\n moduleRoles = self.validateRoles(self.getRolesFromModule(obj))\n\n updateRoles = {}\n deleteUsers = []\n cancelRoles = []\n \n if self.action == 'create' or self.update_semantics == 'replace':\n # set default roles only if the dom contains no roles\n if len(domRoles.keys()) == 0:\n updateRoles = self.getDefaultRoles(\n self.pmt.getAuthenticatedMember().getId())\n else:\n updateRoles.update(domRoles)\n\n elif self.update_semantics == 'merge':\n updateRoles.update(moduleRoles)\n for role, userids in domRoles.items():\n userids = set(userids)\n userids.union(updateRoles.get(role, []))\n updateRoles[role] = list(userids)\n\n elif self.update_semantics == 'replace':\n currentUsers = set()\n for userids in moduleRoles.values():\n currentUsers.update(userids)\n domUsers = set()\n for userids in domRoles.values():\n domUsers.update(userids)\n for userids in updateRoles.values():\n domUsers.update(userids)\n deleteUsers = currentUsers.difference(domUsers)\n\n # XXX: Workaround for bug in generateCollaborationRequests that\n # requires a user listed in deleteRoles to be present in\n # newRoles\n for role, userids in moduleRoles.items():\n for user in deleteUsers:\n if user in userids:\n updateRoles.setdefault(role, [])\n updateRoles[role].append(user)\n\n self._updateRoles(obj, updateRoles, deleteUsers, cancelRoles)",
"async def roles(self, ctx):\n\n pass",
"def grant_role(self, role, principal_ids):",
"def updateRoles(request):\n\n # update org admins\n #updateRole('gsoc_org_admin')\n\n # update mentors\n #updateRole('gsoc_mentor')\n\n # update students\n # we can assume that students cannot have any other roles, so we do not\n # need to set ETA\n updateRole('gsoc_student')",
"def update_admin_permission(self) -> None:\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()",
"def init_role(self, role_name, perms) -> None:\n warnings.warn(\n \"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n self.bulk_sync_roles([{\"role\": role_name, \"perms\": perms}])",
"async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)",
"def set_resources_and_sync(videos, channels=[], update_kwargs={}):\n set_resources(videos, 'video')\n set_resources(channels, 'channel')\n sync.update_related_models_from_cache(**update_kwargs)\n return videos",
"async def setRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.edit(roles=roles)\n await ctx.send(f\"Setting {roles_str(person, roles)}\")",
"async def change_role_positions(self, roles: 'typing.Union[typing.Dict[role.Role, int], '\n 'typing.List[typing.Tuple[role.Role, int]]]'):\n if not self.me.guild_permissions.manage_roles:\n raise PermissionsError(\"manage_roles\")\n\n if isinstance(roles, dict):\n roles = roles.items()\n\n to_send = []\n for r, new_position in roles:\n if new_position >= self.me.top_role.position:\n raise HierarchyError(\"Cannot move role above our top role\")\n\n to_send.append((str(r.id), new_position))\n\n to_send = [(str(r.id), new_position) for (r, new_position) in roles]\n await self._bot.http.edit_role_positions(to_send)",
"async def channel_role_permissions(\n self,\n ctx: Context,\n role: discord.Role,\n excluded_channels: commands.Greedy[discord.TextChannel],\n *,\n permissions: str = \"\",\n ):\n if not role:\n await ctx.send(\n f\"Usage: `,,chrp <role> [excluded channels] permission1=True, permission2=None, permission3=False...`\"\n )\n return\n\n excluded_channel_ids = [ch.id for ch in excluded_channels]\n all_channels = [\n ch\n for ch in ctx.guild.text_channels # type: ignore\n if ch.category_id not in [360570306131132417, 704886695446839346]\n and ch.id not in excluded_channel_ids\n ]\n\n if not permissions:\n # delete permission overwrites\n for ch in all_channels:\n if role in ch.overwrites:\n await ch.set_permissions(role, overwrite=None)\n await ctx.send(f\"Deleted permission overwrites for {str(role)}\")\n return\n\n permission_list = permissions.split(\",\")\n overwrite_dict = dict()\n force = False\n for permission in permission_list:\n if \"-f\" in permission:\n force = True\n permission.replace(\"-f\", \"\")\n permission_key, permission_val = permission.split(\"=\")\n permission_key = permission_key.strip()\n if not permission_val:\n await ctx.send(\n f\"Permission must be specified in the format: permission_name=Value\"\n )\n continue\n permission_val = permission_val.strip().title()\n if permission_val not in [\"None\", \"True\", \"False\"]:\n await ctx.send(\n f\"{permission_val} is not a valid permission value. Use True, False, or None\"\n )\n continue\n if hasattr(Permissions(), permission_key):\n overwrite_dict[permission_key] = eval(permission_val)\n else:\n await ctx.send(f\"{permission_key} is not a valid permission\")\n continue\n\n if overwrite_dict:\n overwrite = PermissionOverwrite(**overwrite_dict)\n for ch in all_channels:\n if role in ch.overwrites:\n existing_ow = ch.overwrites_for(role)\n if existing_ow == overwrite:\n continue\n if not force:\n existing_ow.update(**overwrite_dict)\n await ch.set_permissions(role, overwrite=existing_ow)\n continue\n await ch.set_permissions(role, overwrite=overwrite)\n\n await ctx.send(\"Finished applying role permissions\")",
"def update_role_files(**kwargs):\n\n # Finds out which tracking branch you are on\n # Generates a commit in OA and each of its roles\n # Generates a git show output\n # Asks before triggering git review\n\n # Example commit message\n # Update all SHAs for 15.1.8\n # This patch updates all the roles to the latest available stable\n # SHA's, copies the release notes from the updated roles into the\n # integrated repo, updates all the OpenStack Service SHA's, and\n # updates the appropriate python requirements pins.\n click.echo(\"Not implemented yet\")",
"async def setoperator(self, ctx, role_id: int, perms: int):\n s = db.session()\n role = s.query(db.AdminRole).filter(db.AdminRole.role_id == role_id).first()\n if role:\n if perms == 0:\n s.delete(role)\n else:\n role.perms = perms\n else:\n s.add(db.AdminRole(role_id=role_id, perms=perms))\n s.commit()\n s.close()\n await ctx.send(\"Role set\")",
"def sync_dirs(self, *dirs, force_hash=False):\n roots = [SyncRoot(d) for d in dirs]\n if self._reverse_sync_order:\n roots = reversed(roots)\n synchronizer = Synchronizer(*roots, force_hash=force_hash)\n synchronizer.sync()",
"def update(request, role_id):\n\n role = get_object_or_404(ProjectRole, pk=role_id)\n\n # require permission to proceed\n must_have_permission(request.user, role.project, \"can_edit_roles\")\n\n permittee = Permittee.objects.get_as_permittee(request.user)\n\n initial_set = list(role.obj_permissions.values_list(\"pk\", flat=True))\n\n # Get the permissions that the user can delegate to others as well\n # as the ones that are already in the role. Obtain DISTINCT values.\n obj_permissions = ObjectPermission.objects.filter_from_instance(\n role.project).filter(\n Q(permissionownership__permittee=permittee,\n permissionownership__can_delegate=True) |\n Q(id__in=initial_set)\n ).distinct()\n\n project_url = reverse(\"project_detail\", args=[role.project.id])\n\n # Use to update the permissions in the ProjectRole object so\n # users with that role are affected from the time this is updated\n def post_save(instance, created):\n from expedient.clearinghouse.roles.models import ObjectPermission\n new_obj_permissions_pks = [ p.pk for p in instance.obj_permissions.all() ]\n for permission in obj_permissions:\n # Add and delete permissions accordingly...\n try:\n instance.remove_permission(permission)\n except:\n pass\n if permission.pk in new_obj_permissions_pks:\n instance.add_permission(permission)\n\n return generic_crud(\n request,\n obj_id=role_id,\n model=ProjectRole,\n template=TEMPLATE_PATH+\"/update.html\",\n redirect=lambda instance: project_url,\n template_object_name=\"role\",\n form_class=ProjectRoleForm,\n extra_form_params={\n \"obj_permissions\": obj_permissions,\n },\n extra_context={\n \"project\": role.project,\n \"breadcrumbs\": (\n (\"Home\", reverse(\"home\")),\n (\"Project %s\" % role.project.name, project_url),\n (\"Update Role %s\" % role.name, request.path),\n )\n },\n post_save = post_save,\n )",
"def set_role(\n self, role_name: str, pvm_check: Callable[[PermissionView], bool]\n ) -> None:\n\n logger.info(\"Syncing %s perms\", role_name)\n pvms = self.get_session.query(PermissionView).all()\n pvms = [p for p in pvms if p.permission and p.view_menu]\n role = self.add_role(role_name)\n role_pvms = [\n permission_view for permission_view in pvms if pvm_check(permission_view)\n ]\n role.permissions = role_pvms\n self.get_session.merge(role)\n self.get_session.commit()",
"def _add(self, name, permissions):\n data = {\"name\": name, \"permissions\": permissions}\n path = self.router.roles\n return self.request(method=\"put\", path=path, json=data)",
"def update(\n self,\n redditor: str | praw.models.Redditor,\n *,\n permissions: list[str] | None = None,\n ):\n url = API_PATH[\"setpermissions\"].format(subreddit=self.subreddit)\n data = self._handle_permissions(\n other_settings={\"name\": str(redditor), \"type\": \"moderator\"},\n permissions=permissions,\n )\n self.subreddit._reddit.post(url, data=data)",
"def update_roles_by_id(self, role_ids):\n return self.update_supergroups_by_id(role_ids, 'role')",
"async def _set_roles(self, ctx: Context):\n\n guild: discord.Guild = ctx.guild\n\n host = await guild.create_role(\n name=\"Host\", colour=discord.Color(0xFFBF37),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).host_id.set(host.id)\n await ctx.author.add_roles(host)\n\n player = await guild.create_role(\n name=\"Player\", colour=discord.Color(0x37BFFF),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).player_id.set(player.id)\n\n repl = await guild.create_role(\n name=\"Replacement\", colour=discord.Color(0x86FF40)\n )\n await self.config.guild(guild).repl_id.set(repl.id)\n\n spec = await guild.create_role(\n name=\"Spectator\", colour=discord.Color(0xD837FF)\n )\n await self.config.guild(guild).spec_id.set(spec.id)\n\n dead = await guild.create_role(\n name=\"Dead\", colour=discord.Color(0xDC5757)\n )\n await self.config.guild(guild).dead_id.set(dead.id)\n\n txt = _(\n \"Host: {}\"\n \"\\nPlayer: {}\"\n \"\\nSpectator: {}\"\n \"\\nDead: {}\"\n \"\\nReplacement: {}\"\n ).format(\n host.mention,\n player.mention,\n spec.mention,\n dead.mention,\n repl.mention\n )\n\n embed = discord.Embed(\n color=0x37BFFF, title=\"Created Roles!\", description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Set up required roles!\")",
"def put(self):\n token = self.access_token_from_authorization_header()\n\n data = self.convert_argument_to_json()\n\n permissions = data['permissions']\n\n if len(permissions) is not 4:\n raise tornado.web.HTTPError(400, 'Some permissions are missing. Permissions count must be 4.')\n\n for ix, permission in enumerate(permissions):\n\n try:\n permission = int(permission)\n\n if permission not in [0, 1]:\n raise Exception('Permission must be either of 0 or 1.')\n\n permissions[ix] = int(permission)\n\n except Exception as ex:\n raise tornado.web.HTTPError(400, 'Permission must be integer')\n\n with self.session_scope() as session:\n token = convert_uuid_or_400(token)\n\n token = session.query(AuthToken).filter(AuthToken.uid == token).one_or_none()\n\n user = token.auth\n updated_permission = bitarray()\n\n updated_permission.extend(permissions)\n\n user.permissions = updated_permission.to01()\n\n session.flush()\n\n response = user.to_json()\n self.write(response)"
]
| [
"0.77906233",
"0.66263545",
"0.63187075",
"0.5924941",
"0.58685404",
"0.582195",
"0.5794141",
"0.5794141",
"0.5794141",
"0.5774081",
"0.5730462",
"0.57237566",
"0.5634548",
"0.5609491",
"0.5607976",
"0.5532661",
"0.5518215",
"0.5483266",
"0.54664904",
"0.54311615",
"0.54242206",
"0.53969777",
"0.53838164",
"0.53337824",
"0.5316053",
"0.530654",
"0.5277662",
"0.5226708",
"0.51739305",
"0.5173215"
]
| 0.77958214 | 0 |
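
The pattern in the row above is "create only what is missing": look up each role and each (action, resource) permission in a pre-fetched cache, create whatever is absent, and attach a permission to a role only if it is not already attached. Here is a framework-agnostic sketch of that idea with deliberately simple stand-in types — not the Airflow security-manager API.

    from dataclasses import dataclass, field

    @dataclass
    class Role:
        name: str
        permissions: list = field(default_factory=list)

    def bulk_sync_roles(role_configs, roles_by_name, perms_by_key):
        # roles_by_name: {role_name: Role}; perms_by_key: {(action, resource): perm}
        for config in role_configs:
            role = roles_by_name.setdefault(config["role"], Role(config["role"]))
            for action, resource in config["perms"]:
                perm = perms_by_key.setdefault((action, resource), (action, resource))
                if perm not in role.permissions:
                    role.permissions.append(perm)
        return roles_by_name

    roles = bulk_sync_roles(
        [{"role": "Viewer", "perms": [("can_read", "Website")]}],
        roles_by_name={}, perms_by_key={},
    )
    print(roles["Viewer"].permissions)  # [('can_read', 'Website')]

Because nothing is duplicated, re-running the sync is a no-op for anything that already exists, which is what makes the bulk version safe to call at startup.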
Get all the roles associated with the user. | def get_user_roles(user=None):
if user is None:
user = g.user
return user.roles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_roles(self):\n url = 'userroles'\n result = self.get(url)\n return result.get('userroles', result)",
"def list(self, **kwargs):\n # TODO(adriant): Look up user by name/id\n url = '/openstack/users/%s/roles' % kwargs['user']\n return self._list(url, 'roles')",
"def token_auth_get_user_roles(user):\n print(user)\n return user.get_roles()",
"def get_user_roles(user_id: str) -> list:\n response = api.get_user_roles(user_id)\n\n if not response.ok:\n print(response.data)\n sys.exit(1)\n\n return response.data.get(\"items\")",
"def roles(self):\n params = {\n \"f\" : \"json\"\n }\n uURL = self._url + \"/roles\"\n return self._con.get(path=uURL, params=params)",
"def list(self):\n return self.client.find_all_roles()",
"def get_roles(self, include_remote=True):\n rbac_service = get_rbac_backend().get_service_class()\n result = rbac_service.get_roles_for_user(\n user_db=self, include_remote=include_remote\n )\n return result",
"async def get_user_roles(request):\n\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(status=400, text=\"Incorrect user_id\")\n\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n data = {\n \"username\": user.username,\n \"user_id\": user.id,\n \"roles\": {\"owner\": [], \"member\": [], \"manager\": []}, # FIXME : use USER_ROLES\n }\n\n roles = (\n request.cirrina.db_session.query(UserRole)\n .filter_by(user_id=user_id)\n .join(Project)\n .filter(UserRole.project_id == Project.id)\n .order_by(Project.name)\n .values(UserRole.role, Project.id, Project.name)\n )\n\n for role in roles:\n data[\"roles\"][role.role].append({\"id\": role.id, \"name\": role.name})\n\n return web.json_response(data)",
"def all_organization_member_roles(\n self,\n id: str,\n user_id: str,\n page: int | None = None,\n per_page: int | None = None,\n ) -> list[dict[str, Any]]:\n params = {\"page\": page, \"per_page\": per_page}\n return self.client.get(\n self._url(id, \"members\", user_id, \"roles\"), params=params\n )",
"def get_roles(self):\n\t\tif not self.roles:\n\t\t\tself.roles = get_roles(self.name)\n\t\treturn self.roles",
"def listRoles(self):\n return self._client.listRoles()",
"def _get_roles(self):\n return api.tuskar.OvercloudRole.list(self.request)",
"def get_roles(self):\n path = \"%s/services/impala/roles\" % self.__base_path\n response = self.__session.get(path)\n self.__check_status_code(response.status_code)\n return response.json()",
"def get_all_roles(self):\n token = self.get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n return self.jwt_role_callback(unverified_claims)",
"def user_roles():\n access_token = _request_ctx_stack.top.current_user_token\n message_log(\"Got access token for user roles\")\n user_roles = get_user_roles(access_token)\n return json.dumps(list(user_roles))",
"def roles(self, user):\n return {}",
"def list_roles(self):\n resp, body = self.get(\"roles\")\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['roles'])",
"def get_roles(self) -> requests.models.Response:\n return self.get('v1/roles')",
"def get_roles_list(self):\n try:\n roles = self.db_handler.get_roles_list()\n self.logger.write_to_log('roles got', 'model')\n return roles\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def list_available_roles(self, uid):\n uid = self._check_uid(uid)\n role_data = self._router_request(\n self._make_request_data(\n 'getRolesList',\n data=dict(\n uid=uid\n )\n )\n )\n\n return role_data['data']",
"def list_user_roles_on_project(self, project_id, user_id):\n resp, body = self.get('projects/%s/users/%s/roles' %\n (project_id, user_id))\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['roles'])",
"def getAllRoles(self):\n\n # Learn URL of AllRoles service\n url = self.config.get(\"Authorization\",\"allroles\") # http://erra.ccss.cz/g4i-portlet/service/list/roles/en\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] AllRoles url: %s\"% url)\n \n # Request all roles from LifeRay\n import httplib2\n h = httplib2.Http()\n header, content = h.request(url, \"GET\")\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] response header: %s\"% header)\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] response content: %s\"% content)\n\n # Parse the response\n try:\n allRolesJson = json.loads(content)\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] AllRoles reply succesfully parsed\")\n except ValueError,e:\n logging.error(\"[LaymanAuthLiferay][getAllRoles] Cannot parse AllRoles reply: '%s'\"% content)\n raise AuthError(500, \"Cannot parse GET All Roles response [%s] as JSON:%s\"% (content,e)) \n \n roles = allRolesJson[\"roles\"]\n\n # lower() and spaces\n for rr in roles:\n rr[\"roleName\"] = rr[\"roleName\"].lower()\n rr[\"roleName\"] = \"_\".join(rr[\"roleName\"].split(' '))\n\n # Return roles\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] Return roles: %s\"% str(roles))\n return roles",
"def listUserRoles(self, name):\n return self._client.listUserRoles(name)",
"def list(self, **kwargs):\n params = {}\n url = '/openstack/roles?%(params)s' % {\n 'params': parse.urlencode(params, True)\n }\n return self._list(url, 'roles')",
"def roles(self):\n return self._roles",
"def get_roles(self):\n return [role.role_id for role in self.roles if role]",
"def list_roles():\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles\".format(url=get_registry_url()))\n\treturn response.json()[\"results\"]",
"def get_roles(self, **search_args):\n return self.openbis.get_role_assignments(person=self, **search_args)",
"def get_roles():\n check_admin()\n roles = Role.query.all()\n\n return render_template('admin/roles/roles.html', roles=roles, title=\"Roles\")",
"def get_possible_roles(user_id):\n # --- Rechte des Verwalters\n my_role_id = get_role_by_user_path(request.user, item_container.container.path)\n roles_all = DmsRoles.objects.filter(id__gte=my_role_id)\n roles_all = DmsRoles.objects.filter(id__gte=my_role_id)\n # --- Rechte der betreffenden Person\n if user_id > -1:\n my_user_role = DmsUserUrlRole.objects.filter(user__id=user_id).\\\n filter(container__path=folder_path)\n if len(my_user_role) > 0:\n my_role_id = my_user_role[0].role.id\n else:\n my_role_id = 50\n else:\n my_role_id = 50\n roles = []\n for role in roles_all:\n roles.append({'id': role.id,\n 'name': role.name,\n 'description': role.description,\n 'checked': role.id == my_role_id})\n return roles"
]
| [
"0.83929795",
"0.79378843",
"0.7776199",
"0.76373804",
"0.7512251",
"0.7501195",
"0.7472107",
"0.73554534",
"0.73497117",
"0.73466504",
"0.73303014",
"0.72634876",
"0.7181796",
"0.71781325",
"0.71354675",
"0.7121091",
"0.711354",
"0.7098917",
"0.7070297",
"0.70572376",
"0.7052521",
"0.69667757",
"0.6940818",
"0.69059163",
"0.68917227",
"0.68830335",
"0.6835851",
"0.6829536",
"0.6797353",
"0.67889196"
]
| 0.81134564 | 1 |
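
The row above defaults to the current request's user when no explicit user is passed (the `g` object is Flask's request-scoped globals, as suggested by the snippet). A tiny runnable sketch of the same default-argument pattern, with a stand-in namespace instead of Flask:

    from types import SimpleNamespace

    # Stand-in for flask.g; in the original, g.user is populated by the auth layer.
    g = SimpleNamespace(user=SimpleNamespace(roles=["Viewer", "Op"]))

    def get_user_roles(user=None):
        if user is None:
            user = g.user
        return user.roles

    print(get_user_roles())                                  # ['Viewer', 'Op']
    print(get_user_roles(SimpleNamespace(roles=["Admin"])))  # ['Admin']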
Gets the DAGs readable by authenticated user. | def get_readable_dags(self, user) -> Iterable[DagModel]:
warnings.warn(
"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_readable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])",
"def can_read_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)",
"def get_accessible_dag_ids(\n self,\n user,\n user_actions: Container[str] | None = None,\n session: Session = NEW_SESSION,\n ) -> set[str]:\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]\n\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (\n permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)\n ):\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n user_query = session.scalar(\n select(User)\n .options(\n joinedload(User.roles)\n .subqueryload(Role.permissions)\n .options(joinedload(Permission.action), joinedload(Permission.resource))\n )\n .where(User.id == user.id)\n )\n roles = user_query.roles\n\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])\n else:\n resources.add(resource)\n return {\n dag.dag_id\n for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))\n }",
"def _get_read_accessible_workspaces_by_user(user):\n if not settings.CAN_ANONYMOUS_ACCESS_PUBLIC_DOCUMENT and user.is_anonymous:\n accessible_workspaces = []\n else:\n # workspace case\n # list accessible workspaces\n accessible_workspaces = [\n workspace.id\n for workspace in workspace_api.get_all_workspaces_with_read_access_by_user(\n user\n )\n ]\n\n return accessible_workspaces",
"def get_editable_dags(self, user) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)",
"def get_editable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])",
"def can_read_dag(self, dag_id: str, user=None) -> bool:\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)",
"def can_edit_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)",
"def filter_granted(self, queryset):\n granted_runs = ContainerRun.filter_by_user(self.request.user)\n\n return queryset.filter(run_id__in=granted_runs)",
"def getDashboardsForUser(user):\n dashboards = Dashboard.objects(Q(analystId=user.id) | Q(isPublic=True))\n parents = []\n userDashboards = []\n #get all id's of parent dashboards\n for dash in dashboards:\n if dash.parent:\n parents.append(dash.parent)\n #remove any parent from the list to prevent duplicate dashboards\n for dash in dashboards:\n if not dash.id in parents:\n userDashboards.append(dash)\n return userDashboards",
"def get_access_list(self):\n return self.manager.get_access_list(self)",
"def get_all_access():\n\t# Get the email from the user making the request\n\temail = get_jwt_identity()\n\treturn get_all_access_helper(email)",
"def list_user_access(self):\n return self.manager.list_user_access(self)",
"def create_dag_specific_permissions(self) -> None:\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n\n for dag in dags:\n root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)",
"def filter_granted(self, queryset):\n granted_containers = Container.filter_by_user(self.request.user)\n\n return queryset.filter(container_id__in=granted_containers)",
"def get_queryset(self):\n user = self.request.user\n return Beer.objects.filter(owner=user)",
"def get_queryset(self):\n qs = self.character.valid_actions.order_by(\"-id\")\n user = self.request.user\n if not user or not user.is_authenticated:\n return qs.filter(public=True).filter(status=PlotAction.PUBLISHED)\n if (\n user.is_staff\n or user.check_permstring(\"builders\")\n or user.char_ob == self.character\n ):\n return qs\n return qs.filter(public=True).filter(status=PlotAction.PUBLISHED)",
"def getAccessLogForUser(cls, user):\n return cls.objects.filter(user_id=user.pk).order_by('timestamp')",
"def get_accessible_spaces(user):\n if not user:\n return []\n obj_list = get_objects_for_user(user, 'access_space',Space)\\\n .order_by('-created_at')\n return obj_list",
"def get_queryset(self):\n user_requested = self.kwargs['user']\n self.check_object_permissions(self.request, user_requested)\n return Poll.objects.filter(created_by__username=user_requested)",
"def get_queryset(self):\n user = self.request.user\n return user.group_set.all()",
"def filter_granted(self, queryset):\n granted_containers = Container.filter_by_user(self.request.user)\n\n return queryset.filter(app__container_id__in=granted_containers)",
"def get_queryset(self):\n user = self.request.user\n return Task.objects.filter(author=user)",
"def get_public_bags(store):\n usersign = {'name': 'GUEST', 'roles': []}\n for bag in store.list_bags():\n try:\n bag = store.get(bag)\n bag.policy.allows(usersign, 'read')\n yield bag\n except PermissionsError:\n pass",
"def get_queryset(self):\n qs = Log.objects.filter(user=self.request.user)\n return qs",
"def get_queryset(self):\n if self.request.user.is_staff:\n return WaitQueue.objects.all()\n return WaitQueue.objects.filter(user=self.request.user)",
"def get_allowed_topologies(user):\n try:\n up = user.get_profile()\n except AttributeError:\n return db.Topology.objects.none()\n\n if user.has_perm(\"vnswww.topology_use_any\"):\n # We can view and use any templates\n topos = db.Topology.objects.filter()\n else:\n q_own = Q(owner=user)\n q_permitted = Q(allowed_users=user)\n q_org = Q(org=user.get_profile().org)\n q_public = Q(public=True)\n if user.has_perm(\"vnswww.topology_use_org\"):\n print \"Allowed all topos in own org\"\n # We can view and use any from the user's organization\n topos = db.Topology.objects.filter(q_permitted | q_org | q_own)\n else:\n print \"NOT allowed all topos in own org\"\n # We can view any from our own organization which are protected\n topos = db.Topology.objects.filter(q_permitted | q_own)\n\n return topos",
"def get(self):\n response = {\"tasks\": []}\n user_roles = [role[\"name\"] for role in g._user[\"roles\"]]\n if \"administrator\" in user_roles or \"tasks_all\" in user_roles:\n for k, v in available_tasks_by_path.items():\n response[\"tasks\"].append(v)\n else:\n for k, v in available_tasks_by_path.items():\n if f\"task_{k}\" in user_roles:\n response[\"tasks\"].append(v)\n return response",
"def get_queryset(self):\n user = self.request.user\n return Interaction.objects.filter(owner=user)",
"def by_user(user):\n return Tag.objects.all().filter(owner=user)"
]
| [
"0.7089873",
"0.67098624",
"0.6468782",
"0.63686824",
"0.62738264",
"0.60500044",
"0.6023267",
"0.58576083",
"0.57512844",
"0.5697229",
"0.5695736",
"0.5688438",
"0.5622657",
"0.56148934",
"0.55736953",
"0.55663365",
"0.5542382",
"0.5535234",
"0.55239934",
"0.5515076",
"0.5510436",
"0.5503525",
"0.54975885",
"0.54919994",
"0.5448345",
"0.5422555",
"0.54224384",
"0.5397043",
"0.53867996",
"0.53803474"
]
| 0.6834099 | 1 |
Gets the DAGs editable by authenticated user. | def get_editable_dags(self, user) -> Iterable[DagModel]:
warnings.warn(
"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_editable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])",
"def can_edit_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)",
"def get_editable_explorations(user_id):\n return [e for e in get_viewable_explorations(user_id)\n if e.is_editable_by(user_id)]",
"def get_readable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])",
"def get_accessible_dag_ids(\n self,\n user,\n user_actions: Container[str] | None = None,\n session: Session = NEW_SESSION,\n ) -> set[str]:\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]\n\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (\n permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)\n ):\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n user_query = session.scalar(\n select(User)\n .options(\n joinedload(User.roles)\n .subqueryload(Role.permissions)\n .options(joinedload(Permission.action), joinedload(Permission.resource))\n )\n .where(User.id == user.id)\n )\n roles = user_query.roles\n\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])\n else:\n resources.add(resource)\n return {\n dag.dag_id\n for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))\n }",
"def get_readable_dags(self, user) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)",
"def get_access_list(self):\n return self.manager.get_access_list(self)",
"def _get_read_accessible_workspaces_by_user(user):\n if not settings.CAN_ANONYMOUS_ACCESS_PUBLIC_DOCUMENT and user.is_anonymous:\n accessible_workspaces = []\n else:\n # workspace case\n # list accessible workspaces\n accessible_workspaces = [\n workspace.id\n for workspace in workspace_api.get_all_workspaces_with_read_access_by_user(\n user\n )\n ]\n\n return accessible_workspaces",
"def can_read_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)",
"def filter_granted(self, queryset):\n granted_runs = ContainerRun.filter_by_user(self.request.user)\n\n return queryset.filter(run_id__in=granted_runs)",
"def get_viewable_explorations(user_id):\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_viewable_explorations(user_id)]",
"def get_queryset(self):\n qs = self.character.valid_actions.order_by(\"-id\")\n user = self.request.user\n if not user or not user.is_authenticated:\n return qs.filter(public=True).filter(status=PlotAction.PUBLISHED)\n if (\n user.is_staff\n or user.check_permstring(\"builders\")\n or user.char_ob == self.character\n ):\n return qs\n return qs.filter(public=True).filter(status=PlotAction.PUBLISHED)",
"def create_dag_specific_permissions(self) -> None:\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n\n for dag in dags:\n root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)",
"def list_user_access(self):\n return self.manager.list_user_access(self)",
"def get_queryset(self):\n user = self.request.user\n return Interaction.objects.filter(owner=user)",
"def get_queryset(self):\n user = self.request.user\n return Task.objects.filter(author=user)",
"def get_queryset(self):\n user = self.request.user\n return Beer.objects.filter(owner=user)",
"def get_queryset(self):\n user = self.request.user\n return user.group_set.all()",
"def can_edit_dag(self, dag_id: str, user=None) -> bool:\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)",
"def get_all_access_entries(self) -> QuerySet:\n return self.model.objects.all().order_by(\"created_at\")",
"def get_queryset(self):\n queryset = Project.objects.filter(contributor__user=self.request.user.pk)\n return queryset",
"def get_queryset(self):\n return self.request.user.assets.all()",
"def get_accessible_spaces(user):\n if not user:\n return []\n obj_list = get_objects_for_user(user, 'access_space',Space)\\\n .order_by('-created_at')\n return obj_list",
"def filter_granted(self, queryset):\n granted_containers = Container.filter_by_user(self.request.user)\n\n return queryset.filter(container_id__in=granted_containers)",
"def get_accessible_projects(user):\n query = Q(deprecated_files=False)\n\n query &= get_public_projects_query()\n\n if user.is_authenticated:\n query |= get_restricted_projects_query(user)\n\n if user.is_credentialed:\n query |= get_credentialed_projects_query(user)\n\n query |= get_projects_accessible_through_events(user)\n\n return PublishedProject.objects.filter(query).distinct()",
"def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Projects.objects.filter(username = username).order_by('-id')",
"def get_projects_accessible_through_events(user):\n events_all = Event.objects.filter(Q(host=user) | Q(participants__user=user))\n\n active_events = set(events_all.filter(end_date__gte=datetime.now()))\n\n accessible_datasets = EventDataset.objects.filter(event__in=active_events, is_active=True)\n\n accessible_projects_ids = []\n for event_dataset in accessible_datasets:\n if has_access_to_event_dataset(user, event_dataset):\n accessible_projects_ids.append(event_dataset.dataset.id)\n\n query = Q(id__in=accessible_projects_ids)\n return query",
"def get_queryset(self):\n return models.Task.objects.filter(\n user__exact=self.request.user,\n date__exact=self.get_date()\n )",
"def filter_granted(self, queryset):\n granted_containers = Container.filter_by_user(self.request.user)\n\n return queryset.filter(app__container_id__in=granted_containers)",
"def get_isAdminOf(self, obj):\n userCompanies = get_objects_for_user(\n obj, \"change_company\", klass=models.Company, accept_global_perms=False)\n return [x.id for x in userCompanies]"
]
| [
"0.75425524",
"0.66582686",
"0.6618005",
"0.6526522",
"0.6415768",
"0.61828756",
"0.6021461",
"0.60081035",
"0.59142524",
"0.574658",
"0.5726433",
"0.572193",
"0.565869",
"0.5658475",
"0.56448656",
"0.5641777",
"0.5621332",
"0.5581223",
"0.5547112",
"0.551225",
"0.5500119",
"0.54899067",
"0.54893214",
"0.5453079",
"0.5433642",
"0.542239",
"0.54199165",
"0.5401019",
"0.53670776",
"0.5356931"
]
| 0.74315757 | 1 |
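The get_editable_dags record above shows a common deprecation shim: warn once, then delegate to the replacement while silencing the nested warning the delegate would itself raise. Below is a minimal standalone sketch of that pattern, using the built-in DeprecationWarning instead of Airflow's RemovedInAirflow3Warning; the function names and the placeholder return value are illustrative only.

import warnings


def get_editable_ids(user):
    # Replacement API in this sketch; a fixed placeholder result keeps it self-contained.
    return {"example_dag"}


def get_editable_items(user):
    # Deprecated shim: emit one warning, then delegate with nested warnings suppressed,
    # mirroring the structure of the record above.
    warnings.warn(
        "`get_editable_items` is deprecated; use `get_editable_ids` instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        return get_editable_ids(user)


if __name__ == "__main__":
    warnings.simplefilter("always", DeprecationWarning)
    print(get_editable_items(user=None))  # one DeprecationWarning, then {'example_dag'}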
Gets the DAG IDs readable by authenticated user. | def get_readable_dag_ids(self, user) -> set[str]:
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_editable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])",
"def get_accessible_dag_ids(\n self,\n user,\n user_actions: Container[str] | None = None,\n session: Session = NEW_SESSION,\n ) -> set[str]:\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]\n\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (\n permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)\n ):\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n user_query = session.scalar(\n select(User)\n .options(\n joinedload(User.roles)\n .subqueryload(Role.permissions)\n .options(joinedload(Permission.action), joinedload(Permission.resource))\n )\n .where(User.id == user.id)\n )\n roles = user_query.roles\n\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])\n else:\n resources.add(resource)\n return {\n dag.dag_id\n for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))\n }",
"def getUserIds(self):\n raise BorkedGetUserIds",
"def get_ids(self):\n return self._graphs.keys()",
"def getIds(self) -> List[int]:\n return list(self.users.keys())",
"def user_ids(self):\r\n raise NotImplementedError",
"def get_editable_dags(self, user) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)",
"def user_ids(self):\n return list(self.get_users())",
"def get_readable_dags(self, user) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)",
"def get_user_ids(session, access_token):\n endpoint = \"https://graph.microsoft.com/v1.0/users?$select=id\"\n r = session.get(endpoint, headers={\"Authorization\": \"Bearer \" + access_token})\n response = json.loads(r.text)\n return response[\"value\"]",
"def getIDs():",
"def get_ids(self) -> List[str]:",
"def user_ids(self):\n return list(self._user_ids)",
"def get_user_session_ids(user_id: str) -> List[str]:\n listOfSessions = os.listdir('public_dataset/'+user_id)\n try:\n listOfSessions.remove('.DS_Store')\n except:\n pass\n return listOfSessions",
"def get_user_session_ids_for_task(user_id: str, task_name: str) -> List[str]:\n listOfSessions = os.listdir('Plots/Research/'+user_id+'/'+task_name)\n try:\n listOfSessions.remove('.DS_Store')\n except:\n pass\n return listOfSessions",
"def get_id_users(self):\n return self.execute(TABELLE['id_users']['select']['all'])",
"def get_ids(self):\n return self._ids",
"def user_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"user_ids\")",
"def getTaskIds(self, director):\n # the computation record\n computation = self._getComputationRecord(director)\n \n # search for tasks\n iworker = self.inventory.iworker\n tasks = computation.findTasks(director.clerk.db, iworker)\n\n ids = [t.id for t in tasks]\n return ','.join(ids)",
"def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]",
"def get_security(self):\n users = find_root(self)['users']\n userids_and_groups = []\n for userid in self._groups:\n if userid in users:\n userids_and_groups.append({'userid':userid, 'groups':self.get_groups(userid)})\n return userids_and_groups",
"def get_all_group_ids(token) -> list:\n ids=list()\n _dict = perform_request(app_config.ENDPOINT, token)\n while True:\n for obj in _dict[\"value\"]:\n ids.append(obj[\"id\"])\n if \"@odata.nextLink\" not in _dict:\n return ids\n _dict = perform_request(_dict[\"@odata.nextLink\"], token)",
"def _get_user_ids(model):\n return model.objects.values_list(\"user\", flat=True).distinct(\"user\")",
"def ids(self):\n return self._ids",
"def getEventIds(self):\n eventIdsLst = []\n for event in self.eventsLst:\n eventIdsLst.append(event['id'])\n return eventIdsLst",
"def _get_exploration_ids_subscribed_to(self, user_id: str) -> List[str]:\n subscriptions_model = user_models.UserSubscriptionsModel.get(\n user_id, strict=False)\n # TODO(#15621): The explicit declaration of type for ndb properties\n # should be removed. Currently, these ndb properties are annotated with\n # Any return type. Once we have proper return type we can remove this.\n if subscriptions_model:\n exploration_ids: List[str] = subscriptions_model.exploration_ids\n return exploration_ids\n else:\n return []",
"def _get_read_accessible_workspaces_by_user(user):\n if not settings.CAN_ANONYMOUS_ACCESS_PUBLIC_DOCUMENT and user.is_anonymous:\n accessible_workspaces = []\n else:\n # workspace case\n # list accessible workspaces\n accessible_workspaces = [\n workspace.id\n for workspace in workspace_api.get_all_workspaces_with_read_access_by_user(\n user\n )\n ]\n\n return accessible_workspaces",
"def remote_get_ids(self):\n return self.smultiengine.get_ids()",
"def identer(self) -> List[str]:\n self._populer_identer()\n if self._identer:\n return [str(ident) for ident in self._identer if ident]\n return []",
"def getIDs(self):\n return self.multiengine.getIDs()"
]
| [
"0.6810618",
"0.6412881",
"0.60274273",
"0.5916253",
"0.58385724",
"0.5836793",
"0.58053106",
"0.57211167",
"0.5666801",
"0.5621901",
"0.5606045",
"0.5600266",
"0.5539903",
"0.54888976",
"0.54831",
"0.5393162",
"0.5322502",
"0.53144634",
"0.52798474",
"0.5208441",
"0.51715237",
"0.5141779",
"0.5117581",
"0.5082674",
"0.5078218",
"0.50772643",
"0.5075993",
"0.50567347",
"0.50544506",
"0.5054045"
]
| 0.71341 | 0 |
Gets the DAG IDs editable by authenticated user. | def get_editable_dag_ids(self, user) -> set[str]:
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_readable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])",
"def get_editable_dags(self, user) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)",
"def get_accessible_dag_ids(\n self,\n user,\n user_actions: Container[str] | None = None,\n session: Session = NEW_SESSION,\n ) -> set[str]:\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]\n\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (\n permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)\n ):\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n user_query = session.scalar(\n select(User)\n .options(\n joinedload(User.roles)\n .subqueryload(Role.permissions)\n .options(joinedload(Permission.action), joinedload(Permission.resource))\n )\n .where(User.id == user.id)\n )\n roles = user_query.roles\n\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])\n else:\n resources.add(resource)\n return {\n dag.dag_id\n for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))\n }",
"def getUserIds(self):\n raise BorkedGetUserIds",
"def getIds(self) -> List[int]:\n return list(self.users.keys())",
"def user_ids(self):\r\n raise NotImplementedError",
"def getIDs():",
"def user_ids(self):\n return list(self.get_users())",
"def get_isAdminOf(self, obj):\n userCompanies = get_objects_for_user(\n obj, \"change_company\", klass=models.Company, accept_global_perms=False)\n return [x.id for x in userCompanies]",
"def get_id_users(self):\n return self.execute(TABELLE['id_users']['select']['all'])",
"def get_ids(self) -> List[str]:",
"def get_editable_explorations(user_id):\n return [e for e in get_viewable_explorations(user_id)\n if e.is_editable_by(user_id)]",
"def get_ids(self):\n return self._graphs.keys()",
"def get_ids(self):\n return self._ids",
"def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]",
"def admin_ids(self):\n # type: () -> List[int]\n return self._admin_ids",
"def getIDs(self):\n return self.multiengine.getIDs()",
"def user_ids(self):\n return list(self._user_ids)",
"def can_edit_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)",
"def get_readable_dags(self, user) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)",
"def ids(self):\n return self._ids",
"def get_accessible_user_id(self):\n ### DATABASE CODE GOES HERE\n return 1",
"def get_users_and_id(self):\n return self.execute(TABELLE['id_users']['select']['all'])",
"def get_user_ids(session, access_token):\n endpoint = \"https://graph.microsoft.com/v1.0/users?$select=id\"\n r = session.get(endpoint, headers={\"Authorization\": \"Bearer \" + access_token})\n response = json.loads(r.text)\n return response[\"value\"]",
"def get_ids(self):\n return [item.id for item in self.items]",
"def identer(self) -> List[str]:\n self._populer_identer()\n if self._identer:\n return [str(ident) for ident in self._identer if ident]\n return []",
"def getTaskIds(self, director):\n # the computation record\n computation = self._getComputationRecord(director)\n \n # search for tasks\n iworker = self.inventory.iworker\n tasks = computation.findTasks(director.clerk.db, iworker)\n\n ids = [t.id for t in tasks]\n return ','.join(ids)",
"def getEventIds(self):\n eventIdsLst = []\n for event in self.eventsLst:\n eventIdsLst.append(event['id'])\n return eventIdsLst",
"def remote_get_ids(self):\n return self.smultiengine.get_ids()",
"def get_assessment_ids(self):\n if not self.is_assessment_based_activity():\n raise IllegalState()\n else:\n return [Id(a) for a in self._my_map['assessmentIds']]"
]
| [
"0.6775534",
"0.660425",
"0.6343545",
"0.5962925",
"0.59002167",
"0.57747674",
"0.5721568",
"0.56791186",
"0.56647104",
"0.5664357",
"0.56574374",
"0.56426185",
"0.55538565",
"0.5540895",
"0.54543847",
"0.54483676",
"0.54234356",
"0.54187673",
"0.541208",
"0.53471106",
"0.53308815",
"0.5318896",
"0.5308501",
"0.5283049",
"0.5268994",
"0.524299",
"0.5230743",
"0.5205107",
"0.5200464",
"0.5182989"
]
| 0.7752794 | 0 |
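Both get_editable_dag_ids and get_readable_dag_ids in the records above reduce a user's (action, resource) permission pairs to a set of DAG ids. The self-contained sketch below shows that reduction in plain Python; the "DAG"/"DAG:" resource names and the sample permissions are assumptions made for illustration, not values taken from Airflow.

ALL_DAGS_RESOURCE = "DAG"    # assumed name of the "every DAG" resource
DAG_PREFIX = "DAG:"          # assumed prefix used for per-DAG resources


def accessible_dag_ids(user_perms, wanted_actions, all_dag_ids):
    # user_perms: iterable of (action, resource) pairs held by the user.
    # all_dag_ids: every known DAG id, returned in full when a global grant is found.
    ids = set()
    for action, resource in user_perms:
        if action not in wanted_actions:
            continue
        if resource == ALL_DAGS_RESOURCE:
            return set(all_dag_ids)              # global grant covers every DAG
        if resource.startswith(DAG_PREFIX):
            ids.add(resource[len(DAG_PREFIX):])  # strip the prefix to recover the dag_id
    return ids


perms = [("can_read", "DAG:billing_etl"), ("can_edit", "DAG:billing_etl")]
print(accessible_dag_ids(perms, {"can_edit"}, all_dag_ids=["billing_etl", "reports"]))
# -> {'billing_etl'}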
Determines whether a user has DAG read access. | def can_read_dag(self, dag_id: str, user=None) -> bool:
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_read(self, user):\n raise Return(True)",
"def has_read_permission(request):\n return request.user.is_authenticated",
"def has_access(self, action_name: str, resource_name: str, user=None) -> bool:\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n\n return False",
"def public_read_access(self) -> typing.Optional[bool]:\n return self._values.get('public_read_access')",
"def can_retrieve(self, user):\n return user.has_perm('agenda.can_see')",
"def can_read_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)",
"def canRead(id, userId):\n db = core.connect()\n theShift = db[id]\n if user.isAdmin(userId):\n return True\n if theShift[\"createdBy\"] == userId:\n return True\n if theShift[\"publishData\"][\"draft\"]:\n return False\n theUser = db[userId]\n if not theShift[\"publishData\"][\"private\"]:\n return True\n if theUser[\"privateStream\"] in theShift[\"publishData\"][\"streams\"]:\n return True\n shiftStreams = theShift[\"publishData\"][\"streams\"]\n readableStreams = permission.readableStreams(userId)\n allowed = set(shiftStreams).intersection(readableStreams)\n return len(allowed) > 0",
"def has_object_read_permission(self, request):\n user = request.user\n\n return user.is_authenticated",
"def user_has_access(self, user):\n if self.visibility == self.PUBLIC:\n return True\n elif self.visibility == self.PRIVATE and self.created_by == user:\n return True\n elif self.visibility in (self.ORG_ONLY, self.ORG_ONLY_NO_EXTERNAL):\n if user.external and self.visibility == self.ORG_ONLY_NO_EXTERNAL:\n return False\n elif self.organization.memberships.filter(user=user).count() >= 1:\n return True\n return False",
"def get_can_read(self):\n\t\tif not self.can_read:\n\t\t\tself.build_permissions()\n\t\treturn self.can_read",
"def read_allowed(self, ui, req):\n\n user = req.env.get('REMOTE_USER')\n\n deny_read = ui.configlist('web', 'deny_read', untrusted=True)\n if deny_read and (not user or ismember(ui, user, deny_read)):\n return False\n\n allow_read = ui.configlist('web', 'allow_read', untrusted=True)\n # by default, allow reading if no allow_read option has been set\n if (not allow_read) or ismember(ui, user, allow_read):\n return True\n\n return False",
"def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())",
"def has_object_read_permission(self, request):\n user = request.user\n if user.is_superuser:\n return user.is_superuser\n\n return self.user == user",
"def _has_read_perm(self, perm: WorkspacePermission) -> bool:\n read_perms = [\n WorkspacePermission.ADMINISTRATOR,\n WorkspacePermission.READ_WRITE,\n WorkspacePermission.READ,\n ]\n return perm in read_perms",
"def is_read_only(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_read_only\")",
"def has_access(self):\n self._has_access = False\n\n if self.read_contact_info is not None:\n if self.read_contact_info['USERNAME'] == consts.USERNAME or \\\n consts.USERNAME in consts.REGISTEREDADMINS:\n self._has_access = True\n\n return self._has_access",
"def has_all_dags_access(self, user) -> bool:\n if not user:\n user = g.user\n return (\n self._has_role([\"Admin\", \"Viewer\", \"Op\", \"User\"], user)\n or self.can_read_all_dags(user)\n or self.can_edit_all_dags(user)\n )",
"def can_read(self, auth_param: str) -> bool:\n perms = self._get_workspace_permissions([auth_param])\n return self._has_read_perm(perms.get(auth_param, WorkspacePermission.NONE))",
"def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})",
"def is_read_only(self):\n return self.__aceQLHttpApi.is_read_only()",
"def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False",
"def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()",
"def get_viewable(self, user):\n return True",
"def is_accessible_by(self, user):\n return (self.public or\n (user.is_authenticated and\n (user.is_staff or self.users.filter(pk=user.pk).exists())))",
"def can_be_accessed(self, user):\n if self.shared_with_everyone:\n return True\n\n if self.user == user or self.users_allowed.filter(pk=user.pk).exists():\n return True\n\n for group in self.groups_allowed.all():\n if user.groups.filter(pk=group.pk).exists():\n return True\n\n return False",
"def get_viewable(self, user):\n if user.get('role') in ('admin', 'manager', 'engineer'):\n return True\n return user['name'] == self.doc.get('customer')",
"def can_be_viewed_by(self,user):\n return True",
"def is_read_only(self):\n\t\treturn bool(call_sdk_function('PrlShare_IsReadOnly', self.handle))",
"def _has_read_permission_user_namespace(user, namespace):\n response = _fetch_herd_session() \\\n .get('{}://{}/{}/{}'.format(HERD_REST_PROTOCOL, HERD_BASE_URL,\n HERD_REST_BASE_PATH,\n '/userNamespaceAuthorizations/userIds/{}/namespaces/{}').format(\n user, namespace))\n\n if response.status_code == 200:\n json_response = response.json()\n return 'READ' in json_response['namespacePermissions']\n else:\n return False",
"def access(self, user, can=\"read\"):\n if can == \"read\":\n permission = self.can_read\n elif can == \"write\":\n permission = self.can_write\n else:\n raise ValueError(\"Invalid access: {}\".format(can))\n\n # However, the settings for `WIKI_ALLOW_*` takes precedence\n permission = getattr(settings, f\"WIKI_CAN_{can.upper()}\", permission)\n permission = permission.lower()\n if user is None or not user.is_authenticated:\n perms_object = [\"anonymous\"]\n else:\n perms_object = user.permissions.all()\n\n if permission in perms_object:\n # simplest case - we have direct match\n return True\n\n if permission in _PERMISSION_HIERARCHY:\n # check if we have a higher hierarchy position\n hpos_target = _PERMISSION_HIERARCHY.index(permission)\n return any(1 for hpos, hperm in enumerate(_PERMISSION_HIERARCHY)\n if hperm in perms_object and hpos_target < hpos)\n\n return False"
]
| [
"0.7598551",
"0.7589699",
"0.7226936",
"0.7186378",
"0.71421206",
"0.7118634",
"0.70449024",
"0.6999025",
"0.69731724",
"0.6968622",
"0.6941354",
"0.69252783",
"0.69086224",
"0.6758709",
"0.6709498",
"0.66952443",
"0.6646694",
"0.6631528",
"0.6598221",
"0.65821195",
"0.657995",
"0.6575013",
"0.6541179",
"0.6518445",
"0.64723897",
"0.6461111",
"0.64260733",
"0.63641334",
"0.63499355",
"0.63425314"
]
| 0.7601378 | 0 |
Determines whether a user has DAG delete access. | def can_delete_dag(self, dag_id: str, user=None) -> bool:
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_delete(self, user):\n raise Return(False)",
"def check_delete_permission(self):\n if getSecurityManager().checkPermission(\"Delete objects\", self):\n username = getSecurityManager().getUser().getUserName()\n if username == self.getOwner().getId():\n return True\n return False",
"def can_delete(self, user_obj):\n if self.id is None:\n return False\n if user_obj.is_superuser:\n return True\n if self.parentnode is not None and self.is_empty():\n return self.parentnode.is_admin(user_obj)\n else:\n return False",
"def canDelete(id, userId):\n db = core.connect()\n theShift = db[id]\n return user.isAdmin(userId) or (userId == theShift['createdBy'])",
"def CAN_DELETE(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)",
"def has_delete_permission(self, request, obj=None):\r\n return False",
"def has_delete_permission(self, request, obj=None):\n return False",
"def has_delete_permission(self, request, obj=None):\n return False",
"def allowed_topology_access_delete(user, topology):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return topology.owner == user or user.has_perm(\"vnswww.topology_delete_any\") or (user.has_perm(\"vnswww.topology_delete_org\") and up.org == topology.org)",
"def has_delete_permission(self, request, obj=None, *args, **kwargs):\n return False",
"def has_delete_permission(self, request, obj=None) -> bool:\n permission = super().has_delete_permission(request, obj)\n\n if obj is not None:\n permission &= (obj.owner == request.user) or request.user.is_superuser\n\n return permission",
"def has_delete_permission(self, request, obj=None):\n opts = self.opts\n codename = get_permission_codename('delete', opts)\n return any([\n request.user.has_perm(\"%s.%s\" % (opts.app_label, codename)),\n request.user.has_perm(\"%s.%s\" % (opts.app_label, codename), obj)])",
"def has_delete_permission(self, request, instance=None):\n return False",
"def allowed_user_access_delete(usera, userb):\n try:\n upa = usera.get_profile()\n upb = userb.get_profile()\n except AttributeError:\n return False\n\n return (usera == userb and usera.has_perm(\"vnswww.userprofile_delete_self\")\n or usera.has_perm(\"vnswww.userprofile_delete_any\")\n or (usera.has_perm(\"vnswww.userprofile_delete_org\") and upa.org == upb.org))",
"def allowed_topologytemplate_access_delete(user, template):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return template.owner == user or user.has_perm(\"vnswww.topologytemplete_delete_any\") or (user.has_perm(\"vnswww.topologytemplete_delete_org\") and template.org == up.org)",
"def has_delete_permission(self, request, obj=None):\n if type(obj) is Client:\n return obj.is_user_in_main_sales_contacts_of_client(request.user)\n return False",
"def allowed_group_access_delete(user, group):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return (user.has_perm(\"vnswww.group_delete_any\")\n or (user.has_perm(\"vnswww.group_delete_org\")\n and group.org == up.org))",
"def can_delete_comment(comment, user):\r\n if user.is_staff or user.is_superuser:\r\n return True\r\n if hasattr(comment, 'user') and comment.user == user:\r\n return True\r\n return False",
"def has_delete_permission(self, request, obj=None):\n has_perm = super(ShortURLAdmin, self).has_delete_permission(request, obj)\n if not has_perm:\n return False\n if obj is not None and not request.user.has_perm('deflect.list_all') and request.user.id != obj.creator.id:\n return False\n return True",
"def can_delete(self):\r\n return True",
"def test_has_perm_post_delete(self):\n self.context['request'].user = User.objects.get(pk=47963)\n allowed = has_perm(self.context, 'forums_forum.post_delete_forum',\n self.forum_1)\n eq_(allowed, True)\n allowed = has_perm(self.context, 'forums_forum.post_delete_forum',\n self.forum_2)\n eq_(allowed, False)",
"def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))",
"def test_has_perm_thread_delete(self):\n self.context['request'].user = User.objects.get(pk=47963)\n allowed = has_perm(self.context, 'forums_forum.thread_delete_forum',\n self.forum_1)\n eq_(allowed, True)\n allowed = has_perm(self.context, 'forums_forum.thread_delete_forum',\n self.forum_2)\n eq_(allowed, False)",
"def is_deletable(self):\r\n # Deletion functionality is currently under discussion.\r\n # For now, delete requests are emailed to the admin.\r\n return False\r\n #return (not self.is_approved or self.start_date > now) and (not self.amount_raised) and (not self.is_deleted)\r",
"def has_delete_permissions(self, request, obj, local_site=None, *args,\n **kwargs):\n return obj.is_mutable_by(request.user, local_site=local_site)",
"def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False",
"def only_delete(post, user):\n children = Post.objects.filter(parent_id=post.id).exclude(pk=post.id)\n # The conditions where post can only be deleted.\n cond1 = children or post.age_in_days > 7\n cond2 = post.vote_count > 1 or (post.author != user)\n\n only_del = cond1 or cond2\n\n return only_del",
"def testDeleteIsAllowed(self):\n UserAPI().create([(u'user', u'secret', u'User', u'[email protected]')])\n namespaces = SecureNamespaceAPI(self.system.users['fluiddb'])\n namespaces.delete([u'user/private'])\n self.users.delete([u'user'])\n self.assertIdentical(None, getUser(u'user'))",
"def has_permission(self, request, view):\n return request.user.group == 'admin'",
"def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()"
]
| [
"0.78429",
"0.7446556",
"0.7367293",
"0.72968274",
"0.72547096",
"0.71889985",
"0.71632475",
"0.71632475",
"0.7145582",
"0.7087952",
"0.7009737",
"0.7005602",
"0.68999463",
"0.68475777",
"0.6841784",
"0.6757655",
"0.6748384",
"0.6725004",
"0.66738075",
"0.6660552",
"0.66505444",
"0.6593149",
"0.652268",
"0.64149183",
"0.6389542",
"0.6354328",
"0.6342702",
"0.6320705",
"0.6265619",
"0.62618303"
]
| 0.75682795 | 1 |
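can_delete_dag in this record, like can_read_dag earlier, resolves a root DAG id before building the per-DAG resource name, so permissions granted on a parent DAG also cover its sub-DAGs. The toy resolver below only illustrates that idea; the dot-separated sub-DAG naming and the known_root_ids argument are assumptions of this sketch, whereas the record's _get_root_dag_id consults the DagModel table.

def get_root_dag_id(dag_id, known_root_ids):
    # Toy resolver: a sub-DAG id like "etl.load" resolves to its registered root "etl".
    # known_root_ids stands in for what the real lookup reads from the DagModel table.
    if "." in dag_id:
        candidate = dag_id.split(".", 1)[0]
        if candidate in known_root_ids:
            return candidate
    return dag_id


print(get_root_dag_id("etl.load", {"etl", "reports"}))  # etl
print(get_root_dag_id("reports", {"etl", "reports"}))   # reports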
Returns the permission name for a DAG id. | def prefixed_dag_id(self, dag_id: str) -> str:
warnings.warn(
"`prefixed_dag_id` has been deprecated. "
"Please use `airflow.security.permissions.resource_name_for_dag` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
root_dag_id = self._get_root_dag_id(dag_id)
return permissions.resource_name_for_dag(root_dag_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_name(id):\r\n\r\n graph = GraphAPI(access_token=TOKEN, version='2.5')\r\n\r\n return graph.get_object(id=str(id).split('-')[0])['name']",
"def PermissionSetName(self) -> str:",
"def acl_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"acl_name\")",
"def dag_name(self):\n return self._dag_name",
"def get_name(self, id):\n\t\treturn self.name_by_index[id]",
"def get_project_name_from_id(project_id: int) -> str:\n session = konfuzio_session()\n url = get_project_url(project_id)\n r = session.get(url=url)\n return r.json()['name']",
"def getGroupName(Id):\r\n return \"Group name\"",
"def get_name(self, id_val=None):\n self.ensure_loaded()\n name = self.search('id', id_val, searchtype=\"match\")['name'].tolist()\n return name if len(name) > 1 else name[0]",
"def permission_codename(cls):\n return f'{cls.app_label}_{cls.name}_action'",
"def acl_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"acl_name\")",
"def _get_project_name(self, context, project_id):\n return project_id",
"def get_data_group_name(listener_id):\n return \"{}{}\".format(get_name(listener_id), f5_const.SUFFIX_ALLOWED_CIDRS)",
"def acl_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"acl_name\")",
"def get_label_name(label_id):\n if self._int_to_label == {}:\n print(\"ERROR\")\n print(\"Need to import data first\")\n else:\n label_name = self._int_to_label[label_id]\n\n return label_name",
"def get_reg_name(self, reg_path: str, reg_id: str) -> str:\n name = self.get_full_path(reg_path, reg_id)\n return name.lower().replace('-', '').replace('_', '').replace(' ', '')",
"def get_permission_codename(action, opts):\n return '%s_%s' % (action, opts.module_name)",
"def getShortname(input_id):\n \n name = formatDesignation(input_id)\n \n return name[0:6] + name[10:15]",
"def get_resource_group_name(resource_id):\n match_groups = re.match(r\".*resourcegroups/(?P<group_name>[^/]*)/.*\", resource_id, flags=re.IGNORECASE)\n return match_groups.group(\"group_name\")",
"def acl_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"acl_id\")",
"def sync_perm_for_dag(\n self,\n dag_id: str,\n access_control: dict[str, Collection[str]] | None = None,\n ) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\", dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\",\n dag_resource_name,\n )",
"def permissions(self) -> str:\n return pulumi.get(self, \"permissions\")",
"def permissionContextById(self, id: str) -> PermissionContext:",
"def get_group_name(self, group_id):\n group = self.table.query.filter_by(group_id=group_id).first()\n return group.name",
"def get_name(self):\n return self.id.split('.')[-1:][0]",
"def dag_id(self):\n if self.parallelize_task:\n return f'{self.job_id_extension.get_parallel(self.job_id)}'\n else:\n return f'{self.job_id_extension.get_preparation(self.job_id)}'",
"def acl_id(self) -> str:\n return pulumi.get(self, \"acl_id\")",
"def acl_id(self) -> str:\n return pulumi.get(self, \"acl_id\")",
"def acl_id(self) -> str:\n return pulumi.get(self, \"acl_id\")",
"def permission_uri(cls):\n return f'daf.{cls.permission_codename}'",
"def get_name_from_resource_id(resource_id):\n return resource_id.rstrip(\"/\").split(\"/\")[-1]"
]
| [
"0.6119352",
"0.589299",
"0.5748296",
"0.56348586",
"0.5568398",
"0.55251837",
"0.5521585",
"0.54744",
"0.5474207",
"0.54451746",
"0.54412836",
"0.5435674",
"0.5379868",
"0.53726",
"0.5362401",
"0.535118",
"0.5331459",
"0.5284019",
"0.5281497",
"0.5270985",
"0.52205694",
"0.5193873",
"0.5187339",
"0.51778024",
"0.51767474",
"0.5165819",
"0.5165819",
"0.5165819",
"0.5165386",
"0.5165183"
]
| 0.6936026 | 0 |
Determines if a resource belongs to a DAG or all DAGs. | def is_dag_resource(self, resource_name: str) -> bool:
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_read_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)",
"def is_dag(self):\n if nx.is_directed_acyclic_graph(Node.G):\n return True\n else:\n return False",
"def can_edit_all_dags(self, user=None) -> bool:\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)",
"def has_access(self, action_name: str, resource_name: str, user=None) -> bool:\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n\n return False",
"def is_resource(cls, attr):\n return cls._attributes[attr].kind \\\n in [ResourceAttributeKinds.MEMBER,\n ResourceAttributeKinds.COLLECTION]",
"def can_read_dag(self, dag_id: str, user=None) -> bool:\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)",
"def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:\n if dag_id and dag_id != \"~\":\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))\n\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))",
"def is_resource_enabled(resource):\n return use_resources is not None and resource in use_resources",
"def __contains__(self, resource):\r\n return resource in self.__resources or any(resource in x for x in self._ResourceManager__m2m)",
"def has_dag_field(self, field) -> bool:\n if not self._dag:\n return False\n return field in self._dag",
"def is_reserved_resource(self, work_dir: str, resource: str) -> bool:\n resource_dir = resource.split(\"/\")[0] if \"/\" in resource else resource\n if resource.startswith(\".resumables-\") and resource.endswith(\".db\"):\n logging.error(f\"resumable dbs not accessible {resource}\")\n return True\n elif re.match(r\"(.+)\\.([a-f\\d0-9-]{32,36})$\", resource):\n logging.error(\"merged resumable files not accessible\")\n return True\n elif re.match(r\"(.+).([a-f\\d0-9-]{32,36}).part$\", resource):\n logging.error(\"partial upload files not accessible\")\n return True\n elif VALID_UUID.match(resource_dir):\n potential_target = os.path.normpath(f\"{work_dir}/{resource_dir}\")\n if os.path.lexists(potential_target) and os.path.isdir(potential_target):\n content = os.listdir(potential_target)\n for entry in content:\n if re.match(r\"(.+).chunk.[0-9]+$\", entry):\n logging.error(f\"resumable directories not accessible {entry}\")\n return True\n return False",
"def can_group(self) -> bool:\n return (\n self.all_icon is not None and\n self.all_name is not None\n )",
"def is_resource_node(self):\n return self.camera is not None or self.mesh is not None",
"def check_dag_exists(dag_id, conf, dialect) -> bool:\n engine = db_utils.get_engine(conf, dialect)\n with engine.connect() as connection:\n result = connection.execute('''select * from dag \n where dag_id = '{}'\n '''.format(dag_id))\n found = [dict(r) for r in result.fetchall()]\n return found != []",
"def check_dag_acyclic(self, start, inputs):\n for binding in inputs.values():\n if start == binding.io_owner:\n return False\n for p in binding.parents:\n if not self.check_dag_acyclic(start, p.io_owner.input_bindings.bindings):\n return False\n\n return True",
"def __bool__(self):\n for root, products in self.rel_paths():\n if products:\n return True\n return False",
"def check_owners(self, request, **resources):\r\n\r\n if self._meta.allow_public_access or not self._meta.parent:\r\n return True\r\n\r\n self.parent.check_owners(request, **resources)\r\n\r\n objects = resources.get(self._meta.name)\r\n if self._meta.model and self._meta.parent._meta.model and objects:\r\n pr = resources.get(self._meta.parent._meta.name)\r\n check = all(\r\n pr.pk == getattr(\r\n o, \"%s_id\" % self._meta.parent._meta.name, None)\r\n for o in as_tuple(objects))\r\n\r\n if not pr or not check:\r\n # 403 Error if there is error in parent-children relationship\r\n raise HttpError(\r\n \"Access forbidden.\", status=status.HTTP_403_FORBIDDEN)\r\n\r\n return True",
"def can_delete_dag(self, dag_id: str, user=None) -> bool:\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)",
"def check_authorization(\n self,\n perms: Sequence[tuple[str, str]] | None = None,\n dag_id: str | None = None,\n ) -> bool:\n if not perms:\n return True\n\n for perm in perms:\n if perm in (\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n ):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n\n elif not self.has_access(*perm):\n return False\n\n return True",
"def is_resource(space, w_obj):\n return space.wrap(space.is_resource(w_obj))",
"def has_resources(self) -> Optional[bool]:\n return pulumi.get(self, \"has_resources\")",
"def op_in_graph(self, op):\n # pylint: disable=protected-access\n if op._graph == self:\n return True\n # pylint: enable=protected-access\n if self._parent_graph:\n return self._parent_graph.op_in_graph(op)\n return False",
"def has_resources(self):\r\n return hasattr(self, 'resources') and self.resources",
"def check_dag(self):\n self.create_dag()\n\n self.check_connected_components()\n\n self.check_for_cycles()",
"def if_graph_adheres(g, allowed_extensions=set()):\n allowed_extensions = set(allowed_extensions)\n if 'v-structure' not in allowed_extensions and 'v-structure' in {e.get('type', \"direct\") for e in g.get('edgeSet', [])}:\n return False\n if 'temporal' not in allowed_extensions and graph_has_temporal(g):\n return False\n if 'hopUp' not in allowed_extensions and any('hopUp' in e for e in g.get('edgeSet', [])):\n return False\n if 'hopDown' not in allowed_extensions and any('hopDown' in e for e in g.get('edgeSet', [])):\n return False\n if 'qualifier_rel' not in allowed_extensions and any(e.get('kbID', \"\").endswith('q') for e in g.get('edgeSet', [])):\n return False\n if 'multi_rel' not in allowed_extensions and len(g.get('edgeSet', [])) > 1:\n return False\n if 'filter' not in allowed_extensions and 'filter' in g:\n return False\n if 'iclass' not in allowed_extensions and any(edge.get(\"type\") == 'iclass' for edge in g.get('edgeSet', [])):\n return False\n return True",
"def can_edit_dag(self, dag_id: str, user=None) -> bool:\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)",
"def resource_rules(self) -> Optional[Sequence['outputs.NamedRuleWithOperations']]:\n return pulumi.get(self, \"resource_rules\")",
"def can_access_all_datasources(self) -> bool:\n\n return self.can_access(\"all_datasource_access\", \"all_datasource_access\")",
"def has_groups(self, resolvables, all=True):\n total_checks = 0\n\n for group in resolvables:\n if self.has_group(group):\n total_checks += 1\n\n if not all:\n return True\n\n return True if all and total_checks == len(resolvables) else False",
"def is_acyclic(graph):\n visited = []\n dfs_seq = DFSIterator(graph)\n\n for node in dfs_seq:\n visited.insert(0, node)\n node_neighbors = graph.get_neighbors(node)\n \n for neighbor in node_neighbors:\n if neighbor in visited:\n return False\n\n return True"
]
| [
"0.6148349",
"0.6011392",
"0.57166684",
"0.5582532",
"0.54927164",
"0.546736",
"0.5406616",
"0.5362055",
"0.53611654",
"0.52332544",
"0.522739",
"0.51649857",
"0.5148707",
"0.5101213",
"0.5099694",
"0.5078531",
"0.5074959",
"0.5069634",
"0.5061898",
"0.5059261",
"0.50497496",
"0.5033697",
"0.49938697",
"0.49755913",
"0.49641523",
"0.4941708",
"0.49121463",
"0.48819757",
"0.48452514",
"0.48119092"
]
| 0.73183984 | 0 |
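The prefixed_dag_id and is_dag_resource records above are two halves of one naming scheme: a DAG id is mapped to a prefixed resource name, and a resource name counts as DAG-related when it is the global DAG resource or carries that prefix. A small sketch of the scheme follows; the exact constant values are assumptions of the sketch, not Airflow's definitions.

RESOURCE_DAG = "DAG"          # assumed name of the "all DAGs" resource
RESOURCE_DAG_PREFIX = "DAG:"  # assumed prefix for per-DAG resources


def resource_name_for_dag(dag_id):
    # Per-DAG resource name, in the spirit of the prefixed_dag_id record above.
    return f"{RESOURCE_DAG_PREFIX}{dag_id}"


def is_dag_resource(resource_name):
    # DAG-related if it is the global DAG resource or any prefixed per-DAG resource.
    return resource_name == RESOURCE_DAG or resource_name.startswith(RESOURCE_DAG_PREFIX)


for name in (resource_name_for_dag("billing_etl"), "DAG", "Connections"):
    print(name, is_dag_resource(name))
# DAG:billing_etl True / DAG True / Connections False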
Verify whether a given user could perform a certain action on the given resource. Example actions might include can_read, can_write, can_delete, etc. | def has_access(self, action_name: str, resource_name: str, user=None) -> bool:
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can(user, action):\n\n v = bitvector.BitVector(user.access_level)\n return v.is_set(EVERYTHING) or v.is_set(action)",
"def check_permission(user, action_name, app_label, model_name):\r\n p = '%s.%s_%s' % (app_label, action_name, model_name)\r\n return user and user.is_active and user.has_perm(p)",
"def check_action_permissions(self, request, action, obj=None):\n if action is None:\n self.permission_denied(request)\n\n for permission in self.get_permissions():\n if not permission.has_action_permission(request, self, action, obj):\n self.permission_denied(request)",
"def cant(user, action):\n\n return not can(user, action)",
"def view(self, user, action, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n # TODO check groups in request maybe ? dunno\n if user.is_advisor:\n return True\n\n return self.admin_permission(user, action, *args)",
"def is_allowed_to_do(cls, db_tuple, action, target, actor, should_raise_insufficent_priv_ex=True):\n action_check_fn = cls.get_action_check_fn(action)\n \n if action_check_fn is None:\n raise cls.UnrecognizedActionException('unrecognized action: %s' % action)\n \n # i do what i want!\n if actor.metaspace_privileges.has_privilege(MetaspacePrivilegeSet.SUPER):\n return True\n \n can_do_action = action_check_fn(db_tuple, target, actor)\n if should_raise_insufficent_priv_ex and not can_do_action:\n raise cls.InsufficientPrivilegesException('%s (user_id=%i) is not allowed to perform %s' % (actor.email_addr, actor.user_id, action))\n else:\n return can_do_action",
"def action_allowed_for(user, permission):\n if user is None or not user.is_authenticated:\n return False\n\n assert permission in amo.permissions.PERMISSIONS_LIST # constants only.\n return any(\n match_rules(group.rules, permission.app, permission.action)\n for group in user.groups_list\n )",
"def check_permission(self, action, username, resource, perm):\n if not resource:\n return\n if resource.realm == 'blog' and resource.id:\n the_post = BlogPost(self.env, resource.id, resource.version)\n for category in the_post.category_list:\n if category in self.draft and the_post.author != username:\n # Block all access regardless\n return False",
"def check_rights(self, resources, request=None):\r\n if not self.auth:\r\n return True\r\n\r\n try:\r\n if not self.auth.test_rights(resources, request=request):\r\n raise AssertionError()\r\n\r\n except AssertionError, e:\r\n raise HttpError(\r\n \"Access forbiden. {0}\".format(e),\r\n status=status.HTTP_403_FORBIDDEN\r\n )",
"def is_allowed(self, role, operation, resource):\r\n assert not role or role in self._roles\r\n assert not resource or resource in self._resources\r\n\r\n roles = set(get_family(self._roles, role))\r\n operations = set([None, operation])\r\n resources = set(get_family(self._resources, resource))\r\n\r\n is_allowed = None\r\n default_assertion = lambda *args: True\r\n\r\n for permission in itertools.product(roles, operations, resources):\r\n if permission in self._denied:\r\n assertion = self._denied[permission] or default_assertion\r\n if assertion(self, role, operation, resource):\r\n return False # denied by rule immediately\r\n\r\n if permission in self._allowed:\r\n assertion = self._allowed[permission] or default_assertion\r\n if assertion(self, role, operation, resource):\r\n is_allowed = True # allowed by rule\r\n\r\n return is_allowed",
"def access(self, user, can=\"read\"):\n if can == \"read\":\n permission = self.can_read\n elif can == \"write\":\n permission = self.can_write\n else:\n raise ValueError(\"Invalid access: {}\".format(can))\n\n # However, the settings for `WIKI_ALLOW_*` takes precedence\n permission = getattr(settings, f\"WIKI_CAN_{can.upper()}\", permission)\n permission = permission.lower()\n if user is None or not user.is_authenticated:\n perms_object = [\"anonymous\"]\n else:\n perms_object = user.permissions.all()\n\n if permission in perms_object:\n # simplest case - we have direct match\n return True\n\n if permission in _PERMISSION_HIERARCHY:\n # check if we have a higher hierarchy position\n hpos_target = _PERMISSION_HIERARCHY.index(permission)\n return any(1 for hpos, hperm in enumerate(_PERMISSION_HIERARCHY)\n if hperm in perms_object and hpos_target < hpos)\n\n return False",
"def test_func(self):\n return self.request.user.has_permission(\"core.view_staffer\")",
"async def permits(self, identity, permission, context=None):\r\n return self.authorized_userid(identity) is not None",
"def is_any_allowed(self, roles, operation, resource):\r\n is_allowed = None # there is not matching rules\r\n for role in roles:\r\n is_current_allowed = self.is_allowed(role, operation, resource)\r\n if is_current_allowed is False:\r\n return False # denied by rule\r\n elif is_current_allowed is True:\r\n is_allowed = True\r\n return is_allowed",
"def authorize(self, action, author_id=None):\n if action not in CHANGE_TYPES:\n return False\n return True",
"def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n if self.kind == \"persona_profile\":\n p = Persona.request_persona(self.author_id)\n return p.id == author_id\n elif self.kind == \"group_profile\":\n # Everyone can update\n if action == \"update\":\n return True\n # Only author can insert and delete\n elif self.author_id == author_id:\n return True\n\n elif self.kind == \"index\":\n p = Persona.query.filter(Persona.index_id == self.id)\n return p.id == author_id\n return False",
"def user_has_permission(self, id: int, user: User) -> bool:\n return self.get_queryset().filter(pk=id).filter_for_user(user).exists()",
"def test_has_permission(self):\n req = self.req(\"post\", \"/the/url\", data={\"action-doit\": \"3\"})\n req.user = Mock()\n req.user.has_perm.return_value = True\n\n res = self.view(\n req,\n decorator=self.actions(\n self.mock_model, [\"doit\"], permission=\"do_things\")\n )\n\n self.assertEqual(res.status_code, 302)\n req.user.has_perm.assert_called_with(\"do_things\")",
"def authorize(self, action, author_id=None):\n return False",
"def permits(identity, obj, permission):\n return False",
"def user_roles_check(request):\n logger.debug('right_user_check')\n options = {\n 'api_file': {'GET': True, 'POST': False}\n }\n url_name = request.request.resolver_match.url_name\n if not request.request.user.is_authenticated:\n return False\n user_have_right = options[url_name][request.request.method]\n if user_have_right:\n return True\n raise PermissionDenied",
"def check_access(node, user, action, key=None):\n permission = permission_map.get(action, None)\n if permission is None:\n raise HTTPError(httplib.BAD_REQUEST)\n if node.has_permission(user, permission):\n return True\n if permission == 'read':\n if node.is_public or key in node.private_link_keys_active:\n return True\n code = httplib.FORBIDDEN if user else httplib.UNAUTHORIZED\n raise HTTPError(code)",
"def privilege_check(user, *required_privileges):\n for perm in required_privileges:\n if user.has_property(perm):\n return True\n return False",
"def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n return self.admin_id == author_id\n return False",
"def test_user_can_read(app, resource):\n with app.user():\n # Read resource\n app.client.get('/' + resource, assert_status=200)\n\n # Create fake item and read item\n _id = app.data.driver.db[resource].insert({})\n app.client.get('/%s/%s' % (resource, _id),\n assert_status=200)",
"def can_act(self, **kwargs):\n return True",
"def authorize(self, action, author_id=None):\n if Identity.authorize(self, action, author_id=author_id):\n return (self.id == author_id)\n return False",
"def applies_to(self, resource):\n user = resource.request.authenticated_user\n if user and user.is_admin:\n return self.applies_to_admin\n\n # Filter applies to all non admin users by default\n return True",
"def check_any(self, resource, permission, roles=None):\n if roles is None:\n roles = self._determine_roles()\n return self.acl.check_any(roles, resource, permission)",
"def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public"
]
| [
"0.69866955",
"0.6912086",
"0.6754024",
"0.67154",
"0.67023987",
"0.6687463",
"0.6617492",
"0.6611587",
"0.660988",
"0.65471673",
"0.6348825",
"0.63040656",
"0.6253602",
"0.625236",
"0.6251293",
"0.6249416",
"0.62281436",
"0.6177559",
"0.61749506",
"0.61509055",
"0.61430466",
"0.6137432",
"0.6130688",
"0.6126094",
"0.61189127",
"0.6094111",
"0.6090615",
"0.60840815",
"0.6071204",
"0.60649943"
]
| 0.6915247 | 1 |
FAB leaves faulty permissions that need to be cleaned up. | def clean_perms(self) -> None:
self.log.debug("Cleaning faulty perms")
sesh = self.appbuilder.get_session
perms = sesh.query(Permission).filter(
or_(
Permission.action == None, # noqa
Permission.resource == None, # noqa
)
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
# to delete the _object_ so that SQLA knows to delete the many-to-many
# relationship object too. :(
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info("Deleted %s faulty permissions", deleted_count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_perms(self) -> None:\n\n logger.info(\"Cleaning faulty perms\")\n sesh = self.get_session\n pvms = sesh.query(PermissionView).filter(\n or_(\n PermissionView.permission # pylint: disable=singleton-comparison\n == None,\n PermissionView.view_menu # pylint: disable=singleton-comparison\n == None,\n )\n )\n deleted_count = pvms.delete()\n sesh.commit()\n if deleted_count:\n logger.info(\"Deleted %i faulty permissions\", deleted_count)",
"def __check_removed_permissions(self) -> None:\n for permission in Permission.objects.all():\n if not self.__is_permission_allowed_to_delete(permission):\n continue\n\n if self.__is_permission_in_groups(permission.codename):\n raise PermissionInUse(f'Permission {permission.codename} is used in groups. Delete it first.')\n\n permission.delete()\n\n self.stdout.write(f'Removed {permission.codename} permission')",
"def reset_permissions(self):\n self.permissions = 0",
"def octopus_permissions_clear(self, msg, args):\r\n return self.permissions.clear_permissions()",
"def remove_permissions(self):\n self._activate()\n self.configure(state=\"disabled\")",
"def DeniedPermissions(self) -> _n_6_t_0:",
"def test_permission_remove_all_actions_for_user(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous *')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def cleanup_docrules_permissions():\n content_type, created = ContentType.objects.get_or_create(app_label='rule', model='', name='document type')\n permissions = Permission.objects.filter(content_type=content_type)\n for p in permissions:\n p.delete()\n #print 'Deleted all permissions for each DocumentTypeRule()'",
"def remove_permissions(apps, schema_editor):\n\n Permission = apps.get_model(\"auth\", \"Permission\")\n Group = apps.get_model(\"auth\", \"Group\")\n\n permission = Permission.objects.get(\n codename=\"can_approve_estimated_completion_date\",\n )\n\n admin_group = Group.objects.get(name=\"Administrator\")\n admin_group.permissions.remove(permission)\n permission.delete()\n\n print(\n 'Permission \"can_approve_estimated_completion_date\" removed from the \"Admin\" group.'\n )",
"def test_remove_facility_pt4(self):\n with self.assertRaises(InvalidPermission):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility', obj=[]))",
"def test_permission_remove_action_for_all_users(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission add anonymous TICKET_CREATE')\n self._execute('permission remove * TICKET_CREATE')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def has_remove_permissions(self, obj):\n return True",
"def reset_permissions(user_uuid_hexed):\n key = f\"perms_{user_uuid_hexed}\"\n return cache.delete(key)",
"def test_permission_remove_multiple_actions_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous WIKI_CREATE WIKI_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))",
"def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass",
"def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)",
"def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)",
"def pre_access_control_list_delete(self, resource_id):\n pass",
"def test_remove_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.remove_facility'))",
"def test_remove_facility_pt3(self):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility'))",
"def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def delPermission(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\",\"perm_name\")\n request.getAuthNameObj().canDo(\"CHANGE ADMIN PERMISSIONS\")\n perm_actions.getActionManager().deletePermission(request[\"admin_username\"],request[\"perm_name\"])",
"def cleanup(self):\r\n # XXX should be fixed properly!!!\r\n try:\r\n self.unlock()\r\n except:\r\n pass",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','bigfirms'):\n abort(403)",
"def clean_method(self):\n stack_id = self.stack_name\n stacks = self.tc.user.cloudformation.describe_stacks(stack_id)\n if stacks:\n for stack in stacks:\n if (\n self.on_failure == 'DELETE' or\n self.on_failure == 'ROLLBACK'\n ):\n self.log.debug(\"Deleting the following stack: \" +\n str(stack.stack_name))\n try:\n self.tc.user.cloudformation.delete_stack(\n stack.stack_name)\n except BotoServerError as e:\n self.log.error(\"Failed to delete stack\")\n raise e\n else:\n self.log.debug(\"Stack and resources not deleted.\")\n pass",
"def clean(_context):",
"def remove_stuff_post_error(self):\n os.system('rm %s' % self.destination)",
"async def clean(self, ctx):\n pass",
"def cleanup(self, *args, **kwargs):"
]
| [
"0.6777668",
"0.6655451",
"0.6426778",
"0.63922745",
"0.6263131",
"0.6173565",
"0.60162455",
"0.5943351",
"0.592031",
"0.58951074",
"0.58124304",
"0.5756105",
"0.574823",
"0.573477",
"0.57173944",
"0.57100266",
"0.57020974",
"0.57020974",
"0.5701144",
"0.5669114",
"0.5663117",
"0.5663038",
"0.565804",
"0.565345",
"0.5645821",
"0.56408393",
"0.56261605",
"0.5569057",
"0.5543507",
"0.54970104"
]
| 0.71627325 | 0 |
Add the new (action, resource) to assoc_permission_role if it doesn't exist. It will add the related entry to ab_permission and ab_resource two meta tables as well. | def _merge_perm(self, action_name: str, resource_name: str) -> None:
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
perm = None
if action and resource:
perm = self.appbuilder.get_session.scalar(
select(self.permission_model).filter_by(action=action, resource=resource).limit(1)
)
if not perm and action_name and resource_name:
self.create_permission(action_name, resource_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addPermission(self, permission=None, permName=None, kvDict=None):\n return _modelActionBase(self, instance=permission, instanceName=permName, kvDict=kvDict,\n model=get_model('perm'), db=db, action='add', modelType='permission')",
"def addPermission(self, permission=None, permName=None, kvDict=None):\n return _modelActionBase(self, instance=permission, instanceName=permName, kvDict=kvDict,\n model=get_model('perm'), db=db, action='add', modelType='permission')",
"def add_permission(self, label, aws_account_id, action_name):\r\n return self.connection.add_permission(self, label, aws_account_id, action_name)",
"def create_permission(self, action_name, resource_name) -> Permission | None:\n if not (action_name and resource_name):\n return None\n perm = self.get_permission(action_name, resource_name)\n if perm:\n return perm\n resource = self.create_resource(resource_name)\n action = self.create_action(action_name)\n perm = self.permission_model()\n perm.resource_id, perm.action_id = resource.id, action.id\n try:\n self.get_session.add(perm)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_ADD_PERMVIEW.format(perm))\n return perm\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_ADD_PERMVIEW.format(e))\n self.get_session.rollback()\n return None",
"def add_permission_to_role(self, role: Role, permission: Permission | None) -> None:\n if permission and permission not in role.permissions:\n try:\n role.permissions.append(permission)\n self.get_session.merge(role)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_ADD_PERMROLE.format(permission, role.name))\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_ADD_PERMROLE.format(e))\n self.get_session.rollback()",
"def add_to_resource_policy(self, permission: aws_cdk.aws_iam.PolicyStatement) -> None:\n ...",
"def allow(self, role, operation, resource, assertion=None):\r\n assert not role or role in self._roles\r\n assert not resource or resource in self._resources\r\n self._allowed[role, operation, resource] = assertion",
"def add_permission(self, perm):\n if not self.has_permission(perm):\n self.permissions += perm",
"def test_permission_add_already_exists(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission add anonymous WIKI_CREATE '\n 'WIKI_VIEW WIKI_MODIFY')\n self.assertEqual(0, rv)\n rv, output2 = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output + output2)",
"def add_permission(self, queue, label, aws_account_id, action_name):\r\n params = {'Label': label,\r\n 'AWSAccountId' : aws_account_id,\r\n 'ActionName' : action_name}\r\n return self.get_status('AddPermission', params, queue.id)",
"def create_permission(permission, event):\n setDefaultRoles(permission.title, ('Manager',))",
"def _add(self, name, permissions):\n data = {\"name\": name, \"permissions\": permissions}\n path = self.router.roles\n return self.request(method=\"put\", path=path, json=data)",
"def _categorize_resource(self, resource: Resource, required_permissions: str) -> None:\n if resource.is_user_provided:\n self.resources_reused.append({\"arn\": resource.arn, \"required_permissions\": required_permissions})\n else:\n self.resources_created.append({\"arn\": resource.arn})",
"def add_to_resource_policy(self, permission: aws_cdk.aws_iam.PolicyStatement) -> None:\n return jsii.invoke(self, \"addToResourcePolicy\", [permission])",
"def add_to_resource_policy(self, permission: aws_cdk.aws_iam.PolicyStatement) -> None:\n return jsii.invoke(self, \"addToResourcePolicy\", [permission])",
"def addPermissionIfMissing(session,perm,desc):\n try:\n db.getPerm(session,perm)\n return False\n except NoResultFound:\n session.add(db.makePermission(perm,desc))\n acserver.log(\"Authentication: Adding permission %s\"%perm)\n session.commit()\n return True",
"def create_permission(db_manager, permission_action, forum_id=0, thread_id=0):\n permission_id = db_manager.insert_row(\"auth_permission\", {\n \"action\": permission_action,\n \"forum_id\": forum_id,\n \"thread_id\": thread_id,\n })\n\n if permission_id:\n return Permission(db_manager, permission_id, permission_action, forum_id, thread_id)\n return None",
"def add_permission(self, permission: str):\n setattr(self.scopes, permission, True)\n self.save(update_fields=[\"scopes\"])",
"def add_permission(self, permission):\n self._permissions.add(permission)",
"def post_access_control_list_create(self, resource_dict):\n pass",
"def on_permission_after_insert(\n self, mapper: Mapper, connection: Connection, target: Permission\n ) -> None:",
"def has_add_permission(self, request, obj=None):\n return False",
"def has_add_permission(self, request, obj=None):\n return False",
"def add_permission(self, topic, label, account_ids, actions):\r\n params = {'ContentType' : 'JSON',\r\n 'TopicArn' : topic,\r\n 'Label' : label}\r\n self.build_list_params(params, account_ids, 'AWSAccountId')\r\n self.build_list_params(params, actions, 'ActionName')\r\n response = self.make_request('AddPermission', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)",
"def update_admin_permission(self) -> None:\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()",
"def test_add_permission_with_any(self):\n permission_logic = self.permission_logic_class(\n any_permission=True\n )\n add_permission_logic(Article, permission_logic)\n self._auto_test_permission('add')\n remove_permission_logic(Article, permission_logic)",
"def has_add_permission(self, request, instance=None):\n return False",
"def has_add_permission(self, request):\r\n return False",
"def has_add_permission(self, request):\r\n return False",
"def add_acl_rule_to_acl(self, acl_name=None, rule_id='', action=None, conditions=None):\n pass"
]
| [
"0.5865254",
"0.5865254",
"0.5848337",
"0.5837557",
"0.58164537",
"0.57867205",
"0.57089216",
"0.5665712",
"0.5634836",
"0.55668044",
"0.550847",
"0.54680765",
"0.5388977",
"0.5367029",
"0.5367029",
"0.5362631",
"0.53599876",
"0.53311914",
"0.5264665",
"0.5261038",
"0.52266824",
"0.5223511",
"0.5223511",
"0.51994014",
"0.51558346",
"0.5150171",
"0.51410985",
"0.51389396",
"0.51389396",
"0.5103227"
]
| 0.62584925 | 0 |
Add Website.can_read access to all custom roles. | def add_homepage_access_to_custom_roles(self) -> None:
website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_granted_roles(self):",
"def addRoleAccess(self, role, read, write, catalog='*', repository='*'):\n return self._client.addRoleAccess(role, read, write, catalog, repository)",
"def is_permitted(self):\n\t\tfrom frappe.utils import has_common\n\n\t\tallowed = [\n\t\t\td.role for d in frappe.get_all(\"Has Role\", fields=[\"role\"], filters={\"parent\": self.name})\n\t\t]\n\n\t\tcustom_roles = get_custom_allowed_roles(\"page\", self.name)\n\t\tallowed.extend(custom_roles)\n\n\t\tif not allowed:\n\t\t\treturn True\n\n\t\troles = frappe.get_roles()\n\n\t\tif has_common(roles, allowed):\n\t\t\treturn True",
"def users_include_access_roles(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def set_user_roles(request):\n\n # defaults\n permissions = {\n \"can_view_manager\": False,\n \"can_view_operator\": False\n }\n\n if request.user.is_anonymous or request.user.userprofile.user_type == UserType.CUSTOMER:\n return permissions\n\n user_profile = request.user.userprofile\n if user_profile.user_type == UserType.OPERATOR:\n permissions['can_view_operator'] = True\n return permissions\n if user_profile.user_type == UserType.MANAGER:\n permissions['can_view_manager'] = True\n permissions['can_view_operator'] = True\n return permissions",
"def test_ipam_roles_read(self):\n pass",
"async def roles(self, ctx):\n\n pass",
"def create_custom_permissions(self) -> None:\n self.add_permission_view_menu(\"all_datasource_access\", \"all_datasource_access\")\n self.add_permission_view_menu(\"all_database_access\", \"all_database_access\")\n self.add_permission_view_menu(\"all_query_access\", \"all_query_access\")\n self.add_permission_view_menu(\"can_share_dashboard\", \"Superset\")\n self.add_permission_view_menu(\"can_share_chart\", \"Superset\")",
"def getRoles(self):",
"def get_can_read(self):\n\t\tif not self.can_read:\n\t\t\tself.build_permissions()\n\t\treturn self.can_read",
"def has_read_permission(request):\n return request.user.is_authenticated",
"def can_manage_roles(context):\n channel = context.channel\n member = context.message.author\n return dict(channel.permissions_for(member))['manage_roles']",
"def allowed_roles(allowed_permission=[]):\n\n def decorator(view_func):\n def wrapper_func(request, *args, **kwargs):\n all_perms = request.user.get_all_permissions()\n for perm in allowed_permission:\n if perm in all_perms or request.user.is_admin:\n return view_func(request, *args, **kwargs)\n else:\n messages.error(request, 'You do not have enough permissions to perform this operations')\n return redirect('/')\n\n return wrapper_func\n\n return decorator",
"def manageableRoles(self):\n return roleinfo.AUTHOR_ROLES",
"def test_ModifyPortalContent_permission_roles(self):\n roles_of_permission = self.get_roles_of_permission('Modify portal content')\n self.assertTrue(len(roles_of_permission) == 2)\n self.assertTrue('Parcelling Manager' in roles_of_permission)\n self.assertTrue('Manager' in roles_of_permission)",
"async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)",
"def get_roles():\r\n global _roles\r\n return _roles",
"def generate_is_role_functions(cls, roles):\n for access_role in roles.keys():\n setattr(cls, \"is_\" + access_role, lambda x: False)",
"def test_list_roles(self):\n pass",
"def test_manage_user_roles__manage_all(self, appbuilder, role, user):\n sm = appbuilder.sm\n\n user.roles.append(role('Other'))\n user.roles.append(role('Viewer'))\n sm.manage_user_roles(user, ['Admin', 'User'])\n\n assert {r.name for r in user.roles} == {'Admin', 'User'}",
"def read_allowed(self, ui, req):\n\n user = req.env.get('REMOTE_USER')\n\n deny_read = ui.configlist('web', 'deny_read', untrusted=True)\n if deny_read and (not user or ismember(ui, user, deny_read)):\n return False\n\n allow_read = ui.configlist('web', 'allow_read', untrusted=True)\n # by default, allow reading if no allow_read option has been set\n if (not allow_read) or ismember(ui, user, allow_read):\n return True\n\n return False",
"async def rollme_allow(self, ctx, *roles: typing.Union[discord.Role]):\n if not roles:\n raise Tools.NoRolesGiven\n\n await ctx.bot.add_self_roles(ctx, *roles)\n\n await ctx.send(embed = await Macro.send(\n f\"Allowed the self roles {', '.join([str(role) for role in roles])}\"\n ))",
"def test_AccessContentsInformation_permission_roles(self):\n roles_of_permission = self.get_roles_of_permission('Access contents information')\n self.assertTrue(len(roles_of_permission) == 3)\n self.assertTrue('Parcelling Manager' in roles_of_permission)\n self.assertTrue('Parcelling Reader' in roles_of_permission)\n self.assertTrue('Manager' in roles_of_permission)",
"def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})",
"def test03_perm_roles(self):\n print_ln('test16_perm_roles')\n \n try:\n pList = review.find_perms(Perm(obj_name='py-obj*', op_name='*'))\n for perm in pList: \n print_ln(\"Role Perm obj name=\" + perm.obj_name + ', op=' + perm.op_name + ', id=' + perm.obj_id)\n rList = review.perm_roles(perm)\n for role in rList:\n print_ln(\"Assigned role=\" + role, 1)\n except Exception as e:\n self.fail('test16_perm_roles failed, exception=' + e.msg)",
"def create_basic_roles(script):\n roles = script.do(Roles.GetAll())\n roles = [a['name'] for a in roles]\n\n 'developers' in roles or script.do(Roles.Create('developers'))\n 'supervisors' in roles or script.do(Roles.Create('supervisors'))\n 'readers' in roles or script.do(Roles.Create('readers'))",
"def listRoleAccess(self, role):\n return self._client.listRoleAccess(role)",
"def users_include_roles(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"async def get_permissions_for_role(self, role):\n raise NotImplementedError",
"def sync_roles(self) -> None:\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()"
]
| [
"0.63643557",
"0.62228906",
"0.6118731",
"0.60232246",
"0.6011454",
"0.597188",
"0.5928835",
"0.59201527",
"0.5918606",
"0.588256",
"0.580424",
"0.57854474",
"0.5676077",
"0.56598353",
"0.5642836",
"0.563202",
"0.55289304",
"0.55146646",
"0.5463989",
"0.5431027",
"0.53917587",
"0.53672993",
"0.5364955",
"0.53535974",
"0.53479683",
"0.53339356",
"0.5332107",
"0.53314084",
"0.53237534",
"0.5311962"
]
| 0.7238645 | 0 |
Returns all permissions as a set of tuples with the action and resource names. | def get_all_permissions(self) -> set[tuple[str, str]]:
return set(
self.appbuilder.get_session.execute(
select(self.action_model.name, self.resource_model.name)
.join(self.permission_model.action)
.join(self.permission_model.resource)
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def permission_resources(self):\n return self._permission_resources",
"def permission_resources(self):\n return self._permission_resources",
"def collect_all_perms(cls):\n permissions = filter(lambda perm: perm.startswith('biom_perm') or perm.startswith('entity_perm'), dir(cls))\n\n result = [{\n 'perm_name': perm,\n 'description': getattr(cls, perm).__doc__,\n 'perm_type': getattr(cls, perm).action_type if hasattr(getattr(cls, perm), 'action_type') else None,\n 'default_value': getattr(cls, perm).default_value if hasattr(getattr(cls, perm), 'default_value') else None,\n\n } for perm in permissions]\n return result",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n elif self.action in ['retrieve']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [AllowAny]\n return [p() for p in permissions]",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n try:\n # return permission_classes depending on `action`\n return [permission() for permission in self.permission_action\n [self.action]]\n except KeyError:\n # action is not set return default permission_classes\n return [permission() for permission in self.permission_classes]",
"def get_permissions(self):\n try:\n # return permission_classes depending on `action`\n return [permission() for permission in self.permission_action\n [self.action]]\n except KeyError:\n # action is not set return default permission_classes\n return [permission() for permission in self.permission_classes]",
"def get_permissions(self):\n if self.action in ['signup', 'login', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['retrieve', 'update', 'partial_update', 'destroy', 'u', 'p']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]",
"def get_permissions(self):\n if self.action in ['create', 'retrieve', 'react', 'reactions']:\n permissions = [IsAuthenticated, IsFriendPostOwner]\n elif self.action in ['update', 'partial_update']:\n permissions = [IsAuthenticated, IsCommentOwner]\n elif self.action in ['destroy']:\n permissions = [IsAuthenticated, IsCommentOrPostOwner]\n else:\n permissions = [IsAuthenticated]\n return[p() for p in permissions]",
"def permissions(self):\n return list(self._permissions)",
"def permissions(self):\n perms = set()\n for g in self.groups:\n perms = perms | set(g.permissions)\n return perms",
"def get_permissions(self):\n permission_classes = {\"create\": [CanUploadReport]}.get(self.action)\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action in ['list', 'retrieve']:\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def list_permissions(self):\n # type: () -> List[Permission]\n headers = Headers({\"accept\": \"application/json\"})\n return self.connection.api_call(\n \"GET\", [\"resources\", self.id, \"permissions\"], model=Permission, headers=headers,\n )",
"def get_permissions(self):\n if self.action == 'list':\n permission_classes = [AdminPermission.__or__(ReviewerPermission)]\n elif self.action == 'retrieve':\n permission_classes = [\n AdminPermission.__or__(\n ReviewerPermission.__or__(UserPermission)\n )\n ]\n elif self.action in ['update', 'partial_update']:\n permission_classes = [AdminPermission.__or__(UserPermission)]\n else:\n permission_classes = [AdminPermission]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action == 'list':\n permission_classes = [AdminPermission.__or__(ReviewerPermission)]\n elif self.action == 'retrieve':\n permission_classes = [\n AdminPermission.__or__(\n ReviewerPermission.__or__(UserPermission)\n )\n ]\n elif self.action in ['update', 'partial_update']:\n permission_classes = [AdminPermission.__or__(UserPermission)]\n else:\n permission_classes = [AdminPermission]\n return [permission() for permission in permission_classes]",
"def get_all_permissions(self, obj=None):",
"def get_permissions(self):\n return [permission() for permission in self.permission_classes]",
"def get_permissions(self):\n if self.action in ['retrieve', 'list']:\n self.permission_classes = [permissions.ViewUserPermission,]\n elif self.action in ['update', 'partial_update']:\n self.permission_classes = [permissions.UpdateUserPermission]\n elif self.action in ['destroy']:\n self.permission_classes = [permissions.UpdateUserPermission]\n\n return [permission() for permission in self.permission_classes]",
"def get_permissions(self):\n permissions = [IsAuthenticated]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n permissions = [IsAdminUser]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n if self.action in ['list', 'create']:\n permission_classes = [IsStaffOrReadOnly]\n else:\n permission_classes = [IsAuthorOrReadOnly, IsStaffOrReadOnly]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action == 'list':\n permission_classes = [IsAuthenticatedOrReadOnly]\n if self.action == 'create':\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAdminUser | IsAuthenticated| IsAdminOrIsSelf]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action == 'list':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def permissions(self):\n return self.get_permissions()",
"def get_permissions(self):\n if self.action in [\"update\", \"partial_update\", \"destroy\"]:\n permission_classes = [IsAdminOrOwner]\n else:\n permission_classes = [IsAuthenticated]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action == \"destroy\":\n permission_classes = [IsAuthenticated, IsAuthor]\n elif self.action in [\"list\", \"create\"]:\n permission_classes = [IsAuthenticated, IsContributorOrAuthor]\n else:\n permission_classes = [NotAllowed]\n\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n \n if self.action in ['signup', 'login', 'verify']:\n permissions =[AllowAny]\n # cualquiera que vaya a acceder a estas peticiones lo podra hacer\n # si la accion es de tipo retrieve se debe validar el permiso de acceso\n elif self.action in ['retrieve', 'update', 'partial_update']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n # si no hay ninguna opcion debe tener una sesion autenticada \n return [p() for p in permissions]",
"def get_permissions(self):\n if self.action == 'destroy' or self.action == 'partial_update':\n permission_classes = [\n permissions.IsOwner,\n IsAuthenticated,\n ]\n else:\n permission_classes = [\n permissions.IsAdminOrReadOnly,\n IsAuthenticated,\n ]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action =='retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]"
]
| [
"0.7453826",
"0.7453826",
"0.7320338",
"0.7297786",
"0.7254982",
"0.7202756",
"0.7202756",
"0.71981245",
"0.7105019",
"0.7104533",
"0.70924604",
"0.70783067",
"0.70722604",
"0.7064907",
"0.7051972",
"0.7051972",
"0.7036866",
"0.70281744",
"0.7020507",
"0.7017964",
"0.7006028",
"0.69935715",
"0.6982356",
"0.6979132",
"0.69589126",
"0.69468",
"0.69277287",
"0.69128186",
"0.68908083",
"0.68816906"
]
| 0.8255731 | 0 |
Get permissions except those that are for specific DAGs. Returns a dict with a key of (action_name, resource_name) and value of permission with all permissions except those that are for specific DAGs. | def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:
return {
(action_name, resource_name): viewmodel
for action_name, resource_name, viewmodel in (
self.appbuilder.get_session.execute(
select(self.action_model.name, self.resource_model.name, self.permission_model)
.join(self.permission_model.action)
.join(self.permission_model.resource)
.where(~self.resource_model.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
)
)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_dag_specific_permissions(self) -> None:\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n\n for dag in dags:\n root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)",
"def get_permissions(self, exclude=None):\n\n exclude = exclude or []\n exclude.extend(['id', 'name', 'description'])\n\n perms = {}\n groups = self.secondary_groups.all()\n groups.append(self.primary_group)\n for group in groups:\n for c in group.__table__.columns:\n # try if the permission already exists in the dictionary\n # and if the permission is true, set it to True\n try:\n if not perms[c.name] and getattr(group, c.name):\n perms[c.name] = True\n\n # if the permission doesn't exist in the dictionary\n # add it to the dictionary\n except KeyError:\n # if the permission is in the exclude list,\n # skip to the next permission\n if c.name in exclude:\n continue\n perms[c.name] = getattr(group, c.name)\n return perms",
"def get_permissions(self):\n if self.action in ['create', 'retrieve', 'react', 'reactions']:\n permissions = [IsAuthenticated, IsFriendPostOwner]\n elif self.action in ['update', 'partial_update']:\n permissions = [IsAuthenticated, IsCommentOwner]\n elif self.action in ['destroy']:\n permissions = [IsAuthenticated, IsCommentOrPostOwner]\n else:\n permissions = [IsAuthenticated]\n return[p() for p in permissions]",
"def get_permissions(self):\n try:\n # return permission_classes depending on `action`\n return [permission() for permission in self.permission_action\n [self.action]]\n except KeyError:\n # action is not set return default permission_classes\n return [permission() for permission in self.permission_classes]",
"def get_permissions(self):\n try:\n # return permission_classes depending on `action`\n return [permission() for permission in self.permission_action\n [self.action]]\n except KeyError:\n # action is not set return default permission_classes\n return [permission() for permission in self.permission_classes]",
"def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) -> Permission | None:\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\", action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name != \"Admin\"]\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\n \"Revoking '%s' on DAG '%s' for role '%s'\",\n perm.action,\n dag_resource_name,\n role.name,\n )\n self.remove_permission_from_role(role, perm)\n\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named \"\n f\"'{rolename}', but that role does not exist\"\n )\n\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes \"\n f\"the following invalid permissions: {invalid_action_names}; \"\n f\"The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)",
"def get_permissions(self, exclude=None):\n exclude = exclude or []\n exclude.extend(['id', 'name', 'description'])\n\n perms = {}\n # Get the Guest group\n group = Group.query.filter_by(guest=True).first()\n for c in group.__table__.columns:\n if c.name in exclude:\n continue\n perms[c.name] = getattr(group, c.name)\n return perms",
"def get_permissions(self):\n if self.action in [\"list\"]:\n permission_classes = [permissions.UserOrPlaylistIsAuthenticated]\n elif self.action in [\"create\", \"set_display_name\", \"push_attendance\"]:\n permission_classes = [\n permissions.PlaylistIsAuthenticated\n | permissions.IsParamsVideoAdminThroughOrganization\n | permissions.BaseIsParamsVideoRoleThroughPlaylist\n ]\n elif self.action in [\n \"partial_update\",\n \"retrieve\",\n ]:\n permission_classes = [\n permissions.IsTokenPlaylistRouteObjectRelatedVideo\n | permissions.IsParamsVideoAdminThroughOrganization\n | permissions.BaseIsParamsVideoRoleThroughPlaylist\n ]\n elif self.action in [\"list_attendances\"]:\n permission_classes = [\n permissions.IsTokenInstructor\n | permissions.IsTokenAdmin\n # With standalone site, admin can access\n | permissions.IsParamsVideoAdminThroughOrganization\n | permissions.IsParamsVideoAdminOrInstructorThroughPlaylist\n ]\n elif self.action is None:\n if self.request.method not in self.allowed_methods:\n raise MethodNotAllowed(self.request.method)\n permission_classes = self.permission_classes\n else:\n # When here it means we forgot to define a permission for a new action\n # We enforce the permission definition in this method to have a clearer view\n raise NotImplementedError(f\"Action '{self.action}' is not implemented.\")\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n elif self.action in ['retrieve']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [AllowAny]\n return [p() for p in permissions]",
"def get_permissions(self):\n if self.action == 'destroy' or self.action == 'partial_update':\n permission_classes = [\n permissions.IsOwner,\n IsAuthenticated,\n ]\n else:\n permission_classes = [\n permissions.IsAdminOrReadOnly,\n IsAuthenticated,\n ]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action in [\"update\", \"partial_update\", \"destroy\"]:\n permission_classes = [IsAdminOrOwner]\n else:\n permission_classes = [IsAuthenticated]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action in ['signup', 'login', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['retrieve', 'update', 'partial_update', 'destroy', 'u', 'p']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]",
"def clean_permissions(self):\n permissions = self.cleaned_data['permissions']\n return [permission for permission in permissions\n if not (permission.startswith('delete_') or permission.startswith('add_'))]",
"def get_permissions(self):\n if self.action == 'list':\n permission_classes = [IsAuthenticatedOrReadOnly]\n if self.action == 'create':\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAdminUser | IsAuthenticated| IsAdminOrIsSelf]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n if self.action == 'update' and self.action == 'delete':\n permission_classes = [IsBlackListedToken, IsValidGroupUser]\n else:\n permission_classes = [IsBlackListedToken, ]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action == 'update' and self.action == 'delete':\n permission_classes = [IsBlackListedToken, IsValidGroupUser]\n else:\n permission_classes = [IsBlackListedToken, ]\n return [permission() for permission in permission_classes]",
"def permission_list(**kwargs):\n print(AppPermissionSchema(many=True).dumps(\n get_protected_routes(ignored_methods=[\"HEAD\", \"OPTIONS\"]), indent=4))",
"def get_permissions(self):\n permission_classes = {\"create\": [CanUploadReport]}.get(self.action)\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action in []:\n permission_classes = [permissions.AllowAny]\n else:\n permission_classes = [permissions.IsAuthenticated]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n permissions = [IsAdminUser]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n if self.action == \"destroy\":\n permission_classes = [IsAuthenticated, IsAuthor]\n elif self.action in [\"list\", \"create\"]:\n permission_classes = [IsAuthenticated, IsContributorOrAuthor]\n else:\n permission_classes = [NotAllowed]\n\n return [permission() for permission in permission_classes]",
"def get_all_permissions(self) -> set[tuple[str, str]]:\n return set(\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n )\n )",
"def get_permissions(self):\n if self.action in ['list', 'retrieve']:\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action == \"create\" or self.action == \"token\":\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAuthenticated]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n\n permissions = [\n IsAuthenticated(),\n IsCircleActiveMember(),\n ]\n\n if self.action in ['update', 'partial_update', 'finish']:\n permissions.append(\n IsRideOwner()\n )\n\n if self.action in ['join', 'qualify']:\n permissions.append(\n IsNotRideOwner()\n )\n\n return permissions",
"def extract_new_permissions(permissions):\n res = []\n for p in permissions:\n if not p.startswith('android.permission') and not p.startswith('com.google') and not p.startswith('com.android'):\n res.append(p)\n return res",
"def get_permissions(self):\n if self.action == 'list':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def permission_resources(self):\n return self._permission_resources",
"def permission_resources(self):\n return self._permission_resources"
]
| [
"0.6322439",
"0.56092525",
"0.5590724",
"0.5579938",
"0.5579938",
"0.5560115",
"0.55266464",
"0.5523204",
"0.5505508",
"0.5470867",
"0.5448818",
"0.54386747",
"0.54310954",
"0.5424532",
"0.54092175",
"0.53158826",
"0.53158826",
"0.52948",
"0.52631074",
"0.52576387",
"0.5255612",
"0.52525985",
"0.52467847",
"0.5237772",
"0.5201824",
"0.5192579",
"0.51920897",
"0.5188751",
"0.51693594",
"0.51693594"
]
| 0.7711198 | 0 |
Returns a dict with a key of role name and value of role with early loaded permissions. | def _get_all_roles_with_permissions(self) -> dict[str, Role]:
return {
r.name: r
for r in self.appbuilder.get_session.scalars(
select(self.role_model).options(joinedload(self.role_model.permissions))
).unique()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_roles_setting() -> dict[str, set[type['Intent']]]:\n return {\n # the admin role has access to everything\n 'admin': {\n Public,\n Private,\n Personal,\n Secret\n },\n # the editor can do most things\n 'editor': {\n Public,\n Private,\n Personal,\n },\n # registered users can do a few things\n 'member': {\n Public,\n Personal,\n },\n # the public has some access\n 'anonymous': {\n Public,\n }\n }",
"def normalized_roles_metadata(self):\n result = {}\n for role, meta in six.iteritems(self.plugin.roles_metadata):\n condition = \"settings:{0}.metadata.enabled == false\".format(\n self.plugin.name)\n meta = copy.copy(meta)\n meta['restrictions'] = [condition] + meta.get('restrictions', [])\n result[role] = meta\n\n return result",
"def get_roles(role):",
"def roles(self, user):\n return {}",
"def get_granted_roles(self):",
"def getRole(self, desired=None):\n return {\"roleName\":\"hasici\",\n \"roleTitle\":\"Soptici\"}",
"def getRoleInfo(self, role):",
"def roles(self):\n db = self['__store'].db\n my_roles = {\n group_id\n for group_id, in db(\"\"\"\n select distinct\n groups.id\n from `groups`, subgroups\n where\n groups.id = subgroups.group_id\n and subgroup_id = %s\n and groups.type = 'U'\n \"\"\",\n self._id)\n }\n return my_roles",
"def get_roles():\r\n global _roles\r\n return _roles",
"def getRoles(self):",
"def load_roles(self):\n\n roles_file = open(self.roles_file, mode='r')\n self.assignable_roles = json.load(roles_file)",
"def getCloudRoleList():\n \n roleList = {}\n\n # get all available roles\n try:\n list = Roletype.objects.all()\n \n # loop through list\n for listInfo in list:\n roleList[listInfo.id] = listInfo\n \n except Exception, e:\n debugException(e)\n \n return roleList",
"def listRoleInfo(self):\n return self._roles.values()",
"def getRoles():\n return jsonify(listRoles(ROLES_DIR))",
"def get_permissions(self):\n return {key: value.permissions for key, value in self}",
"def getRoles(self):\n return [self.getRole(), {\"roleName\":\"policajti\", \"roleTitle\":\"Svestky\"}]",
"def get_roles(self):\n path = \"%s/services/impala/roles\" % self.__base_path\n response = self.__session.get(path)\n self.__check_status_code(response.status_code)\n return response.json()",
"def get_roles():\n return config.get_cfg_storage(ID_ROLE)",
"async def get_permissions_for_role(self, role):\n raise NotImplementedError",
"def user_roles(request):\n logger.debug('user roles')\n if not hasattr(request, 'user') or not request.user.is_authenticated:\n return {\n 'admin': False,\n 'super user': False,\n 'roles': {},\n }\n # here you get in db or ldap the user's authorizations\n # in this skeleton I did not share it for now\n admin = False\n super_user = False\n roles = {'api_file': {'GET': True, 'POST': False}}\n return {\n 'admin': admin,\n 'super user': super_user,\n 'roles': roles,\n }",
"def roles(self):\n # TODO: The admin interface only allows a subset of the roles\n # listed in model.py since it uses the OPDS representation of\n # the data, and some of the roles map to the same MARC code.\n CODES = Contributor.MARC_ROLE_CODES\n marc_to_role = dict()\n for role in [\n Contributor.ACTOR_ROLE,\n Contributor.ADAPTER_ROLE,\n Contributor.AFTERWORD_ROLE,\n Contributor.ARTIST_ROLE,\n Contributor.ASSOCIATED_ROLE,\n Contributor.AUTHOR_ROLE,\n Contributor.COMPILER_ROLE,\n Contributor.COMPOSER_ROLE,\n Contributor.CONTRIBUTOR_ROLE,\n Contributor.COPYRIGHT_HOLDER_ROLE,\n Contributor.DESIGNER_ROLE,\n Contributor.DIRECTOR_ROLE,\n Contributor.EDITOR_ROLE,\n Contributor.ENGINEER_ROLE,\n Contributor.FOREWORD_ROLE,\n Contributor.ILLUSTRATOR_ROLE,\n Contributor.INTRODUCTION_ROLE,\n Contributor.LYRICIST_ROLE,\n Contributor.MUSICIAN_ROLE,\n Contributor.NARRATOR_ROLE,\n Contributor.PERFORMER_ROLE,\n Contributor.PHOTOGRAPHER_ROLE,\n Contributor.PRODUCER_ROLE,\n Contributor.TRANSCRIBER_ROLE,\n Contributor.TRANSLATOR_ROLE,\n ]:\n marc_to_role[CODES[role]] = role\n return marc_to_role",
"def get_permissions_map(self, created):\n company = get_object_or_404(models.Company, pk=self.data['company'])\n admins = company.admins\n accountants = company.accountants\n return {\n 'view_sale': [admins, accountants],\n 'change_sale': [admins, accountants],\n 'delete_sale': [admins, accountants]\n }",
"def roles(self):\n params = {\n \"f\" : \"json\"\n }\n uURL = self._url + \"/roles\"\n return self._con.get(path=uURL, params=params)",
"def initialize_role_table():\n roles = [\n {\n \"name\": \"user\",\n \"description\": \"registered user permission\",\n \"raw_permissions\": Role.Permissions.REGISTERED.value\n },\n {\n \"name\": \"editor\",\n \"description\": \"user has ability to edit all content and comments\",\n \"raw_permissions\": (Role.Permissions.REGISTERED | Role.Permissions.EDITOR).value\n },\n {\n \"name\": \"admin\",\n \"description\": \"administrator user with access to all of the application\",\n \"raw_permissions\": (Role.Permissions.REGISTERED | Role.Permissions.EDITOR | Role.Permissions.ADMINISTRATOR).value\n }\n ]\n with session_manager() as session:\n for r in roles:\n role = Role.query.filter(Role.name == r.get(\"name\")).one_or_none()\n\n # is there no existing role by a given name?\n if role is None:\n role = Role(\n name=r.get(\"name\"),\n description=r.get(\"description\"),\n raw_permissions=r.get(\"raw_permissions\")\n )\n # otherwise, need to update existing role permissions\n else:\n role.description = r.get(\"description\")\n role.raw_permissions = r.get(\"raw_permissions\")\n\n db.session.add(role)\n db.session.commit()",
"def load_roles():\n for code, name in list(config.Roles.ALL_ROLES.items()):\n role = ContactRole.objects.get_or_create(code=code)[0]\n if role.name != name:\n role.name = name\n role.save()",
"def build_perm_map(self):\n\t\tself.perm_map = {}\n\t\tfor r in get_valid_perms():\n\t\t\tdt = r[\"parent\"]\n\n\t\t\tif dt not in self.perm_map:\n\t\t\t\tself.perm_map[dt] = {}\n\n\t\t\tfor k in frappe.permissions.rights:\n\t\t\t\tif not self.perm_map[dt].get(k):\n\t\t\t\t\tself.perm_map[dt][k] = r.get(k)",
"def get_permissions_map(self, created):\n company = get_object_or_404(models.Company, pk=self.data['company'])\n admins = company.admins\n accountants = company.accountants\n return {\n 'view_media': [admins, accountants],\n 'change_media': [admins, accountants],\n 'delete_media': [admins, accountants]\n }",
"def discover_role_strings(self):\n if not self._plugins_used:\n return {}\n\n # we have to refresh everything\n self._role_strings_info = {}\n\n for pl in self._plugins_used:\n pl.discover_strings()\n\n # if it could be a setter, check whether it has any\n # key string generator\n if any([j for k, v in self.role_strings_info.items() for j in v if j[RoleInfo.ROLE] == Role.SETTER]):\n self._set_string_generators()\n\n return self.role_strings_info",
"def get_roles(sig: UpperSignature) -> dict[str, str]:\n out = {}\n if sig.eboard:\n out['eboard'] = sig.eboard\n if sig.active_rtp:\n out['rtp'] = 'RTP'\n if sig.three_da:\n out['three_da'] = '3DA'\n if sig.w_m:\n out['wm'] = 'Wiki Maintainer'\n if sig.webmaster:\n out['webmaster'] = 'Webmaster'\n if sig.c_m:\n out['cm'] = 'Constitutional Maintainer'\n if sig.drink_admin:\n out['drink'] = 'Drink Admin'\n return out",
"def _get_role(self):\n return self.__role"
]
| [
"0.6373386",
"0.631273",
"0.63032925",
"0.626296",
"0.62269676",
"0.6022238",
"0.6016043",
"0.5985932",
"0.59825903",
"0.59076625",
"0.5885268",
"0.5858767",
"0.5834165",
"0.5824442",
"0.5819442",
"0.5761967",
"0.5714344",
"0.5677739",
"0.56501216",
"0.5647016",
"0.55830544",
"0.55688816",
"0.55588204",
"0.55543244",
"0.5499788",
"0.54978514",
"0.548313",
"0.5466925",
"0.54393244",
"0.5417338"
]
| 0.7307136 | 0 |