Columns:
  query            string, lengths 9 to 9.05k
  document         string, lengths 10 to 222k
  metadata         dict
  negatives        list, length 30
  negative_scores  list, length 30
  document_score   string, lengths 4 to 10
  document_rank    string, 2 distinct values
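Each row pairs a query with one positive document, 30 negatives, and their scores, and the metadata column marks the (query, document, negatives) triplet objective. The following is a minimal sketch, not part of the dump, of how one such row could be unpacked into training triplets; the row dict and the helper name are hypothetical assumptions.

# Sketch: unpack one dataset row (fields follow the columns listed above)
# into (query, positive, negative) triplets for contrastive training.
def row_to_triplets(row):
    query = row["query"]              # natural-language description
    positive = row["document"]        # the matching code snippet
    negatives = row["negatives"]      # 30 non-matching snippets
    scores = row["negative_scores"]   # retrieval score of each negative
    # hardest negatives (highest-scoring non-matches) first
    ranked = sorted(zip(scores, negatives), key=lambda p: -float(p[0]))
    return [(query, positive, neg) for _, neg in ranked]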
state_transl(state) -> int Translates state names into numbers.
def state_transl(state):
    nonlocal state_cnt
    nonlocal state_transl_dict

    if state not in state_transl_dict.keys():
        state_transl_dict[state] = state_cnt
        state_cnt += 1

    return str(state_transl_dict[state])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_state_code(self, data) -> int:\n return int(self.state)", "def get_new_state():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))\n return state", "def get_lookup_state(self, state):\n return \"\".join(map(str, state))", "def lookup_state(state: str | int) -> dict:\n # Try to cast state as an integer to deal with \"02\", \"2\", 2.0, np.int64(2)...\n try:\n is_fips = isinstance(int(state), int)\n except ValueError:\n is_fips = False\n if is_fips:\n state = str(int(state)).zfill(2)\n return {x[\"fips\"]: x for x in STATES}[state]\n key = \"code\" if len(state) == 2 else \"name\"\n return {x[key].lower(): x for x in STATES}[state.lower()]", "def state_encod_arch2(self, state, action):", "def stateToCode(self, state):\n\n multiplier = 1\n code = \"\"\n for i in range(self.num_joints-1, -1, -1):\n num_angles = len(self.angles[i])\n code += str(int((state / multiplier ) % num_angles))\n multiplier *= len(self.angles[i])\n\n # Return the reversed code\n return code [::-1]", "def translate_number(number):\n return NUMBER_TRANSLATOR[number]", "def represent_state(state):\n return tuple(state[0]), tuple(state[1]), tuple(state[2])", "def fromState(state):", "def states_to_numbers(self, states, out=None):\n hind = self._get_hilbert_index()\n\n out = self._to_constrained_numbers_kernel(\n self._has_constraint,\n self._bare_numbers,\n hind.states_to_numbers(states, out),\n )\n\n return out", "def new_state():\n return ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))", "def get_state_s(self, lower = True):\r\n\r\n state_s = STATE_STRINGS[self._state - 1]\r\n state_s = state_s.lower() if lower else state_s\r\n return state_s", "def symb_transl(symb):\n nonlocal symb_cnt\n nonlocal symb_transl_dict\n\n if symb not in symb_transl_dict.keys():\n symb_transl_dict[symb] = symb_cnt\n symb_cnt += 1\n\n return str(symb_transl_dict[symb])", "def add_fsm_state_names():\n from migen.fhdl.visit import NodeTransformer\n from migen.genlib.fsm import NextState, NextValue, _target_eq\n from migen.fhdl.bitcontainer import value_bits_sign\n\n class My_LowerNext(NodeTransformer):\n def __init__(self, next_state_signal, next_state_name_signal, encoding,\n aliases):\n self.next_state_signal = next_state_signal\n self.next_state_name_signal = next_state_name_signal\n self.encoding = encoding\n self.aliases = aliases\n # (target, next_value_ce, next_value)\n self.registers = []\n\n def _get_register_control(self, target):\n for x in self.registers:\n if _target_eq(target, x[0]):\n return x[1], x[2]\n raise KeyError\n\n def visit_unknown(self, node):\n if isinstance(node, NextState):\n try:\n actual_state = self.aliases[node.state]\n except KeyError:\n actual_state = node.state\n return [\n self.next_state_signal.eq(self.encoding[actual_state]),\n self.next_state_name_signal.eq(\n int.from_bytes(actual_state.encode(), byteorder=\"big\"))\n ]\n elif isinstance(node, NextValue):\n try:\n next_value_ce, next_value = self._get_register_control(\n node.target)\n except KeyError:\n related = node.target if isinstance(node.target,\n Signal) else None\n next_value = Signal(bits_sign=value_bits_sign(node.target),\n related=related)\n next_value_ce = Signal(related=related)\n self.registers.append(\n (node.target, next_value_ce, next_value))\n return next_value.eq(node.value), next_value_ce.eq(1)\n else:\n return node\n\n import migen.genlib.fsm as fsm\n\n def my_lower_controls(self):\n self.state_name = Signal(len(max(self.encoding, key=len)) * 8,\n 
reset=int.from_bytes(\n self.reset_state.encode(),\n byteorder=\"big\"))\n self.next_state_name = Signal(len(max(self.encoding, key=len)) * 8,\n reset=int.from_bytes(\n self.reset_state.encode(),\n byteorder=\"big\"))\n self.comb += self.next_state_name.eq(self.state_name)\n self.sync += self.state_name.eq(self.next_state_name)\n return My_LowerNext(self.next_state, self.next_state_name,\n self.encoding, self.state_aliases)\n\n fsm.FSM._lower_controls = my_lower_controls", "def convert_state_name_to_id(exploration_id, state_name):\n if state_name == feconf.END_DEST:\n return feconf.END_DEST\n return get_state_by_name(exploration_id, state_name).id", "def state_format(states: list) -> list:\n return list(map(_format_n0, states))", "def decode_dict(state):\n new_state = dict()\n for k, v in state.items():\n if v.decode().isnumeric():\n new_state[k.decode()] = int(v)\n else:\n new_state[k.decode()] = v.decode()\n return new_state", "def encode(self, game_state: ssm.SnakeStateMachine) -> int:\n state = [e.encode(game_state) for e in self._encoders]\n return self._state2id[tuple(state)]", "def handle_state(data: bytes) -> Tuple[bytes, str]:\n actor_id, state_length = struct.unpack('IH', data[:6])\n state = data[6:6+state_length].decode(helpers.ENCODING)\n return data[6+state_length:], f'Actor {actor_id} in {state} state'", "def _status_to_state(status):\n if status == 'failed':\n return Finding.State.ACTIVE\n elif status == 'passed' or status == 'skipped':\n return Finding.State.INACTIVE\n else:\n return Finding.State.STATE_UNSPECIFIED", "def output_integer(state, key, data):\n return int(state[key])", "def transformNumberToDNA(inputState):\n if inputState == 0:\n result = \"A\"\n elif inputState == 1:\n result = \"C\"\n elif inputState == 2:\n result = \"G\"\n elif inputState == 3:\n result = \"T\"\n else:\n raise ValueError(\"The input state is not valid as 0,1,2 or 3\") \n return result", "def parse_state(self, state: str):\r\n state = state.strip()\r\n state = state.split(';')\r\n\r\n if len(state) < 2:\r\n print(state)\r\n return\r\n\r\n for field in state:\r\n split = field.split(':')\r\n if len(split) < 2:\r\n continue\r\n\r\n key = split[0]\r\n value = split[1]\r\n\r\n if key in Tello.state_field_converters:\r\n try:\r\n value = Tello.state_field_converters[key](value)\r\n except Exception as e:\r\n print('Error parsing state value for {}: {} to {}'\r\n .format(key, value, Tello.state_field_converters[key]))\r\n self.state[key] = value\r\n return", "def get_state_num(self):\n robot_state = self.get_state('turtlebot3_waffle_pi','world')\n ball_state = self.get_state('soccer_ball','world')\n # each object is in a \"box\" that is RESOLUTION meters wide.\n robot_xbox = np.ceil((robot_state.pose.position.x-Learn.FIELD_XLEFT)/Learn.RESOLUTION)\n robot_ybox = np.ceil(robot_state.pose.position.y/Learn.RESOLUTION)\n ball_xbox = np.ceil((ball_state.pose.position.x-Learn.FIELD_XLEFT)/Learn.RESOLUTION)\n ball_ybox = np.ceil(ball_state.pose.position.y/Learn.RESOLUTION)\n # the state is the combination of dx and dy.\n dx = int(ball_xbox - robot_xbox)\n dy = int(ball_ybox - robot_ybox)\n # adjusting to remove negative values for states\n dx += Learn.BOXES_X-1\n dy += Learn.BOXES_Y-1\n # converting to unique number between 0 and NSTATES-1:\n return (2*Learn.BOXES_X-1)*dy+dx", "def numbers_to_states(self, numbers, out=None):\n\n hind = self._get_hilbert_index()\n return hind.numbers_to_states(self._to_bare_numbers(numbers), out)", "def state_to_string(board_state):\n return str(board_state)", "def 
native_value(self) -> str:\n if isinstance(self._state, Enum):\n return self._state.name.lower()\n return self._state.lower()", "def from_esi_name(cls, esi_state_name: str) -> \"Structure.State\":\n STATES_ESI_MAP = {\n \"anchor_vulnerable\": cls.ANCHOR_VULNERABLE,\n \"anchoring\": cls.ANCHORING,\n \"armor_reinforce\": cls.ARMOR_REINFORCE,\n \"armor_vulnerable\": cls.ARMOR_VULNERABLE,\n \"deploy_vulnerable\": cls.DEPLOY_VULNERABLE,\n \"fitting_invulnerable\": cls.FITTING_INVULNERABLE,\n \"hull_reinforce\": cls.HULL_REINFORCE,\n \"hull_vulnerable\": cls.HULL_VULNERABLE,\n \"online_deprecated\": cls.ONLINE_DEPRECATED,\n \"onlining_vulnerable\": cls.ONLINING_VULNERABLE,\n \"shield_vulnerable\": cls.SHIELD_VULNERABLE,\n \"unanchored\": cls.UNANCHORED,\n \"offline\": cls.POS_OFFLINE,\n \"online\": cls.POS_ONLINE,\n \"onlining\": cls.POS_ONLINING,\n \"reinforced\": cls.POS_REINFORCED,\n \"unanchoring \": cls.POS_UNANCHORING,\n }\n return (\n STATES_ESI_MAP[esi_state_name]\n if esi_state_name in STATES_ESI_MAP\n else cls.UNKNOWN\n )", "def get_state(self, state):\n return state", "def state(self, state: str) -> None:" ]
[ "0.60437626", "0.590167", "0.58867633", "0.5879405", "0.58519715", "0.5814022", "0.57338357", "0.5721833", "0.564492", "0.56102365", "0.5605891", "0.5580897", "0.5508165", "0.5494175", "0.54446507", "0.54058284", "0.53943527", "0.5359987", "0.5331887", "0.5330565", "0.53102297", "0.5292188", "0.5278801", "0.52754056", "0.52654094", "0.5249716", "0.5248246", "0.52210104", "0.52169156", "0.52034247" ]
0.78725755
1
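The positive document above uses nonlocal, so it only runs inside an enclosing function that owns state_cnt and state_transl_dict (as aut2GFF does further down in this dump). A minimal self-contained sketch of that pattern, using a hypothetical make_state_transl factory that is not part of the dataset, might look like this:

def make_state_transl():
    # enclosing scope that owns the counter and the translation table
    state_cnt = 0
    state_transl_dict = dict()

    def state_transl(state):
        nonlocal state_cnt
        if state not in state_transl_dict:
            state_transl_dict[state] = state_cnt
            state_cnt += 1
        return str(state_transl_dict[state])

    return state_transl

state_transl = make_state_transl()
assert state_transl("q0") == "0"   # first name seen gets number 0
assert state_transl("q1") == "1"   # next new name gets the next number
assert state_transl("q0") == "0"   # repeated names keep their number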
symb_transl(symb) -> int Translates symbol names into numbers.
def symb_transl(symb):
    nonlocal symb_cnt
    nonlocal symb_transl_dict

    if symb not in symb_transl_dict.keys():
        symb_transl_dict[symb] = symb_cnt
        symb_cnt += 1

    return str(symb_transl_dict[symb])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def symb_to_num(symbolic):\n\n if len(symbolic) == 9:\n group = (symbolic[:-6], symbolic[3:-3], symbolic[6:])\n try:\n numeric = notation[group[0]] + notation[group[1]] + notation[group[2]]\n except:\n numeric = \"Invalid Symbolic Representation!\"\n else:\n numeric = \"Symbolic input should be of lengh 9!\"\n\n return numeric", "def _hill_normalize_symbol(symb):\n symb = ptab.to_symbol(symb)\n if symb == 'C':\n symb = ''\n if symb == 'H':\n symb = '1'\n return symb", "def num_to_symb(num):\n\n num = str(num)\n if len(num) == 3:\n group = (num[0], num[1], num[2])\n symbolic = \"\"\n\n for key, value in notation.items():\n for g in group:\n if int(g) > 8 or int(g) < 0:\n symbolic = \"Invalid Numerical Representation!\"\n elif g == value:\n symbolic = symbolic + key\n else:\n symbolic = \"Number input should be of length 3!\"\n\n return symbolic", "def ord(s):\n pass", "def translate_number(number):\n return NUMBER_TRANSLATOR[number]", "def numerify_iso_label(lab):\n from sage.databases.cremona import class_to_int\n if 'CM' in lab:\n return -1 - class_to_int(lab[2:])\n else:\n return class_to_int(lab.lower())", "def roman2int(n):\n warn('The function roman2int is deprecated from JAMS. Use module pyjams.',\n category=DeprecationWarning)\n n = str(n).upper()\n i = result = 0\n for integer, numeral in numeral_map:\n while n[i:i + len(numeral)] == numeral:\n result += integer\n i += len(numeral)\n return result", "def name2digits(name):\n \n name = name.lower()\n \n if len(name)>25:\n name = name[0:25]\n \n primenumbers = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97]\n \n n = len(name)\n \n s = 0.0\n \n for i in range(n):\n s += primenumbers[i]*ord(name[i])*2.0**(i+1)\n\n import scipy.io.matlab\n Data = scipy.io.matlab.loadmat('hash.mat',struct_as_record=True)\n x = Data['x']\n t = int(np.mod(s,x.shape[0]))\n\n return np.sort(x[t,:])", "def m_symb(self):\n return self._m_symb", "def atomic_number(name):\n try:\n return symbols.index(name.capitalize()) + 1\n except ValueError:\n return lower_names.index(name.lower()) + 1", "def _pword_to_num(pword):\n\n return [ord(char) for char in pword]", "def numerize():\n pass", "def integer_to_english_numeral(n, activate_tts=False):\n if activate_tts is None:\n activate_tts = False\n elif not isinstance(activate_tts, bool):\n raise TypeError('Argument \"activate_tts\" is not a boolean')\n if not isinstance(n, int):\n raise TypeError('Not an integer')\n if n < 0:\n raise ValueError('Not a positive integer')\n if n > 999999999999:\n raise OverflowError('Integer greater than 999,999,999,999')\n return cardinal_numerals_eng.integer_to_english(n, activate_tts)", "def w2n(word):\n word = re.sub('[^A-Z0-9]', '', word)\n return ''.join([letter_to_number_mapping[x] for x in word])", "def _map_extlit(self, l):\n\n v = abs(l)\n\n if v in self.vmap.e2i:\n return int(copysign(self.vmap.e2i[v], l))\n else:\n self.topv += 1\n\n self.vmap.e2i[v] = self.topv\n self.vmap.i2e[self.topv] = v\n\n return int(copysign(self.topv, l))", "def convert_label_string2num(label, num_types):\n dictionary = empty_label_dictionary(num_types)\n all_labels = list(dictionary.keys())\n if num_types==4:\n label = label.replace('Implicit_', '')\n label = label.replace('Explicit_', '')\n return all_labels.index(label)", "def translateNumber(n):\r\n if type(n) != str:\r\n return None\r\n else:\r\n translation = \"\"\r\n word = \"\"\r\n for c in n:\r\n if c != ' ':\r\n word += c\r\n elif word in Numbers:\r\n translation += Numbers[word] + \" \"\r\n else:\r\n translation 
+= word + \" \"\r\n return translation", "def convert_nm_angstroms(d_nm):\n return d_nm*nm_angstroms", "def ord(space, w_val):\n return space.ord(w_val)", "def update_syllable_count(word, syll_count):\n\n syllables = word.split('-')\n for i in range(1, 4):\n for j in range(len(syllables) - i + 1):\n gram = '-'.join(syllables[j: j + i])\n count = syll_count.setdefault(gram, 0)\n syll_count[gram] = count + 1", "def str2num(s):\n\n i = 0\n l = 0\n try:\n for i in range(len(s)):\n l = l << 8\n l += ord(s[i])\n return l\n except:\n return 0", "def state_transl(state):\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])", "def state_transl(state):\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])", "def word_to_number(nb_word):\n return {\n \"Zero\": 0,\n \"One\": 1,\n \"Two\": 2,\n \"Three\": 3,\n \"Four\": 4,\n \"Five\": 5,\n }[nb_word]", "def value(name):\r\n return sum(alpha.index(str(l)) + 1 for l in name)", "def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map):\n return_proto = []\n\n for protocol in protocols:\n if protocol in proto_to_num:\n return_proto.append(name_to_num_map[protocol])\n else:\n return_proto.append(protocol)\n\n return return_proto", "def roman_to_int(self, s):\r\n if not s:\r\n return 0\r\n\r\n # Create hash table for Roman numerals\r\n d = self.make_reference()\r\n\r\n p = \"\"\r\n x = 0\r\n for c in s.upper():\r\n # Evaluate M (1000)\r\n if c == \"M\":\r\n if p == \"C\":\r\n p = \"CM\"\r\n else:\r\n p = \"M\"\r\n # Evaluate D (500)\r\n elif c == \"D\":\r\n if p == \"C\":\r\n p = \"CD\"\r\n else:\r\n p = \"D\"\r\n # Evaluate C (100)\r\n elif c == \"C\":\r\n if p == \"X\":\r\n p = \"XC\"\r\n else:\r\n p = \"C\"\r\n # Evaluate L (50)\r\n elif c == \"L\":\r\n if p == \"X\":\r\n p = \"XL\"\r\n else:\r\n p = \"L\"\r\n # Evaluate X (10)\r\n elif c == \"X\":\r\n if p == \"I\":\r\n p = \"IX\"\r\n else:\r\n p = \"X\"\r\n # Evaluate V (5)\r\n elif c == \"V\":\r\n if p == \"I\":\r\n p = \"IV\"\r\n else:\r\n p = \"V\"\r\n # Evaluate I (1)\r\n else:\r\n p = \"I\"\r\n \r\n x += d[p]\r\n\r\n return x", "def toRoman(n):\n pass", "def test_human_ens_to_sym(self):\n\n mapper = EnsemblMapper(\n from_type='ensembl', to_type='symbol', host=HOST)\n mapped = mapper.map_ids(['ENSG00000141510', 'ENSG00000012048'])\n\n assert mapped == ['TP53', 'BRCA1']", "def name_to_number(name):\n if (name == 'rock' or name == 'Rock'):\n return 0\n elif (name == 'Spock' or name == 'spock'):\n return 1\n elif (name == 'paper' or name == 'Paper'):\n return 2\n elif (name == 'lizard' or name == 'Lizard'):\n return 3\n elif (name == 'scissors' or name == 'Scissors'):\n return 4\n else:\n return -1" ]
[ "0.5577643", "0.5450003", "0.5389088", "0.5294732", "0.51255476", "0.50892496", "0.5078452", "0.49963906", "0.4970441", "0.49569905", "0.49000248", "0.48983368", "0.4879762", "0.48773867", "0.48554212", "0.47946328", "0.47715932", "0.47610152", "0.47499046", "0.47405514", "0.4716917", "0.47145906", "0.47145906", "0.46891856", "0.46812218", "0.4672504", "0.46558532", "0.46536613", "0.4626512", "0.46206456" ]
0.7598046
0
aut2GFF(aut) -> string Serializes an automaton as the GOAL file format.
def aut2GFF(aut):
    state_cnt = 0
    state_transl_dict = dict()

    ###########################################
    def state_transl(state):
        """state_transl(state) -> int

        Translates state names into numbers.
        """
        nonlocal state_cnt
        nonlocal state_transl_dict

        if state not in state_transl_dict.keys():
            state_transl_dict[state] = state_cnt
            state_cnt += 1

        return str(state_transl_dict[state])
    ###########################################

    res = ""
    res += "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
    res += "<structure label-on=\"transition\" type=\"fa\">\n"

    # get the alphabet
    alphabet = set()
    states = set()
    for trans in aut["transitions"]:
        src, symb, tgt = trans
        alphabet.add(symb)
        states.add(src)
        states.add(tgt)
    for st in aut["initial"]:
        states.add(st)
    for st in aut["final"]:
        states.add(st)

    res += "<alphabet type=\"classical\">\n"
    for symb in alphabet:
        res += "<symbol>" + symb + "</symbol>\n"
    res += "</alphabet>\n"

    res += "<stateset>\n"
    for st in states:
        res += "<state sid=\"" + state_transl(st) + "\"></state>\n"
    res += "</stateset>\n"

    res += "<acc type=\"buchi\">\n"
    for st in aut["final"]:
        res += "<stateID>" + state_transl(st) + "</stateID>\n"
    res += "</acc>\n"

    res += "<initialStateSet>\n"
    for st in aut["initial"]:
        res += "<stateID>" + state_transl(st) + "</stateID>\n"
    res += "</initialStateSet>\n"

    res += "<transitionset>\n"
    tid = 0
    for trans in aut["transitions"]:
        src, symb, tgt = trans
        res += "<transition tid=\"" + str(tid) + "\">\n"
        tid += 1
        res += "<from>" + state_transl(src) + "</from>\n" + \
               "<to>" + state_transl(tgt) + "</to>\n" + \
               "<read>" + symb + "</read>\n"
        res += "</transition>\n"
    res += "</transitionset>\n"

    res += "</structure>\n"
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aut2BA(aut):\n res = \"\"\n for st in aut[\"initial\"]:\n res += st + \"\\n\"\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n res += \"{},{}->{}\".format(symb, src, tgt) + \"\\n\"\n for st in aut[\"final\"]:\n res += st + \"\\n\"\n\n return res", "def gff2database(fnamegff,fnamefasta):\n return gff2db(\n dbname=DB_NAME,\n dbhost=DB_HOST,\n dbconnection=DB_CONNECTION,\n dbuser=DB_USER,\n dbpass=DB_PASS,\n fnamefasta=fnamefasta,\n fnamegff=fnamegff,\n pathperl=PERL_PATH,\n pathloadgff=EXECUTABLE_LOAD_GFF)", "def compose_g(arpa_path: str, words_path: str, g_path: str, log_file: TextIO) -> None:\n arpafst_proc = subprocess.Popen(\n [\n thirdparty_binary(\"arpa2fst\"),\n \"--disambig-symbol=#0\",\n f\"--read-symbol-table={words_path}\",\n arpa_path,\n g_path,\n ],\n stderr=log_file,\n stdout=log_file,\n )\n arpafst_proc.communicate()", "def graph(automaton,graph_name=\"finite_state_machine\",file_name=\"finite_state_machine.gv\"):\n letter = next(iter(automaton.states))[0]\n grph = Digraph(graph_name,filename=file_name)\n grph.attr(rankdir=letter, size='8,5')\n #specifying final nodes \n grph.attr('node', shape='doublecircle')\n for state in automaton.final_states:\n grph.node(state)\n #creating edges\n \n grph.attr('node', shape='circle')\n if(isinstance(automaton,DFA)):\n grph =addEdgesDFA(grph,automaton)\n grph.edge(\"\",automaton.initial_state,\"\")\n grph.edge(\"\",automaton.initial_state,\"\")\n else:\n grph =addEdgesNFA(grph,automaton) \n return grph", "def aut2HOA(aut):\n state_cnt = 0\n state_transl_dict = dict()\n\n ###########################################\n def state_transl(state):\n \"\"\"state_transl(state) -> int\n\n Translates state names into numbers.\n \"\"\"\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])\n ###########################################\n\n symb_cnt = 0\n symb_transl_dict = dict()\n\n ###########################################\n def symb_transl(symb):\n \"\"\"symb_transl(symb) -> int\n\n Translates symbol names into numbers.\n \"\"\"\n nonlocal symb_cnt\n nonlocal symb_transl_dict\n\n if symb not in symb_transl_dict.keys():\n symb_transl_dict[symb] = symb_cnt\n symb_cnt += 1\n\n return str(symb_transl_dict[symb])\n ###########################################\n\n # count states and transitions\n for st in aut[\"initial\"]:\n state_transl(st)\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n state_transl(src)\n symb_transl(symb)\n state_transl(tgt)\n for st in aut[\"final\"]:\n state_transl(st)\n\n res = \"\"\n res += \"HOA: v1\\n\"\n res += \"States: {}\\n\".format(state_cnt)\n\n res += \"Start: \"\n for state in aut[\"initial\"]:\n res += state_transl(state) + \" \"\n res += \"\\n\"\n\n # magic setting for Buchi condition\n res += \"acc-name: Buchi\\n\"\n res += \"Acceptance: 1 Inf(0)\\n\"\n\n # atomic propositions\n res += \"AP: {}\".format(symb_cnt)\n for i in range(symb_cnt):\n for key in symb_transl_dict:\n if symb_transl_dict[key] == i:\n res += \" \\\"{}\\\"\".format(key)\n res += \"\\n\"\n\n res += \"--BODY--\\n\"\n for (name, num) in state_transl_dict.items():\n res += \"State: {}\".format(num)\n if name in aut[\"final\"]:\n res += \" { 0 }\"\n res += \"\\n\"\n\n for trans in aut[\"transitions\"]:\n src, symb, tgt = trans\n if src == name:\n res += \" [\"\n for i in range(symb_cnt):\n if i != 0:\n res += \" & \"\n if symb_transl_dict[symb] != i:\n res += \"!\"\n res += 
str(i)\n\n res += \"] {}\\n\".format(state_transl(tgt))\n res += \"--END--\\n\"\n\n return res", "def gencode_gtf(self):\n return op.join(self.root_dir, \"gencode.annotation.gtf\")", "def generate_graph_fca(organism):\n\twith open('./organisms/'+organism+'/'+'fcagraph.txt','r') as gfile:\n\t\trnamelst = [] \n\t\tenamelst = []\n\t\tclst = []\n\t\tfor line in gfile:\n\t\t\te = line.strip().split('\\t')\n\t\t\tfor rname in e[0:2]:\n\t\t\t\tif rname not in rnamelst:\n\t\t\t\t\trnamelst.append(rname)\n\t\t\tenamelst.append((e[0],e[1]))\n\t\t\tclst.append(e[2])\n\t\t\t\n\t\tG = igraph.Graph(0,directed=True)\n\t\tG.add_vertices(rnamelst)\n\t\tG.add_edges(enamelst)\n\t\tG.es['color'] = clst\n\t\n\treturn G", "def gff2FA(annotation, sequence, windows, output):\n df_gff = pd.read_csv(annotation, index_col=False, sep='\\t', header=None, comment=\"#\")\n df_gff.columns = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']\n fasta_seq = SeqIO.parse(sequence, 'fasta')\n buffer_seqs = []\n cont = 0\n for record in fasta_seq:\n print(record.id)\n dff_extract = df_gff[df_gff.seqname == record.id]\n for key,val in dff_extract.iterrows():\n clean_seq = ''.join(str(record.seq).splitlines())\n if int(val.start) - windows < 0:\n start = 0\n else:\n start = int(val.start) - windows\n if int(val.end) + windows > len(clean_seq):\n end = len(clean_seq)\n else:\n end = int(val.end) + windows\n new_seq = clean_seq[start:end]\n att = val.attribute\n id = record.id + '_' + str(start) + '_' + str(end)\n desc = \"seq_id:\" + str(record.id)\n desc += \" feature_start:\" + str(val.start)\n desc += \" feature_end:\" + str(val.end)\n desc += \" genome_start:\" + str(start)\n desc += \" genome_end:\" + str(end)\n desc += \" feature:\" + str(val.feature)\n desc += \" attributes:\" + val.attribute\n seq = SeqRecord(Seq(new_seq), id=id, description=desc)\n buffer_seqs.append(seq)\n cont += 1\n if output:\n print('Saving...')\n SeqIO.write(buffer_seqs, output, \"fasta\")\n else:\n return buffer_seqs", "def genbank_to_gff(gb_file):\n max_size = 1e4\n gff_file = \"%s.gff3\" % os.path.splitext(gb_file)[0]\n if not os.path.exists(gff_file):\n with open(gb_file) as in_handle:\n with open(gff_file, \"w\") as out_handle:\n gb_iterator = SeqIO.parse(in_handle, \"genbank\")\n GFF.write(_filter_features(gb_iterator, max_size),\n out_handle)", "def single_string_to_actg(bin_str: str) -> str:\r\n y = \"\"\r\n i = 1\r\n while (1):\r\n if i >= len(bin_str):\r\n break\r\n if bin_str[i - 1] == '0' and bin_str[i] == '0':\r\n y += \"A\"\r\n if bin_str[i - 1] == '0' and bin_str[i] == '1':\r\n y += \"C\"\r\n if bin_str[i - 1] == '1' and bin_str[i] == '0':\r\n y += \"G\"\r\n if bin_str[i - 1] == '1' and bin_str[i] == '1':\r\n y += \"T\"\r\n i = i + 2\r\n return y", "def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()", "def read_gaf_out(go_path):\n out = io.makedeephash()\n header = []\n temp = {}\n for line in open(go_path, mode=\"r\"):\n line = line.rstrip(\"\\n\")\n if line.startswith(str(\"ID\") + \"\\t\"):\n header = re.split(r\"\\t+\", line)\n else:\n things = re.split(r\"\\t+\", line)\n temp = dict(zip(header, things))\n if len(temp.keys()) > 0:\n pr = str.upper(temp[\"GN\"])\n for k in temp.keys():\n # if the key is the same\n if 
out[pr][k] and k != \"ID\" or \"GN\":\n out[pr][k] = \";\".join([str(out[pr][k]), temp[k]])\n elif k != \"ID\" or \"GN\":\n out[pr][k] = temp[k]\n return out", "def create_arff(fname, mat, gold):\n \n gold = [int(g.split('.')[-1]) for g in gold]\n \n g = set(gold)\n\n gold = np.matrix(gold)\n \n c = np.concatenate((mat.todense(), gold.T), axis=1)\n\n ncol = mat.shape[1]\n #FIXME: pathi relative yap\n out = \"/home/tyr/Desktop/local.weka/\" + fname + '.arff'\n f = open(out, 'w')\n f.write(\"@relation %s\\n\\n\" % fname)\n for i in xrange(ncol):\n f.write(\"@attribute a%d numeric\\n\" % i)\n s = ','.join(map(str, g))\n f.write(\"@attribute class {%s}\\n\\n\" % s)\n f.write(\"@data\\n\")\n #FIXME: Avoid writing two times\n np.savetxt(f, c, delimiter=',', fmt='%5f')\n f.close()\n lines = open(out).readlines()\n f = open(out, 'w')\n for line in lines:\n if line[0] != '@' and len(line) != 1:\n line = line.split(',')\n tag = line[-1].strip()\n tag = str(int(float(tag))) + '\\n'\n line[-1] = tag\n f.write(','.join(line))\n f.write('\\n')\n else:\n f.write(line)\n f.close()", "def toGenomeRepresentation(self):\n s = \"\"\n s += str(self.axiom)\n s += \"||\"+str(self.niterations) # The iterations must be shown as well\n for prod in self.productions:\n s += \"||\"\n s += prod.toGenomeRepresentation()\n return s", "def get_fable_string():\n f = open(\"fable.txt\", \"r\")\n fable = str(f.read())\n f.close()\n return fable", "def create_t2g_from_gtf(gtf_path, t2g_path, intron=False):\n logger.info('Creating transcript-to-gene mapping at {}'.format(t2g_path))\n gtf = GTF(gtf_path)\n with open_as_text(t2g_path, 'w') as f:\n for entry in gtf.entries():\n if entry['feature'] == 'transcript':\n transcript_id = entry['group']['transcript_id']\n transcript_version = entry['group'].get(\n 'transcript_version', None\n )\n transcript = '{}.{}'.format(\n transcript_id, transcript_version\n ) if transcript_version else transcript_id\n gene_id = entry['group']['gene_id']\n gene_version = entry['group'].get('gene_version', None)\n gene = '{}.{}'.format(\n gene_id, gene_version\n ) if gene_version else gene_id\n gene_name = entry['group'].get('gene_name', '')\n f.write('{}\\t{}\\t{}\\n'.format(transcript, gene, gene_name))\n\n if intron:\n f.write(\n '{}\\t{}\\t{}\\n'.format(\n transcript + '-I', gene, gene_name\n )\n )\n\n return {'t2g': t2g_path}", "def main(inFilepath, outFilepath):\n\n gff_df=read_gff(inFilepath, additional_lst=[\"ID\"])\n attribute_lst=[]\n for _, row in gff_df.iterrows():\n orfId = \"{}_{}\".format(row[\"seqname\"], row[\"ID\"].split(\"_\")[-1])\n att = \"{};orf_id={}\".format(row[\"attribute\"], orfId)\n attribute_lst.append(att)\n gff_df[\"attribute\"]=attribute_lst\n write_gff(gff_df, outFilepath)\n print(\"DONE: output {}\".format(outFilepath))", "def writeNETString(g):\n return f\"*Vertices {len(g.nodes)}\\n*Arcs\\n*Edges\\n\"+''.join([f\"{e[0]+1} {e[1]+1} {randint(1, MAX_WEIGHT)}\\n\" for e in g.edges])", "def generate_gff( mapfile, funtax_orf_file ):\n annotation2assembly_map = pd.read_table(mapfile,\n names=['annotation','assembly','length'],\n index_col='annotation')\n funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)\n funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']\n funtax_gff['source'] = 'Prodigal_v2.00'\n funtax_gff['type'] = 'CDS'\n funtax_gff['score'] = 100.0\n funtax_gff['phase'] = 0\n funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\\1;')\n return 
funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]", "def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in 
tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds", "def write_bgf(self, filename):\n body = [\"BIOGRF{0:>5s}\\n\".format(self.biogrf)]\n if self.descrp:\n body.append(\"DESCRP {0}\\n\".format(self.descrp))\n else:\n body.append(\"DESCRP {0}\\n\".format(filename))\n body.append(\"FORCEFIELD {0}\\n\".format(self.ff))\n body.append(\"FORMAT ATOM (a6,1x,i5,1x,a5,1x,a3,1x,a1,1x,a5,3f10.5\"\n \",1x,a5,i3,i2,1x,f8.5,i2,i4,f10.5)\\n\")\n atoms = []\n hetatms = []\n conect = []\n for atom in self.atoms:\n a, c = atom.writeline()\n if atom.record == 'ATOM':\n atoms.append(a)\n elif atom.record == 'HETATM':\n hetatms.append(a)\n conect.append(c)\n body.extend(atoms)\n body.extend(hetatms)\n body.append(\"FORMAT CONECT (a6,14i6)\\nFORMAT ORDER (a6,i6,13f6.3)\\n\")\n body.extend(conect)\n body.append(\"END\\n\")\n with open(filename, 'w') as f:\n f.writelines(body)", "def gff2genome(gff3_path, out_path):\n ptrn = re.compile(r'(Genbank|RefSeq)\\s+region')\n out_lines = []\n with open(gff3_path) as in_file:\n for line in in_file:\n region = ptrn.search(line)\n if region:\n out_lines.append(line.split()[0] + \"\\t\" + line.split()[4] + \"\\n\")\n with open(out_path, 'w') as out_file:\n out_file.writelines(out_lines)", "def write_gml(self, f):\n G = self.graph.copy()\n\n # networkx doesn't like writing non-string attributes to GML\n for u, v in G.edges:\n for key in list(G[u][v].keys()):\n G[u][v][key] = str(G[u][v][key])\n nx.readwrite.gml.write_gml(G, f)", "def write_gff3(self,gff3_file=None):\r\n # write the new gff3\r\n if gff3_file:\r\n outfile = open(gff3_file, 'w')\r\n else:\r\n outfile = sys.stdout\r\n for set in self.sets:\r\n if isinstance(set, GT_seq_location):\r\n outfile.write(set.compose())\r\n else:\r\n outfile.write(set)\r\n outfile.close()", "def getFasta(fileGI,fileout = \"gis.fasta\", outfmt = \"fasta\"):\n myGIs = open(fileGI).read().split()\n gilist = [\",\".join(myGIs[i:i+500]) for i in range(0,len(myGIs),500)]\n from Bio import Entrez\n import time\n fout = open(fileout,\"w\")\n Entrez.email = \"[email protected]\"\n for ele in gilist:\n handle = Entrez.efetch(db = \"protein\", id = ele, rettype = outfmt, retmode = \"text\")\n fout.write(handle.read())\n time.sleep(3)\n fout.close()", "def build_gff(annotations, faa):\n with open(faa, \"rt\") as faa_file:\n for line in faa_file:\n if \">\" not in line:\n continue\n\n # each fasta is suffixed on the annotated faa if a prefix _INT (_1 .. 
_n)\n contig_name, start, end, strand = parse_fasta_header(line)\n if None in (contig_name, start, end, strand):\n print(\n \"It was not possible to parse the \" + line, end=\"\", file=sys.stderr\n )\n continue\n\n clean_name = Annotation.clean_seq_name(contig_name)\n\n row_annotations = Annotation.merge(\n [ann.get() for ann in annotations.get(contig_name, [])]\n )\n\n ann_string = \";\".join(\n [\n \"{}={}\".format(k, \",\".join(v).strip())\n for k, v in row_annotations.items()\n ]\n )\n\n eggNOGScore = \"\".join(row_annotations.get(\"eggNOG_score\", []))\n\n if len(ann_string):\n yield [\n clean_name,\n \"eggNOG-v2\",\n \"CDS\",\n start,\n end,\n eggNOGScore or \".\",\n \"+\" if strand == \"1\" else \"-\",\n \".\",\n \"ID=\" + clean_name + \";\" + ann_string,\n ]", "def collapsed_to_hg_gff(self):\n return op.join(self.collapse_to_hg_dir, \"touse.gff\")", "def to_fgong(self, reverse=True, ivers=1300):\n from .fgong import FGONG\n\n glob = np.zeros(15)\n glob[0] = self.M\n glob[1] = self.R\n glob[2] = self.L\n glob[14] = self.G\n\n var = np.zeros((len(self.data), 40))\n var[:,0] = self.r\n var[:,1] = self.lnq\n var[:,2] = self.T\n var[:,3] = self.P\n var[:,4] = self.rho\n var[:,6] = self.L_r\n var[:,7] = self.kappa\n var[:,9] = self.Gamma_1\n var[:,14] = self.AA\n\n if reverse:\n return FGONG(glob, var[::-1], ivers=ivers, G=self.G)\n else:\n return FGONG(glob, var, ivers=ivers, G=self.G)", "def decode_ga(ga: int) -> str:\n if not isinstance(ga, int):\n ga = struct.unpack('>H', ga)[0]\n return '{}/{}/{}'.format((ga >> 11) & 0x1f, (ga >> 8) & 0x07, (ga) & 0xff)", "def toGML(self):\n raise NotImplementedError" ]
[ "0.5754445", "0.54976296", "0.54789925", "0.5461304", "0.5443864", "0.54335296", "0.53065205", "0.52437365", "0.5173404", "0.5142212", "0.5129688", "0.51291436", "0.51273733", "0.5098814", "0.5092949", "0.50890404", "0.50819266", "0.5079442", "0.5072792", "0.5064227", "0.5050565", "0.498818", "0.49817598", "0.49738115", "0.49340692", "0.49248928", "0.49204415", "0.49106124", "0.4909226", "0.49054942" ]
0.7273705
0
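For illustration, the function expects an automaton dictionary with "initial", "transitions", and "final" keys, transitions being (source, symbol, target) triples. The example automaton below is an assumption for demonstration and not part of the dataset row:

aut = {
    "initial": ["q0"],
    "transitions": [
        ("q0", "a", "q1"),
        ("q1", "b", "q0"),
    ],
    "final": ["q1"],
}

gff = aut2GFF(aut)
# gff now holds an XML document in the GOAL file format:
# <?xml version="1.0" ...?> <structure label-on="transition" type="fa"> ...
print(gff)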
state_transl(state) -> int Translates state names into numbers.
def state_transl(state):
    nonlocal state_cnt
    nonlocal state_transl_dict

    if state not in state_transl_dict.keys():
        state_transl_dict[state] = state_cnt
        state_cnt += 1

    return str(state_transl_dict[state])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_state_code(self, data) -> int:\n return int(self.state)", "def get_new_state():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))\n return state", "def get_lookup_state(self, state):\n return \"\".join(map(str, state))", "def lookup_state(state: str | int) -> dict:\n # Try to cast state as an integer to deal with \"02\", \"2\", 2.0, np.int64(2)...\n try:\n is_fips = isinstance(int(state), int)\n except ValueError:\n is_fips = False\n if is_fips:\n state = str(int(state)).zfill(2)\n return {x[\"fips\"]: x for x in STATES}[state]\n key = \"code\" if len(state) == 2 else \"name\"\n return {x[key].lower(): x for x in STATES}[state.lower()]", "def state_encod_arch2(self, state, action):", "def stateToCode(self, state):\n\n multiplier = 1\n code = \"\"\n for i in range(self.num_joints-1, -1, -1):\n num_angles = len(self.angles[i])\n code += str(int((state / multiplier ) % num_angles))\n multiplier *= len(self.angles[i])\n\n # Return the reversed code\n return code [::-1]", "def translate_number(number):\n return NUMBER_TRANSLATOR[number]", "def represent_state(state):\n return tuple(state[0]), tuple(state[1]), tuple(state[2])", "def fromState(state):", "def states_to_numbers(self, states, out=None):\n hind = self._get_hilbert_index()\n\n out = self._to_constrained_numbers_kernel(\n self._has_constraint,\n self._bare_numbers,\n hind.states_to_numbers(states, out),\n )\n\n return out", "def new_state():\n return ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))", "def get_state_s(self, lower = True):\r\n\r\n state_s = STATE_STRINGS[self._state - 1]\r\n state_s = state_s.lower() if lower else state_s\r\n return state_s", "def symb_transl(symb):\n nonlocal symb_cnt\n nonlocal symb_transl_dict\n\n if symb not in symb_transl_dict.keys():\n symb_transl_dict[symb] = symb_cnt\n symb_cnt += 1\n\n return str(symb_transl_dict[symb])", "def add_fsm_state_names():\n from migen.fhdl.visit import NodeTransformer\n from migen.genlib.fsm import NextState, NextValue, _target_eq\n from migen.fhdl.bitcontainer import value_bits_sign\n\n class My_LowerNext(NodeTransformer):\n def __init__(self, next_state_signal, next_state_name_signal, encoding,\n aliases):\n self.next_state_signal = next_state_signal\n self.next_state_name_signal = next_state_name_signal\n self.encoding = encoding\n self.aliases = aliases\n # (target, next_value_ce, next_value)\n self.registers = []\n\n def _get_register_control(self, target):\n for x in self.registers:\n if _target_eq(target, x[0]):\n return x[1], x[2]\n raise KeyError\n\n def visit_unknown(self, node):\n if isinstance(node, NextState):\n try:\n actual_state = self.aliases[node.state]\n except KeyError:\n actual_state = node.state\n return [\n self.next_state_signal.eq(self.encoding[actual_state]),\n self.next_state_name_signal.eq(\n int.from_bytes(actual_state.encode(), byteorder=\"big\"))\n ]\n elif isinstance(node, NextValue):\n try:\n next_value_ce, next_value = self._get_register_control(\n node.target)\n except KeyError:\n related = node.target if isinstance(node.target,\n Signal) else None\n next_value = Signal(bits_sign=value_bits_sign(node.target),\n related=related)\n next_value_ce = Signal(related=related)\n self.registers.append(\n (node.target, next_value_ce, next_value))\n return next_value.eq(node.value), next_value_ce.eq(1)\n else:\n return node\n\n import migen.genlib.fsm as fsm\n\n def my_lower_controls(self):\n self.state_name = Signal(len(max(self.encoding, key=len)) * 8,\n 
reset=int.from_bytes(\n self.reset_state.encode(),\n byteorder=\"big\"))\n self.next_state_name = Signal(len(max(self.encoding, key=len)) * 8,\n reset=int.from_bytes(\n self.reset_state.encode(),\n byteorder=\"big\"))\n self.comb += self.next_state_name.eq(self.state_name)\n self.sync += self.state_name.eq(self.next_state_name)\n return My_LowerNext(self.next_state, self.next_state_name,\n self.encoding, self.state_aliases)\n\n fsm.FSM._lower_controls = my_lower_controls", "def convert_state_name_to_id(exploration_id, state_name):\n if state_name == feconf.END_DEST:\n return feconf.END_DEST\n return get_state_by_name(exploration_id, state_name).id", "def state_format(states: list) -> list:\n return list(map(_format_n0, states))", "def decode_dict(state):\n new_state = dict()\n for k, v in state.items():\n if v.decode().isnumeric():\n new_state[k.decode()] = int(v)\n else:\n new_state[k.decode()] = v.decode()\n return new_state", "def encode(self, game_state: ssm.SnakeStateMachine) -> int:\n state = [e.encode(game_state) for e in self._encoders]\n return self._state2id[tuple(state)]", "def handle_state(data: bytes) -> Tuple[bytes, str]:\n actor_id, state_length = struct.unpack('IH', data[:6])\n state = data[6:6+state_length].decode(helpers.ENCODING)\n return data[6+state_length:], f'Actor {actor_id} in {state} state'", "def _status_to_state(status):\n if status == 'failed':\n return Finding.State.ACTIVE\n elif status == 'passed' or status == 'skipped':\n return Finding.State.INACTIVE\n else:\n return Finding.State.STATE_UNSPECIFIED", "def output_integer(state, key, data):\n return int(state[key])", "def transformNumberToDNA(inputState):\n if inputState == 0:\n result = \"A\"\n elif inputState == 1:\n result = \"C\"\n elif inputState == 2:\n result = \"G\"\n elif inputState == 3:\n result = \"T\"\n else:\n raise ValueError(\"The input state is not valid as 0,1,2 or 3\") \n return result", "def parse_state(self, state: str):\r\n state = state.strip()\r\n state = state.split(';')\r\n\r\n if len(state) < 2:\r\n print(state)\r\n return\r\n\r\n for field in state:\r\n split = field.split(':')\r\n if len(split) < 2:\r\n continue\r\n\r\n key = split[0]\r\n value = split[1]\r\n\r\n if key in Tello.state_field_converters:\r\n try:\r\n value = Tello.state_field_converters[key](value)\r\n except Exception as e:\r\n print('Error parsing state value for {}: {} to {}'\r\n .format(key, value, Tello.state_field_converters[key]))\r\n self.state[key] = value\r\n return", "def get_state_num(self):\n robot_state = self.get_state('turtlebot3_waffle_pi','world')\n ball_state = self.get_state('soccer_ball','world')\n # each object is in a \"box\" that is RESOLUTION meters wide.\n robot_xbox = np.ceil((robot_state.pose.position.x-Learn.FIELD_XLEFT)/Learn.RESOLUTION)\n robot_ybox = np.ceil(robot_state.pose.position.y/Learn.RESOLUTION)\n ball_xbox = np.ceil((ball_state.pose.position.x-Learn.FIELD_XLEFT)/Learn.RESOLUTION)\n ball_ybox = np.ceil(ball_state.pose.position.y/Learn.RESOLUTION)\n # the state is the combination of dx and dy.\n dx = int(ball_xbox - robot_xbox)\n dy = int(ball_ybox - robot_ybox)\n # adjusting to remove negative values for states\n dx += Learn.BOXES_X-1\n dy += Learn.BOXES_Y-1\n # converting to unique number between 0 and NSTATES-1:\n return (2*Learn.BOXES_X-1)*dy+dx", "def numbers_to_states(self, numbers, out=None):\n\n hind = self._get_hilbert_index()\n return hind.numbers_to_states(self._to_bare_numbers(numbers), out)", "def state_to_string(board_state):\n return str(board_state)", "def 
native_value(self) -> str:\n if isinstance(self._state, Enum):\n return self._state.name.lower()\n return self._state.lower()", "def from_esi_name(cls, esi_state_name: str) -> \"Structure.State\":\n STATES_ESI_MAP = {\n \"anchor_vulnerable\": cls.ANCHOR_VULNERABLE,\n \"anchoring\": cls.ANCHORING,\n \"armor_reinforce\": cls.ARMOR_REINFORCE,\n \"armor_vulnerable\": cls.ARMOR_VULNERABLE,\n \"deploy_vulnerable\": cls.DEPLOY_VULNERABLE,\n \"fitting_invulnerable\": cls.FITTING_INVULNERABLE,\n \"hull_reinforce\": cls.HULL_REINFORCE,\n \"hull_vulnerable\": cls.HULL_VULNERABLE,\n \"online_deprecated\": cls.ONLINE_DEPRECATED,\n \"onlining_vulnerable\": cls.ONLINING_VULNERABLE,\n \"shield_vulnerable\": cls.SHIELD_VULNERABLE,\n \"unanchored\": cls.UNANCHORED,\n \"offline\": cls.POS_OFFLINE,\n \"online\": cls.POS_ONLINE,\n \"onlining\": cls.POS_ONLINING,\n \"reinforced\": cls.POS_REINFORCED,\n \"unanchoring \": cls.POS_UNANCHORING,\n }\n return (\n STATES_ESI_MAP[esi_state_name]\n if esi_state_name in STATES_ESI_MAP\n else cls.UNKNOWN\n )", "def get_state(self, state):\n return state", "def state(self, state: str) -> None:" ]
[ "0.60437626", "0.590167", "0.58867633", "0.5879405", "0.58519715", "0.5814022", "0.57338357", "0.5721833", "0.564492", "0.56102365", "0.5605891", "0.5580897", "0.5508165", "0.5494175", "0.54446507", "0.54058284", "0.53943527", "0.5359987", "0.5331887", "0.5330565", "0.53102297", "0.5292188", "0.5278801", "0.52754056", "0.52654094", "0.5249716", "0.5248246", "0.52210104", "0.52169156", "0.52034247" ]
0.78725755
0
Initialises a new ordered linked list object, with a given list of elements lst.
def __init__(self, lst=[]):
    self.__length = 0   # current length of the linked list
    self.__head = None  # pointer to the first node in the list
    for e in lst:       # initialize the list,
        self.add(e)     # by adding elements one by one
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, lst=[]):\r\n self.__length = 0 # current length of the linked list\r\n self.__head = None # pointer to the first node in the list\r\n self.__last = None # pointer to the last node in the list\r\n lst.reverse() # reverse to ensure elements will appear in same order\r\n for e in lst: # add elements of input list lst one by one\r\n self.add(e)", "def __init__(self, head: ListNode):\n self.head = head\n self.list = []\n while head:\n self.list.append(head.val)\n head = head.next", "def __init__(self, head: ListNode):\n self.l = []\n while head:\n self.l.append(head.val)\n head = head.next", "def lstToLinkedList(lst):\n if not lst: return\n LinkedList = Node(lst[0])\n LinkedList.next = lstToLinkedList(lst[1:])\n return LinkedList", "def fromList(cls, lst):\n head = None\n\n while lst:\n s = lst.pop()\n node = Node(s)\n node.next = head\n head = node\n return head", "def constructListNode(input_list):\n root = ListNode(0)\n curr = root\n for i in range(len(input_list)):\n curr.next = ListNode(input_list[i])\n curr = curr.next\n return root.next", "def lstToLinkedList(lst):\n if not lst: return\n LinkedList = Node(lst[0])\n LinkedList.next = lstToLinkedList(lst[1:])\n return LinkedList", "def __init__(self, head: ListNode):\n self.nodes = []\n\n while(head):\n self.nodes.append(head)\n head = head.next", "def from_list(cls, lst):\n return cls(lst[0], lst[1], lst[2])", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def constructList(vals):\n # Current method is iterative, recursive soln also exists\n head = ListNode(val=vals.pop(0))\n current = head\n while len(vals) > 0:\n nex = ListNode(val=vals.pop(0))\n current.next = nex; current = nex\n return head", "def __init__(self, head: ListNode):\n self.head = head\n temp = head\n i = 0\n while temp is not None:\n i+=1\n temp = temp.next\n self.len = i # 找到list的长度", "def __init__(self, iterable=None):\n # Initialize a new linked list to store the items\n # print(\"self __init__\", self)\n self.list = LinkedList()\n # self.top = self.list.head\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def __init__(self, l):\n self.l = l\n self.next = None\n self.prev = None\n self.prev_n = -1\n self.next_n = -1", "def __init__(self) -> None: \n SortedList.__init__(self)\n self.head = None", "def test_lined_list_create_with_non_iterable():\n from linked_list import Linked_List\n new_linked_list = Linked_List(-100)\n assert new_linked_list.head.value == -100", "def create_linked_list(input_list):\n head=None\n for value in input_list:\n if head is None:\n head=Node(value)\n else:\n current_node=head\n while current_node.next:\n current_node=current_node.next\n current_node.next=Node(value)\n# printlist(head)\n# print('------')\n return head", "def __init__(self):\n node = ListNode(0) # dummy\n self.head = node\n self.tail = node\n self.len = 0", "def from_list(L):\n n = None\n for i in xrange(len(L)-1, -1, -1):\n n = Node(x=L[i], nxt=n)\n return n", "def from_list(L):\n n = None\n for i in xrange(len(L)-1, -1, -1):\n n = Node(x=L[i], nxt=n)\n return n", "def create_list(nums):\n return_node = ListNode(int(nums[0]))\n prev_node = return_node\n for i in range(1, len(nums)):\n curr_node = ListNode(int(nums[i]))\n prev_node.next = curr_node\n prev_node = curr_node\n return 
return_node", "def __init__(self):\n self.head = ListNode()", "def __init__(self, items):\n if len(items) == 0:\n self._first = None\n self._rest = None\n else:\n self._first = items[0]\n self._rest = LinkedListRec(items[1:])", "def create_linked_list(input_list):\n\t\ttry:\n\t\t\thead = Node(input_list.pop(0)) #remove the first list item and return as its head\n\n\t\t\twhile (len(input_list)>0):\n\t\t\t\tcurrent_node = head\n\t\t\t\twhile current_node.next:\n\t\t\t\t\tcurrent_node = current_node.next\n\t\t\t\tcurrent_node.next = Node(input_list.pop(0))\n\n\t\texcept IndexError:\n\t\t\t\thead = None\n\t\treturn head", "def __init__(self, nestedList):\n self.curr = nestedList\n self.idx = 0", "def __init__(self, iterable=None):\n self.list = LinkedList()\n\n if iterable:\n for item in iterable:\n self.push(item)" ]
[ "0.7740163", "0.7060453", "0.6915438", "0.682955", "0.6793986", "0.6741357", "0.6705007", "0.66472715", "0.655822", "0.64578074", "0.64578074", "0.64578074", "0.64578074", "0.64578074", "0.6447554", "0.643696", "0.6430444", "0.6427214", "0.642692", "0.6379833", "0.6360155", "0.63451856", "0.63297206", "0.63297206", "0.6304716", "0.62523043", "0.62185854", "0.6189111", "0.6186756", "0.6182545" ]
0.76552415
1
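Assuming the surrounding class is called OrderedList (an assumed name, since the dump shows only methods and never the class header) and also defines the add method that appears later in this dump, a usage sketch would be:

ol = OrderedList([3, 1, 2])   # __init__ calls add() once per element
ol.add(0)                     # add() keeps the nodes in sorted order
# walking the nodes from the head now yields 0, 1, 2, 3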
Removes the node at the start of the list. Leaves the ordered list intact if already empty.
def __remove_first(self):
    if self.__head is not None:
        self.__length -= 1
        self.__head = self.__head.next()
        if self.__length == 0:   # when there are no more elements in the list,
            self.__last = None   # remove the pointer to the last element
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_node_at_start(self):\n if not self.head:\n print('List already empty.')\n return\n self.head = self.head.next", "def delete_node_at_beginning(self):\n\t\tif self.root is None:\n\t\t\traise EmptyRootException(\"ERROR: No node available in list. Please insert node in list.\")\n\t\tcurrent_node = self.root\n\t\tself.root = current_node.next\n\t\tself.root.prev = None\n\t\tself.display_nodes()", "def remove_first(self):\n if self.is_empty(): raise RuntimeError(\"Empty list\")\n\n data = self.head.data\n self.head = self.head.nxt\n self.size -= 1\n\n if self.is_empty(): self.tail = None\n else: self.head.prev = None\n\n return data", "def delete_at_beginning(self) -> None:\n current = self.head\n if current is None:\n return None\n else:\n self.head = self.head.get_next_node()\n self.head.set_previous_node(None)\n temp = current.get_data()\n del current\n self._decrease_length()\n return temp", "def delete_first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._delete_node(self._head._next)", "def remove(self):\r\n if self.first() is not None:\r\n self.dec_size()\r\n self.set_first(self.first().next())\r\n if self.size() == 0: # when there are no more elements in the list,\r\n self.__last = None # remove the pointer to the last element\r", "def remove_first(self):\n # return None if there are no Nodes\n if self.head is None:\n return None\n # save and disconect the first Node from the list\n # and set the head to the next Node\n removed = self.head\n self.head = self.head.next\n removed.next = None\n # set the tail as None if list got empty\n if self.head is None:\n self.tail = None\n # remove the skip back pointer from the second Node if needed\n elif self.head.next is not None:\n self.head.next.skip_back = None\n \n return removed.data", "def delete_node_at_end(self):\n if not self.head:\n print('List already empty')\n return\n temp = self.head\n while temp.next:\n if not temp.next.next:\n break\n temp = temp.next\n temp.next = None", "def delete_first(self):\n if self.is_empty():\n raise Empty('list is empty')\n answer = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty(): # special case as deque is empty\n self._tail = None # removed head had been the tail\n else:\n self._head._prev = None\n return answer", "def delete_first(self):\n if self.is_empty():\n raise Empty(\"List is empty\")\n return self._delete_node(self._header._next)", "def remove_first(self):\n if self.is_empty():\n raise self.NoSuchNodeException()\n\n tmp_val = self.head.data\n self.head = self.head.next_node\n self.list_size -= 1\n return tmp_val", "def remove_from_head(self):\n\n if self.size == 0: # no elements in list\n return None # nothing to return\n\n removed_value = self.head.value # make a copy of the node to be deleted\n\n if self.size == 1: # if only one element in list (node is head and tail)\n self.head = self.tail = None # list will be empty\n\n else: # more than one element in list\n self.head = self.head.next # shift head right (reassign head to head.next)\n self.head.prev = None # reassign head.prev to point at None (it used to point at old_head)\n\n self.size -= 1\n return removed_value", "def remove_first(self):\n if self.is_empty():\n raise IndexError\n else:\n self._first = self._rest._first\n if self._rest.is_empty():\n self._rest = None\n else:\n self._rest = self._rest._rest", "def delete_list(self): \n temp_node = self.head\n while temp_node is not None:\n prev_node = temp_node\n temp_node = temp_node.next\n # 
prev_node.val += \": deleted\" # for sanity check\n # reset data\n prev_node.val = None\n prev_node.next = None", "def clear(self):\n self.head = None", "def move_to_head(self, node):\n if node is self.head:\n return\n value = node.value\n self.delete(node)\n self.add_to_head(value)", "def clear(self):\n SortedList.clear(self)\n self.head = None", "def pop(self) -> None:\n node = self.head\n self.head = self.head.next\n node.next = None", "def remove(self, value):\n node = self.first()\n # case 1 : in case of empty list, do nothing and return None\n if node is None:\n return None\n # case 2 : list has at least one element and node to be removed is the first element\n if node.value() == value:\n self.__head = node.next()\n self.__length -= 1\n node.set_next(None)\n return node\n # case 3 : list has at least one element and node to be removed is not the first element\n previous = node\n node = node.next()\n while node is not None:\n if node.value() == value:\n previous.set_next(node.next())\n self.__length -= 1\n node.set_next(None)\n return node\n else:\n node = node.next()\n return None\n\n ##############", "def pop_first(self):\n self.pop_item(0)", "def removeFirst(self):\n if self.__nelems == 0:\n raise BaseException('Empty List')\n\n temp = self.__head\n if self.__nelems == 1:\n self.__head = self.__tail = None\n\n else:\n self.__head = temp.getNext()\n temp.setNext(None)\n\n self.__nelems -= 1\n\n return temp.getData()", "def delete_first(self):\n if self.n == 0:\n return None\n first = self.A[1]\n self.n -= 1\n last = self.A.pop()\n if self.n > 0:\n self.A[1] = last\n self.pos[last[0]] = 1\n self.combine(1)\n return first[0]", "def remove_front(self):\n\n if self.items:\n return self.items.pop(0)\n return None", "def pop_head(self):\n if self.is_empty():\n return None\n\n current = self._head._next\n node = self._head\n current._previ = None\n self._head = current\n data = node._data\n nodo = Node(None)\n\n self._size -= 1\n\n return data", "def delete_first(self):\n if self._size == 0:\n raise Empty('Dequeue is empty')\n return self._delete_node(self._head._next)", "def deleteHead(self):\n if not self._head:\n return\n\n if self._head is self._tail:\n self._head = None\n self._tail = None\n else:\n self._head = self._head.next\n self._size -= 1", "def delete_first(self):\n self.deque.pop(0)", "def remove_first(lst, elem):\n \"*** YOUR CODE HERE ***\"\n if len(lst) <= 0:\n return []\n if lst[0] == elem:\n return lst[1:]\n return lst[:1] + remove_first(lst[1:], elem)", "def _delete_node(self, node):\n\n if self.is_empty():\n raise Empty(\"List is empty!\")\n\n predecessor = node._prev\n successor = node._next\n\n predecessor._next = successor\n successor._prev = predecessor\n\n elem = node._element\n node._prev = node._next = node._element = None\n\n self._size -= 1\n\n return elem", "def _move_to_head(self, node):\n self._remove_node(node)\n self._add_node(node)" ]
[ "0.8351589", "0.7582796", "0.73799616", "0.7289658", "0.7140646", "0.71081614", "0.7106494", "0.7027544", "0.68831503", "0.6852153", "0.6819653", "0.669881", "0.6663422", "0.6578224", "0.6563162", "0.6558882", "0.65287566", "0.6518836", "0.648081", "0.6434819", "0.6407462", "0.64074534", "0.6346933", "0.63452184", "0.6319415", "0.62860674", "0.6268411", "0.6251216", "0.6249486", "0.6244379" ]
0.77263117
1
Adds a node with value s at the correct position in an already sorted (ordered) linked list, so the list stays sorted.
def add(self, s):
    current = self.first()
    # case 1 : list is empty, add new node as first node
    if self.size() == 0:
        self.__add_first(s)
        return
    # case 2 : list is not empty, element to be added is smaller than all existing ones
    elif s < current.value():
        self.__add_first(s)
        return
    # case 3 : list is not empty, element is larger than value of current element
    else:
        self.__length += 1
        nxt = current.next()
        # loop until we are at the end to find where to insert element
        while nxt is not None:
            if s < nxt.value():
                n = self.Node(s, nxt)
                current.set_next(n)
                return
            current = nxt
            nxt = nxt.next()
        current.set_next(self.Node(s, None))
        return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(s, v):\n if empty(s):\n return Link(v)\n if s.first > v:\n s.first, s.rest = v, Link(s.first, s.rest)\n elif s.first < v and empty(s.rest):\n s.rest = Link(v, s.rest)\n elif s.first < v:\n add(s.rest, v)\n return s", "def add(self, s, value):\n\t\thead, tail = s[0], s[1:]\n\t\tcur_node = self.root[head]\n\t\tif not tail:\n\t\t\tcur_node.value = value\n\t\t\treturn # No further recursion\n\t\tcur_node.add(tail, value)", "def add(s, v):\n \n if empty(s):\n return Link(v)\n head = s\n if head.first > v:\n # s = Link(v, s) #error: assigment, then s will rebind to a new object\n # s.first, s.rest = v, s # error s.rest = s\n s.first, s.rest = v, Link(s.first, s.rest)\n return s\n # head.first <= v\n while not empty(head.rest) and head.rest.first <= v:\n head = head.rest\n if head.first == v:\n return s\n else:\n head.rest = Link(v, head.rest)\n return s", "def test_add_circular_node_sll(self):\n sll = SinglyLinkedList()\n a = Node('a')\n sll.insert_beg(a)\n sll.insert_beg(a)", "def addNode(self, new_value): # Class O(n)\r\n if type(new_value) is not int: raise ValueError(\"Please, insert an integer\")\r\n h = self.head\r\n while 'next' in dir(h.next):\r\n h = h.next\r\n else:\r\n h.next = Node(new_value)", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def add(self, item):\n \"\"\"\n :type item: Node()\n :rtype None\n \"\"\"\n node = Node(item)\n if self.head == None or self.head.getData() > node.getData():\n node.setNext(self.head)\n self.head = node\n return\n \n prev = self.head\n curr = self.head\n while curr:\n if curr.getData() > node.getData():\n prev.setNext(node)\n node.setNext(curr)\n return \n prev = curr\n curr = curr.getNext()\n \n # Add to the end\n prev.setNext(node)", "def insert_node(self, head, node):\n prev, curr = None, head\n while curr.val < node.val:\n prev, curr = curr, curr.next\n if not prev:\n head = node\n else:\n prev.next = node\n node.next = curr\n return head", "def __addToLevel(self, head, value):\n\n #if DEBUG: print('\\t__addToLevel({})'.format(value))\n\n cur = head\n \n if cur.next == None:\n output = self.__insert(cur,value)\n return output\n \n #cur = cur.next\n\n while cur:\n if cur.next == None or \\\n cur.val == value or\\\n cur.next.val > value:\n output = self.__insert(cur,value)\n #output = cur\n break\n cur = cur.next\n return output", "def sorted_insert(self, value):\n new = Node(value)\n if self.__head is None:\n self.__head = new\n return\n\n cur = self.__head\n if new.data < cur.data:\n new.next_node = self.__head\n self.__head = new\n return\n\n while (cur.next_node is not None) and (new.data > cur.next_node.data):\n cur = cur.next_node\n\n new.next_node = cur.next_node\n cur.next_node = new\n return", "def sorted_insert(self, value):\n if self.__head is None:\n self.__head = Node(value, None)\n elif value < self.__head.data:\n self.__head = Node(value, self.__head)\n else:\n n = self.__head\n while n.next_node is not None and n.next_node.data <= value:\n n = n.next_node\n new_node = Node(value, n.next_node)\n n.next_node = new_node", "def sorted_insert(self, value):\n if self.__head is None or self.__head.data > value:\n new_node = Node(value)\n if self.__head is not None:\n new_node.next_node = self.__head\n self.__head = new_node\n else:\n runner = 
self.__head\n while runner.next_node and value > runner.next_node.data:\n runner = runner.next_node\n runner.next_node = Node(value, runner.next_node)", "def add(self, item):\n \n previous = None\n current = self.head\n \n while current is not None:\n if current.get_data() > item:\n break\n else:\n previous = current\n current = current.get_next()\n \n n = Node(item)\n # If node is to be added at the beginning (incl. case of empty list)\n if previous is None:\n n.set_next(self.head)\n self.head = n\n else:\n previous.set_next(n)\n n.set_next(current)", "def insert(self,x,pos):\n new = ListNode()\n new.value = x\n new.next = pos.next\n pos.next = new", "def insert(self, value, pos):\r\n\r\n if self.head is None:\r\n self.head = Node(value)\r\n return\r\n\r\n if pos == 0:\r\n self.prepend(value)\r\n return\r\n\r\n index = 0\r\n node = self.head\r\n while node.next and index <= pos:\r\n if (pos - 1) == index:\r\n new_node = Node(value)\r\n new_node.next = node.next\r\n node.next = new_node\r\n return\r\n\r\n index += 1\r\n node = node.next\r\n else:\r\n self.append(value)", "def addNode(self, new_data):\r\n curr = self.head\r\n\r\n # Add new Node\r\n if curr is None:\r\n n = Node(new_data) \r\n self.head = n\r\n return\r\n \r\n # Sort Nodes \r\n if curr.data > new_data:\r\n n = Node(new_data) \r\n n.next = curr\r\n self.head = n\r\n return\r\n\r\n while curr.next is not None:\r\n if curr.next.data > new_data:\r\n break\r\n curr = curr.next\r\n n = Node(new_data) \r\n n.next = curr.next\r\n curr.next = n\r\n return", "def appendleft(self, item):\n tmpNode = Node(item, self.head.next)\n self.head.next = tmpNode\n self._size += 1", "def addNodeAfter(self, new_value, after__node): # Class O(n)\r\n if not isinstance(new_value, Node):\r\n if new_value % 1 != 0: raise ValueError(\"Please, insert an integer\")\r\n if after__node > self.length(): raise ValueError(\"Invalid position\")\r\n count = 1\r\n h = self.head\r\n while count != after__node:\r\n h = h.next\r\n count += 1\r\n move_after = h.next\r\n h.next = Node(new_value)\r\n h.next.next = move_after", "def add_node(self, node):\n temp = self.head.post\n self.head.post = node\n node.pre = self.head\n node.post = temp\n temp.pre = node", "def sorted_insert(self, value):\n\n new = Node(value)\n if self.__head is None:\n self.__head = new\n elif self.__head.data > value:\n new.next_node = self.__head\n self.__head = new\n else:\n temp = self.__head\n while (temp.next_node is not None and temp.next_node.data < value):\n temp = temp.next_node\n new.next_node = temp.next_node\n temp.next_node = new", "def add(self, data):\n node = Node(data)\n if self.head == None:\n self.head = node\n\n else:\n traverse = self.head\n if self.head.data > node.data:\n self.head = node\n node.next = traverse\n\n if self.head.data < node.data:\n temp = self.head\n while traverse.next != None:\n if traverse.data < node.data:\n temp = traverse\n traverse = traverse.next\n\n if traverse.data < node.data:\n temp = traverse\n\n temp1 = temp.next\n temp.next = node\n node.next = temp1", "def add(self, item):\n # must keep two pointers marching\n # in synch down the list.\n current = self._head\n previous = None\n while current != None:\n if current.getData() > item:\n # we’ve reached the insertion spot\n break\n else:\n # otherwise, advance both pointers\n previous = current\n current = current.getNext()\n temp = Node(item)\n if previous == None:\n # insert at the start of the list\n temp.setNext(self._head)\n self._head = temp\n else:\n temp.setNext(current)\n 
previous.setNext(temp)", "def append(self, value):\n node = SLLNode(value)\n if self.head is None: \n self.head = node \n else: \n tail_node = self.head\n while tail_node.next_node is not None: \n tail_node = tail_node.next_node\n tail_node.next_node = node", "def insert(self, pos, item):\n \n if pos == 0:\n self.add(item)\n \n elif pos >= self.length():\n self.append(item)\n \n else:\n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n n = Node(item)\n previous.set_next(n)\n n.set_next(current)", "def insert(self, pos, element):\n if pos <= 0:\n self.add(element)\n elif pos >= self.length():\n self.append(element)\n else:\n node = Node(element)\n cursor = self.head\n for i in range(pos-1):\n cursor = cursor.next\n node.next = cursor.next\n node.prev = cursor\n cursor.next.prev = node\n cursor.next = node", "def reverse(head, s, f):\r\n count = 0\r\n n = head\r\n if s>f:\r\n return\r\n prev_start, start, end, post_end = [None, ]*4\r\n while n:\r\n count += 1\r\n if count == s - 1:\r\n prev_start, start = n, n.next\r\n if count == f:\r\n end, post_end = n, n.next\r\n n = n.next\r\n prev, t = post_end, start\r\n while t != post_end:\r\n temp = t.next\r\n t.next = prev\r\n prev = t\r\n t = temp\r\n prev_start.next = end", "def add_node(self, node):", "def add(self, item):\n \n n = Node(item)\n n.set_next(self.head)\n self.head = n", "def addNode(self):\n\t\tself.head.insert(self.size, len(self.succ))\n\t\tself.size += 1" ]
[ "0.69526494", "0.6913095", "0.66902816", "0.615452", "0.6098683", "0.6022445", "0.6022445", "0.60025346", "0.5976091", "0.5931356", "0.5927383", "0.589682", "0.589132", "0.5852468", "0.58322865", "0.5830779", "0.58050364", "0.58000183", "0.57888347", "0.5756908", "0.57483876", "0.57032037", "0.5677238", "0.5677014", "0.5665961", "0.5602123", "0.5582651", "0.55795985", "0.55562574", "0.55556023" ]
0.7564802
0
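For readers skimming the retrieved documents above, here is a minimal, self-contained sketch of the sorted-insert idea that the positive document implements. It is illustrative only: the class and helper names (Node, OrderedList) are placeholders chosen for this sketch, not the dataset's original classes.

class Node:
    """Single node of a singly linked list."""
    def __init__(self, value, next_node=None):
        self.value = value
        self.next = next_node

class OrderedList:
    """Singly linked list that keeps its elements sorted on insert."""
    def __init__(self):
        self.head = None

    def add(self, value):
        # Case 1/2: empty list, or the new value belongs before the current head.
        if self.head is None or value < self.head.value:
            self.head = Node(value, self.head)
            return
        # Case 3: walk until the next node is >= value, then splice the new node in.
        current = self.head
        while current.next is not None and current.next.value < value:
            current = current.next
        current.next = Node(value, current.next)

# Usage: inserting out of order still yields a sorted list.
lst = OrderedList()
for v in (5, 1, 3):
    lst.add(v)
node, values = lst.head, []
while node is not None:
    values.append(node.value)
    node = node.next
print(values)  # [1, 3, 5]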
Removes the first node with the given value from the ordered linked list. Leaves the list intact if already empty.
def remove(self, value):
    node = self.first()
    # case 1 : in case of empty list, do nothing and return None
    if node is None:
        return None
    # case 2 : list has at least one element and node to be removed is the first element
    if node.value() == value:
        self.__head = node.next()
        self.__length -= 1
        node.set_next(None)
        return node
    # case 3 : list has at least one element and node to be removed is not the first element
    previous = node
    node = node.next()
    while node is not None:
        if node.value() == value:
            previous.set_next(node.next())
            self.__length -= 1
            node.set_next(None)
            return node
        else:
            # advance both pointers so previous always trails node by one
            previous = node
            node = node.next()
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_value(self, value):\n if self.head is None: \n raise ValueError('Deleting from empty list.')\n node = self.head \n if node.value == value: \n self.head = self.head.next_node \n return node \n while node.next_node is not None:\n current = node.next_node \n if current.value == value:\n node.next_node = current.next_node \n return current \n node = current\n raise ValueError('Deleting non-existing value.')", "def remove(self, value):\r\n if self.head is None:\r\n return\r\n\r\n if self.head.value == value:\r\n self.head = self.head.next\r\n return\r\n\r\n node = self.head\r\n while node.next:\r\n if node.next.value == value:\r\n node.next = node.next.next\r\n return\r\n node = node.next", "def remove_value(self, value):\n if self.head is None: \n raise ValueError('Deleting from empty list.')\n node = self.head \n if node.value == value: \n self.head = self.head.next_node \n if self.head is None: \n self.tail = None\n else:\n self.head.prev_node = None \n return node \n while node.next_node is not None:\n node = node.next_node \n if node.value == value:\n node.prev_node.next_node = node.next_node \n if node.next_node is None: \n self.tail = node.prev_node \n else:\n node.next_node.prev_node = node.prev_node\n return node\n raise ValueError('Deleting non-existing value.')", "def remove_node(self, value):\n node = self.head\n\n while node:\n if self.head.value == value:\n self.head = self.head.next\n return\n if node.next.value == value:\n node.next = node.next.next\n return\n node = node.next", "def delete(self, value):\n current = self.head\n prev = None\n\n while current:\n if current.value == value:\n if prev == None:\n self.head = current.next\n else:\n prev.next = current.next\n break\n prev = current\n current = current.next", "def delete(self, value):\n current = self.head\n if current.value == value:\n self.head = current.next\n else:\n while current:\n if current.value == value:\n break\n prev = current\n current = current.next\n if current == None:\n return\n prev.next = current.next\n current = None", "def remove(self,value):\n if self.is_empty():\n return\n current = self._head\n if current.value == value:\n self._head = self._head.next\n elif current.next is None:\n # Contains one element only, but it is not the one we are looking for.\n return\n else:\n while current.next.value != value:\n current = current.next\n if current.next is None: # Remove value not found.\n return\n\n # Find removed value, remove it.\n current.next = current.next.next\n if current.next is None:\n self._tail = current\n self._size -= 1", "def delete(self, value):\n current = self.head\n previous = None\n while current.value != value and current.next:\n previous = current\n current = current.next\n if current.value == value:\n if previous:\n previous.next = current.next\n else:\n self.head = current.next\n pass", "def delete(self, value):\n # Iterating to node that has value\n node = self.head\n last_node = None\n while node is not None and node.value != value:\n last_node = node\n node = node.next_\n\n # Check if the node has been found\n if node is None:\n return\n\n # Checking whether head matched\n if last_node is None:\n self.head = node.next_\n return\n\n # Deleting node\n last_node.next_ = node.next_", "def remove_value(self, value):\n # check the head's key\n temp_node = self.head\n if temp_node.val==value:\n self.head = temp_node.next\n temp_node = None\n self.n -= 1\n return\n\n # search for the key value\n while temp_node.val != value: # check the next node's key\n prev_node = 
temp_node # store prev node to change prev.next\n temp_node = temp_node.next\n # if the key is not found\n if temp_node == None:\n print(\"Error; key value is not found\")\n return\n else:\n # reconfigure; unlink the current node\n prev_node.next = temp_node.next\n temp_node = None\n self.n -= 1", "def removeNodesByValue(self, value): # Class O(nlog2n)\r\n # I'm assuming this classification because this function\r\n # calls removeNode()\r\n h = self.head\r\n count = 1\r\n while count <= self.length():\r\n try:\r\n if h.value == value:\r\n self.removeNode(count)\r\n if h.next != h:\r\n h = h.next\r\n next\r\n else:\r\n count += 1\r\n h = h.next\r\n except:\r\n break", "def remove(self, val):\n current_node = self.head\n previous_node = None\n\n while current_node:\n if current_node.val == val:\n if previous_node:\n previous_node.next = current_node.next\n else:\n self.head = current_node.next\n\n previous_node = current_node\n current_node = current_node.next", "def delete(self, value):\n current = self.head\n index = 1\n ''' delete first element '''\n if index == 1 and current.value == value:\n print (\"deleting first element\")\n current.next = current.next.next\n return\n \n ''' delete last element '''\n while not current.next.next and current.next.value == value:\n print (\"deleting last element\")\n current.next = None\n return\n \n ''' anywhere in between '''\n while current.next.next and current.next.value != value:\n current = current.next\n \n ''' delete the element '''\n print (\"deleting anywhere between element\")\n current.next = current.next.next\n return", "def remove_recursive(self, value, node=None):\n if node == None:\n node = self.head\n\n if node.value == value:\n if node.prev:\n node.prev.next = node.next\n else:\n self.head = node.next\n if node.next:\n node.next.prev = node.prev\n elif node.next:\n self.remove_recursive(value, node.next)", "def remove_value(self, value):\n if self.empty():\n return \"Linked List is empty\"\n h = self.head\n previous = self.head\n idx = 0\n while h is not None:\n if h.data is value:\n if previous is h:\n self.head = h.next\n return idx\n else:\n previous.next = h.next\n h = None\n return idx\n idx += 1\n previous = h\n h = h.next\n\n pass", "def remove_from_head(self):\n\n if self.size == 0: # no elements in list\n return None # nothing to return\n\n removed_value = self.head.value # make a copy of the node to be deleted\n\n if self.size == 1: # if only one element in list (node is head and tail)\n self.head = self.tail = None # list will be empty\n\n else: # more than one element in list\n self.head = self.head.next # shift head right (reassign head to head.next)\n self.head.prev = None # reassign head.prev to point at None (it used to point at old_head)\n\n self.size -= 1\n return removed_value", "def remove_flat(self, value):\n node = self.head\n while node and node.value != value:\n node = node.next\n\n if node:\n if node.prev:\n node.prev.next = node.next\n else:\n self.head = node.next\n if node.next:\n node.next.prev = node.prev", "def remove_by_value(self, data):\n pre_node = None\n for n in self:\n if n.data == data:\n if pre_node is None:\n self.pop()\n else:\n pre_node.next = n.next\n break\n pre_node = n\n else:\n raise ValueError(f'value [{data}] not found in linked list')", "def delete_by_value(self, key):\n cur_node = self.head\n\n if cur_node and cur_node.data == key:\n self.head = cur_node.next\n cur_node = None\n prev = None\n while cur_node and cur_node.data != key:\n prev = cur_node\n cur_node = cur_node.next\n if 
cur_node is None:\n return\n prev.next = cur_node.next\n cur_node = None", "def remove(self, val):\n node = self.search(val)\n if not node:\n return\n if node.left and not node.right:\n self._remove_parent(node.left)\n self.size_number -= 1\n return\n elif node.right and not node.left:\n self._remove_parent(node.right)\n self.size_number -= 1\n return\n nxt = None\n nxt = self._nxt_inorder(nxt, node, val)\n if nxt is None:\n try:\n self._redirect(node, None)\n except AttributeError:\n self.root = None\n self.size_number -= 1\n return\n self.remove(nxt.data)\n self._replace_node(nxt, node)\n\n # check balance on parent of the node we just removed", "def remove(self, value):\n # Using a slice assignment (children[:] =) the list is modified instead of assign the name to a new list (children =).\n self.children[:] = (child for child in self.children if child.value != value)", "def remove_first(self):\n if self.is_empty():\n raise self.NoSuchNodeException()\n\n tmp_val = self.head.data\n self.head = self.head.next_node\n self.list_size -= 1\n return tmp_val", "def remove(self, val: Generic[T]) -> None:\n def remove_node(node: Node) -> Node: #recursive function\n if node is self.node:\n return node\n if node.val == val: #removes all nodes with value val\n next_node = node.next\n prev_node = node.prev\n\n prev_node.next = next_node\n next_node.prev = prev_node\n remove_node(node.next)\n\n remove_node(self.node.next)", "def __delitem__(self, value) -> bool: # True -> if element was deleted else False\n if not self.head:\n return False\n if self.head.value == value:\n if self.head.next_value:\n self.head = self.head.next_value\n else:\n self.head = None\n return True\n link = self.head.next_value\n prev = self.head\n while link:\n if link.value == value:\n prev.next_value = link.next_value\n return True\n prev = link\n link = link.next_value\n return False", "def remove(self, value):\n if self.root is None:\n return self.NULL_NODE\n removed = self._remove(self.root, value)\n if removed and removed.value:\n self.size -= 1\n self.root = removed\n return True\n else:\n return False", "def remove(self, value):\n tower = [None] * self.max_levels\n node = self.head\n for level in reversed(range(self.max_levels)):\n while node.next[level].value < value:\n node = node.next[level]\n tower[level] = node\n if value != tower[0].next[0].value:\n raise KeyError('Not Found')\n d = len(tower[0].next[0].next)\n for level in range(d):\n prev = tower[level]\n prev.width[level] += prev.next[level].width[level] - 1\n prev.next[level] = prev.next[level].next[level]\n for level in range(d, self.max_levels):\n tower[level].width[level] -= 1\n self.size -= 1", "def remove(self, value):\n\n list.remove(self, value)\n self.changed()", "def delete_first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._delete_node(self._head._next)", "def remove(self,valor):\n\n if self.size==0:\n return False\n else:\n current=self.first\n try:\n while current.next.valor!=valor:\n current=current.next\n deleted_node=current.next\n current.next=deleted_node.next\n except AttributeError:\n return False\n self.size-=1\n return deleted_node", "def remove_first(self):\n # return None if there are no Nodes\n if self.head is None:\n return None\n # save and disconect the first Node from the list\n # and set the head to the next Node\n removed = self.head\n self.head = self.head.next\n removed.next = None\n # set the tail as None if list got empty\n if self.head is None:\n self.tail = None\n # remove the skip back pointer 
from the second Node if needed\n elif self.head.next is not None:\n self.head.next.skip_back = None\n \n return removed.data" ]
[ "0.84336585", "0.82318616", "0.82197016", "0.79544455", "0.7914709", "0.78713995", "0.78583854", "0.7776217", "0.7672654", "0.76066184", "0.7589861", "0.7580605", "0.7466483", "0.7233176", "0.71712923", "0.7104648", "0.70787865", "0.70723754", "0.6865433", "0.6846761", "0.67830116", "0.67290306", "0.6728027", "0.6712039", "0.6692006", "0.66165", "0.66025573", "0.65874356", "0.65826344", "0.65552557" ]
0.82805014
1
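The same removal logic can be shown on a stripped-down, standalone list. The sketch below is not the dataset's code; Node and remove are hypothetical names used only to illustrate the three cases (empty list, match at the head, match in the interior) that the positive document handles.

class Node:
    def __init__(self, value, next_node=None):
        self.value = value
        self.next = next_node

def remove(head, value):
    """Remove the first node holding `value`; return the (possibly new) head."""
    # Case 1: empty list -- nothing to do.
    if head is None:
        return None
    # Case 2: the head itself holds the value.
    if head.value == value:
        return head.next
    # Case 3: search the rest, keeping a trailing `previous` pointer.
    previous, node = head, head.next
    while node is not None:
        if node.value == value:
            previous.next = node.next  # unlink the matching node
            break
        previous, node = node, node.next
    return head

# Usage: build 1 -> 3 -> 5, remove 3, print what is left.
head = Node(1, Node(3, Node(5)))
head = remove(head, 3)
out = []
while head is not None:
    out.append(head.value)
    head = head.next
print(out)  # [1, 5]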
Returns the value of the cargo contained in this node.
def value(self):
    return self.__cargo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value(self):\r\n return self.__cargo", "def value(self):\n return self.node_value", "def value(self):\n return self.get_attribute(\"value\", str(self.children))", "def getValue(self):\n return _libsbml.ASTNode_getValue(self)", "def getValue(self):\n \n return self._value", "def getValue(self):\n return self.value", "def getValue(self):\n return self.value", "def get_node_value(self, n):\n node = self.get_node(n)\n if node:\n return node.value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def value(self):\n return self.value()._value", "def value(self):\n return self.get_data(\"value\")", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value" ]
[ "0.8142979", "0.73237133", "0.7138349", "0.69184244", "0.6778445", "0.6735514", "0.6679945", "0.6646512", "0.6615436", "0.6615436", "0.6615436", "0.66109717", "0.66104925", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634", "0.6604634" ]
0.8199494
0
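The add and remove documents above call value(), next(), and set_next() on their nodes, and this row's getter exposes a private cargo field. A minimal node class with exactly that interface is sketched below; it is an inference from the visible documents, not code taken from the dataset.

class Node:
    """Minimal node with a private cargo field and the accessors the
    documents above rely on (value, next, set_next)."""
    def __init__(self, cargo, next_node=None):
        self.__cargo = cargo
        self.__next = next_node

    def value(self):
        # Returns the value of the cargo contained in this node.
        return self.__cargo

    def next(self):
        return self.__next

    def set_next(self, node):
        self.__next = node

# Usage: two chained nodes.
tail = Node("b")
head = Node("a", tail)
print(head.value(), head.next().value())  # a b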
L is a list of locations ordered by their id; L[i] is the name of the ith location.
E is an adjacency matrix whose entries are weights, where E[i][j] denotes an edge from i to j; E[i][j] == 'x' if and only if such an edge does not exist.
H is a list of homes identified by id.
s is the starting location.
fileName is the name of the file to write to.
def print_input(L, E, H, s, fileName):
    f = open(fileName, 'w')
    f.write(str(len(L))+'\n')  # The first line of the input should contain a single integer, which equals the number of locations
    f.write(str(len(H))+'\n')  # The second line should also be an integer, which equals the number of homes
    for location in L:
        f.write(location+' ')
    f.write('\n')
    for home_id in H:
        f.write(L[home_id]+' ')
    f.write('\n')
    f.write(s+'\n')
    for i in range(len(E)):
        for j in range(len(E)):
            f.write(str(E[i][j]) + ' ')
        f.write('\n')
    f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = [starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in dropoff_locations:\n if loc in home_indexes:\n dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = 
adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in already_seen:\n to_remove.append(i)\n else:\n already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n 
# plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and [len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and [len(very_final_path2) - 1] != index:\n very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass", "def pathij(self, i, j, pathlist):\n import math\n path = []\n \n visit = np.zeros(self.nodenum)\n \n self.DFS(i, j, visit, path, pathlist)\n \n return pathlist", "def safeJourney(Alist,s,d):\n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin\n \n #for n,w in G.adj[nmin].items():\n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #dcomp = dmin + w\n dcomp = max(w,dmin) #take largest value to record most dangerous segment\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n #path[n].extend(path[nmin])\n #path[n] = path[nmin]\n \n #path[n].append(n) #n not nmin\n print(path)\n # else:\n #dcomp = dmin + w\n # 
dcomp = max(w,dmin)\n # Udict[n] = dcomp\n #path[n].extend(path[nmin])\n #path[n].append(nmin) \n \n if nmin == d: #if current node is destination\n return path[d],Edict[d]\n return [] #no path", "def shortJourney(Alist,s,d):\n \"\"\"Find shortest distances to s in weighted graph, G\"\"\"\n \n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin \n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #key difference below\n dcomp = (w+dmin) #take sum as you go along\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n print(path) \n if nmin == d: #if current node is destination\n return [path[d],Edict[d]]\n return [] #no path", "def get_wh_list(br_data_df, agent_full_name, agent_directory):\r\n agent_df = pd.read_csv(f'{agent_full_name}.csv', header=0, delimiter=\",\", engine='python')\r\n agents_df = agents_data()\r\n br_data_df['new_col'] = br_data_df['agent_type'].astype(str) ### esto no sé si debería cambiarlo\r\n br_data_df = br_data_df.loc[br_data_df['new_col'] == \"wh\"]\r\n br_data_df = br_data_df.reset_index(drop=True)\r\n to = str()\r\n ca_location_2 = agent_df.loc[0, 'location_2']\r\n br_data_df['location_ca'] = str(ca_location_2) ### location 2!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_2 # location 2!!!!!\r\n active_users_location_df = br_data_df\r\n ca_locations_dist_df = locations_min_distances()\r\n ca_locations_dist_df = ca_locations_dist_df[['id_min', to]]\r\n wh_list = br_data_df['from_to'].tolist()\r\n values = []\r\n keys = []\r\n for i in wh_list:\r\n a = ca_locations_dist_df.loc[ca_locations_dist_df[to] == i]\r\n id_loop = a.loc[a.index[-1], 'id_min']\r\n tr_to_loop = a.loc[a.index[-1], to]\r\n keys.append(id_loop)\r\n values.append(tr_to_loop)\r\n segment = dict(zip(keys, values))\r\n segment_df = pd.DataFrame([segment])\r\n segment_df = segment_df.T\r\n indexes = segment_df.index.values.tolist()\r\n segment_df = segment_df.rename(columns={0: \"segment\"})\r\n segment_df.insert(loc=0, column='id_min', value=indexes)\r\n segment_df = segment_df.sort_values(by=['id_min'])\r\n segment_df = segment_df.reset_index(drop=True) # segment_df contains the location of active tr and id_name sorted by shortest distance to them\r\n tr_list = active_users_location_df['agent'].tolist()\r\n jid_names = pd.DataFrame()\r\n for i in tr_list:\r\n a = agents_df.loc[agents_df['Name'] == i]\r\n jid_names = jid_names.append(a)\r\n active_users_location_df = active_users_location_df.rename(columns={'from_to': 'segment'})\r\n print(f'active_users_location_df: {active_users_location_df}')\r\n print(f'segment_df: {segment_df}')\r\n results = 
active_users_location_df.merge(segment_df, on='segment')\r\n results = results.rename(columns={'agent': 'Name'})\r\n results = results.merge(jid_names, on='Name')\r\n results = results.sort_values(by=['id_min'])\r\n results = results[['Name', 'location', 'segment', 'id_min', 'User name']]\r\n return results", "def CreateAndSave_l_matrices(self, lmin, filename, theta, phi):\n\t\tif lmin == 0:\n\t\t\tmode = 'w'\n\t\telse:\n\t\t\tmode = 'r+'\n\n\t\tf = tables.openFile(filename, mode)\n\t\troot = f.root\n\t\tindex_iterator = self.Config.AngularRepresentation.index_iterator\n\n\t\tprint \"Legendre ...\"\n\t\tprevl = -1;\n\t\tfor i, lm in enumerate(index_iterator.__iter__()):\n\t\t\tprint i\n\t\t\tif lm.l >= lmin:\n\t\t\t\tif lm.l != prevl:\n\t\t\t\t\tmidx = 0\n\t\t\t\t\tleg = zeros([(2 * lm.l + 1), len(theta), len(phi)], dtype=complex)\n\n\t\t\t\tfor j, my_theta in enumerate(theta):\n\t\t\t\t\tleg[midx,j,:] = sph_harm(lm.m, lm.l, phi, my_theta)\n\t\t\t\t\n\t\t\t\tmidx += 1\n\n\t\t\t\tif midx == 2 * lm.l + 1:\n\t\t\t\t\tf.createArray('/','l_' + str(lm.l),leg)\n\n\t\t\t\tprevl = lm.l\n\t\tf.setNodeAttr(\"/\",\"lmax\",index_iterator.lmax)\n\t\tf.close()", "def export(fileprefix, hedges):\n with open(fileprefix + '.txt', 'w') as f:\n for h in hedges:\n s = \"\"\n for node in h[0]: #each node in the tail\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n for node in h[1]: #each node in the head\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n s += '1' + '\\n' #assigns weight for the hedge, currently always set to 1\n f.write(s)", "def addNeighboursToList(neighbour,parent):\r\n #key= neighbour.getKey()\r\n\r\n\r\n global openList\r\n global closeList\r\n\r\n if( neighbour not in closeList or neighbour not in openList):\r\n\r\n openList.append(neighbour)\r\n\r\n else:\r\n\r\n ## Modify the f(n) and g(n) values of the neighbour\r\n\r\n\r\n\r\n if(neighbour in openList):\r\n\r\n print(\"neighbour already present in open list\")\r\n oldNeighbour = getOldNeighbour(openList,neighbour)\r\n oldNeighbour.pathCost = min(oldNeighbour.pathCost,neighbour.pathCost)\r\n oldNeighbour.totalCost = neighbour.pathCost + neighbour.heuristicCost\r\n\r\n\r\n elif(neighbour in closeList):\r\n print(\"neighbour already present in close list\")\r\n oldNeighbour = getOldNeighbour(openList,neighbour)\r\n\r\n\r\n newPathCost = min(oldNeighbour.pathCost,neighbour.pathCost)\r\n newTotalCost = newPathCost + neighbour.heuristicCost\r\n\r\n if(oldNeighbour.totalCost > newTotalCost):\r\n openList.append(oldNeighbour)\r\n closeList.remove(oldNeighbour)", "def edge_list_build(input_path, output_path):\n\n start_time = time.time()\n\n df = pd.read_csv(input_path, sep='\\t', header=None)\n\n for col in range(1, len(df.columns)):\n df.iloc[:, col] = df.iloc[:, col-1] + '_' + df.iloc[:, col]\n\n n_divs = len(df.columns) - 1\n\n\n dict_node_names = {}\n\n for id, node_name in enumerate(np.unique(df.values.flatten())):\n dict_node_names[node_name] = id + 1\n\n tmp_df = pd.DataFrame.from_dict(dict_node_names, orient='index')\n tmp_df.reset_index(inplace=True)\n tmp_df.rename({'index': 'nodes', 0: 'hash'}, inplace=True, axis=1)\n\n hash_df = tmp_df['nodes'].str.split('_', n=n_divs, expand=True)\n hash_df = pd.concat([hash_df, tmp_df['hash']], axis=1)\n\n for col_name in df.columns:\n df[col_name] = df[col_name].map(dict_node_names)\n\n df['root'] = 0\n colnames = df.columns.values\n colnames = list(colnames[-1:]) + list(colnames[:-1])\n df = df[colnames]\n\n df_tuples = pd.DataFrame()\n\n for i in range(len(df.columns) - 1):\n 
df_tuples[i] = list(df[df.columns[i:i + 2]].itertuples(index=False, name=None))\n del df\n gc.collect()\n\n nodes_list = []\n\n for col_id in range(0, df_tuples.shape[1]):\n father_child = df_tuples.iloc[:, col_id].drop_duplicates().values\n nodes_list.extend(father_child)\n\n graph = nx.DiGraph(nodes_list)\n graph_bfs = nx.bfs_tree(graph, 0)\n \n path = output_path + '.hashmap'\n hash_df.to_csv(path, index=False, sep='\\t')\n end_time = time.time()\n print(\"Time spent creating tree from csv file:\", end_time - start_time)\n return graph_bfs", "def create_all(graph,first_last_fn):\n trip_id = 1\n line_num = 0\n num_trips = 0\n trip_id2model = {}\n #paths = {}\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n #paths[trip_id] = p\n while p.next_line != len(graph.lines):#file_length:\n graph.trip_id2line_num[trip_id] = line_num\n line_num = p.next_line\n trip_id = normalize_simple(graph.lines[line_num])[0]\n #trip_id = dg.normalize(lines[line_num])[0]\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n # paths[trip_id] = p\n graph.trip_id2line_num[trip_id] = line_num\n graph.num_trips = num_trips\n\n\n with open(first_last_fn,'wb') as output:\n pickle.dump(graph.first_last2trip_ids,output)\n\n with open('pickles/trip_id2model.pickle','wb') as output:\n pickle.dump(trip_id2model,output)\n #return paths", "def mapNighbours(self,name):\n \twith open(name, 'r', encoding='utf-8') as f:\n for line in f:\n li = line.split()\n item=li[0]\n nighbs=li[1:]\n self.NighboursMap[item]=nighbs", "def Save2Pajek(filepath, adjmatrix, labels=[], directed=False, weighted=False):\n # 0) SECURITY CHECK\n N = len(adjmatrix)\n if labels:\n if len(labels) != N:\n raise ValueError( \"List of labels not aligned with network size\" )\n\n # 1) OPEN THE TARGET FILE\n outfile = open(filepath, 'w')\n\n # 2) SAVE INFORMATION OF THE NODES\n print('*Vertices', N, file=outfile)\n\n # Wrie the list of nodes if labels have been given\n if labels:\n for i in range(N):\n # line = '%d \"%s\"' %(i+1, labels[i])\n line = '%d\\t\"%s\"' % (i + 1, labels[i])\n print(line, file=outfile)\n\n # 3) SAVE THE LINKS\n # Save the links AND their WEIGHTS\n if weighted:\n # 3.1) Find whether weights are integers or floats\n if adjmatrix[0, 0].dtype in [np.uint8, np.uint, np.int8, np.int]:\n formatstring = '%d %d %d'\n elif adjmatrix[0, 0].dtype in [np.float16, np.float32, np.float, np.float64]:\n formatstring = '%d %d %f'\n\n # 3.2) Save the ARCS if directed\n if directed:\n print('*Arcs', file=outfile)\n for i in range(N):\n neighbours = adjmatrix[i].nonzero()[0]\n for j in neighbours:\n line = formatstring % (i + 1, j + 1, adjmatrix[i, j])\n print(line, file=outfile)\n\n # 3.2) Save the EDGES, if undirected\n else:\n print('*Edges', file=outfile)\n for i in range(N):\n neighbours = adjmatrix[i].nonzero()[0]\n for j in neighbours:\n if j > i:\n line = formatstring % (i + 1, j + 1, adjmatrix[i, j])\n print(line, file=outfile)\n\n # Save ONLY the adjacency list\n else:\n formatstring = '%d %d'\n\n # 3.1) Save the ARCS if directed\n if directed:\n print('*Arcs', file=outfile)\n for i in range(N):\n neighbours = adjmatrix[i].nonzero()[0]\n for j in neighbours:\n line = formatstring % (i + 1, j + 1)\n print(line, file=outfile)\n\n # 3.1) Save the EDGES, if undirected\n else:\n print('*Edges', file=outfile)\n for i in range(N):\n neighbours = adjmatrix[i].nonzero()[0]\n for j in neighbours:\n if j > i:\n line = formatstring % (i + 1, j + 1)\n print(line, 
file=outfile)\n\n # 4) CLOSE FILE AND FINISH\n outfile.close()", "def create_path_new(self):\n\n \n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n nodes_visited = []\n nodes_visited.append([])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n prev_gps = (-1.0,-1.0)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n new_edges = self.find_edges((lat,lon),prev_gps)\n for add_edge in new_edges:\n edge_sets[matrices_index][add_edge] = 1\n else:\n edge_sets[matrices_index][edge_num] = 1\n\n if coords[0] == -1:\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n nodes_visited.append([])\n matrices_index += 1\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n normalized = normalize_simple(self.graph.lines[cur_line])\n prev_gps = (lat,lon)\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n #for coords in nodes_visited[best_index]:\n # self.graph.node_visit(self.trip_id,coords)\n\n #if self.trip_id not in self.graph.trip_id2line_num:\n # self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],first_lasts[best_index]", "def update_in_out1(filename):\r\n import shutil\r\n\r\n with open(filepath(filename, 'Edges'), 'r',\r\n encoding='utf8') as edge_file:\r\n edge_reader = csv.reader(edge_file, delimiter='\\t',\r\n quoting=csv.QUOTE_MINIMAL)\r\n\r\n # edges = [l for l in edge_reader] # List of lists\r\n \r\n for predecessor, successor in edge_reader:\r\n chk_append_in_out1(successor, predecessor, 'Predecessors')\r\n chk_append_in_out1(predecessor, successor, 'Successors')\r\n\r\n listtocheck = os.listdir(os.path.abspath(\r\n '/home/cyneo/Work/Scans/Processed Data/Word Dictionary/')\r\n )\r\n\r\n for item in listtocheck:\r\n filename = os.path.abspath(\r\n '/home/cyneo/Work/Scans/Processed Data/Word Dictionary/' + item)\r\n tempfile = os.path.abspath(\r\n '/home/cyneo/Work/Scans/Processed Data/Word Dictionary/'\r\n + 'tmp ' + item)\r\n\r\n with open(filename, 'r', 
encoding='utf8') as word_file:\r\n file_reader = csv.reader(word_file, delimiter='\\t',\r\n quoting=csv.QUOTE_MINIMAL)\r\n list_of_things = [thing[0] for thing in file_reader]\r\n set_of_things = set(list_of_things)\r\n \r\n with open(tempfile, 'w', encoding='utf8') as temp_file:\r\n temp_writer = csv.writer(temp_file, delimiter='\\t',\r\n quoting=csv.QUOTE_MINIMAL)\r\n for item in set_of_things:\r\n temp_writer.writerow([item])\r\n \r\n shutil.move(tempfile, filename)", "def persistent_homology(self):\n\n def low(j, R):\n \"\"\"\n :return: maximum line index of the column j in the matrix R with a 1 in it\n \"\"\"\n if R[j] == []:\n return (-1)\n else:\n return (sorted(R[j])[-1])\n\n # low_j = 0\n # for k in range(j):\n # if R[k, j] == 1:\n # low_j = k\n # return (low_j)\n\n N = self.nbr_splxs\n self.homology_matrix = self.neighbours_matrix[:]\n n = self.nbr_0_splxs\n # initilize the low_j matrix\n self.low_j_to_j_list = N * [-1]\n # Apply the persistence algorithm\n j = 0\n while low(j, self.homology_matrix) == -1:\n j += 1\n self.low_j_to_j_list[low(j, self.homology_matrix)] = j\n j += 1\n while j < N:\n low_j = low(j, self.homology_matrix)\n j0 = self.low_j_to_j_list[low_j]\n while j0 != -1:\n self.homology_matrix[j] = self.sum_column(j, j0, self.homology_matrix)\n # self.homology_matrix[:j, j] = (self.homology_matrix[:j, j0] + self.homology_matrix[:j, j]) % 2\n low_j = low(j, self.homology_matrix)\n j0 = self.low_j_to_j_list[low_j]\n if low_j != -1:\n self.low_j_to_j_list[low_j] = j\n j += 1\n if j % 10 == 0:\n print(j / N)\n # for j in range(1, N):\n # test = True\n # while test:\n # test = False\n # for j0 in range(j):\n # if low(j0, self.homology_matrix) == low(j, self.homology_matrix) \\\n # and low(j0, self.homology_matrix) != 0:\n # self.homology_matrix[:j, j] = (self.homology_matrix[:j, j0] + self.homology_matrix[:j, j]) % 2\n # test = True\n # if j % 10 == 0:\n # print(np.log(j + 1) / np.log(N))\n\n for j in range(N):\n low_j = low(j, self.homology_matrix)\n if low_j != -1:\n # print(low_j,j)\n # self.pers_pairs_birth.append(self.dist_appearance[low_j])\n # self.pers_pairs_death.append(self.dist_appearance[j])\n if self.splxs[low_j][0] == 0:\n self.h0_birth.append(self.dist_appearance[low_j])\n self.h0_death.append(self.dist_appearance[j])\n print(low_j)\n else:\n self.h1_birth.append(self.dist_appearance[low_j])\n self.h1_death.append(self.dist_appearance[j])\n print(\"persistant homology achieved\")\n return ()", "def _build_ham(self):\n path = self._solverpath.long_tail()\n print(path)\n current, k = self.snake.head(), 0\n for direc in path:\n self.information[current.x][current.y].idx = k\n self.information[current.x][current.y].direc = direc\n current = current.adj(direc)\n k += 1\n # Process snake bodies\n current = self.snake.tail()\n for _ in range(self.snake.len() - 1):\n self.information[current.x][current.y].idx = k\n self.information[current.x][current.y].direc = self.snake.direc\n current = current.adj(self.snake.direc)\n k += 1", "def populate_link_cells(lc, xyz, Lx, Nx):\n N = len(xyz)\n for i in range(N):\n num = xyz[i] // Lx % Nx\n lc[id_from_coord(num, Nx)].append(i)", "def process_input(input_path):\n\n # Parse lines from input file into list\n with open(input_path, 'r') as input_file:\n lines = input_file.readlines()\n\n # Declare component lists and helper variables\n vertex_map = {} # Mapping of named vertices to indices, handles duplicate connections\n idx = 0\n edges = [] # List of (src, dst) tuples\n weights = [] # Weight of each edge\n\n for line in 
lines:\n # Parse each line of csv or text file\n if input_path.endswith('.csv'):\n parts = line.split(',')\n else:\n parts = line.split()\n\n # Add source vertex to list of vertices\n src = parts[0]\n if src not in vertex_map:\n vertex_map[src] = idx\n idx += 1\n\n # Add destination vertex to list of vertices\n dst = parts[1]\n if dst not in vertex_map:\n vertex_map[dst] = idx\n idx += 1\n\n # Add integer representation of edges to list of connections\n edges.append((vertex_map[src], vertex_map[dst]))\n weights.append(parts[2])\n\n # Get definite list of vertices\n vertices = vertex_map.keys()\n\n # Print graph information\n vprint(str(len(vertices)) + ' vertices')\n vprint(str(len(edges)) + ' edges')\n\n # Build IGraph representation of network\n graph = ig.Graph(edges, directed=False)\n graph.es['weight'] = [weights[e] for e in range(len(graph.es))]\n\n return graph, vertices", "def __name_vertices(vertices: \"List[Vertex]\"):\n # In H all names of vertices are saved\n H = []\n for v in vertices:\n # If the vertex is a leaf, the name 'lr' is assigned\n # The amount of automorphisms of the subtree of the leaf, with the leaf as root, is equal to 1\n if v.degree_fixed == 1:\n v.name = 'lr'\n v.auto = 1\n else:\n # The name of all other vertices is a sorted collection of the names of its children\n # At the same time, remember how many automorphisms the subtree of that vertex has, with that vertex as root\n children_names = []\n children_count = {}\n children_auto = {}\n for n in v.neighbours:\n # The neighbour of a vertex is a child if the level of that neighbour is larger than the level of the vertex\n if n.level > v.level:\n # If twins are removed, add the name also for each of the removed twins\n for _ in range(n.n_twins):\n children_names.append(n.name)\n children_count[n.name] = children_count.setdefault(n.name, 0) + 1\n # The amount of isomorphisms of the subtree with 'v' as its root is all combinations of isomorphisms\n # of all children of 'v' combined, so the amount of isomorphisms of the children must be multiplied\n # with each other\n children_auto[n.name] = children_auto.setdefault(n.name, 1) * n.auto\n\n # If children of 'v' could be mapped to each other, the amount of isomorphisms is increased by all\n # combinations of children that could be swapped. 
So for each set of children that could be mapped to each\n # other the amount of isomorphisms should be multiplied with factorial(amount of children that can be\n # mapped to each other)\n v_auto = 1\n for child_name in set(children_names):\n v_auto *= factorial(children_count[child_name]) * children_auto[child_name]\n v.auto = v_auto\n\n # To be able to compare the names of vertices, make sure the collection of children names is sorted\n # and than concatenate them to one string\n children_names.sort()\n v_name = 'l'\n for child_name in children_names:\n v_name += child_name\n v_name += 'r'\n v.name = v_name\n H.append(v.name)\n\n return H", "def write_hot_start_file(file_name, hot_start_list):\n with open(file_name, 'w') as mesh_file:\n for ht in hot_start_list:\n mesh_file.write('DATASET\\nOBJTYPE \"mesh2d\"\\n')\n if len(ht.values.columns) > 1:\n mesh_file.write('BEGVEC\\n')\n else:\n mesh_file.write('BEGSCL\\n')\n mesh_file.write('ND {}\\n'.format(len(ht.values)))\n mesh_file.write('NC {}\\n'.format(ht.number_of_cells))\n mesh_file.write('NAME \"{}\"\\n'.format(ht.name))\n mesh_file.write('TS 0 0\\n')\n mesh_file.write(ht.values.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n mesh_file.write('ENDDS\\n')", "def read_file_pathfinder(diagonals):\n file_name = 'maze.txt'\n graph = {}\n nodes = []\n\n with open(file_name) as file:\n lines = file.readlines()\n for row in range(len(lines)):\n line = lines[row].strip('\\n')\n node_row = []\n\n for col in range(len(line)):\n character = line[col]\n\n if character == '#':\n node_row.append(None)\n continue\n\n # updating start and end node positions\n pos = (row, col)\n node_row.append(PathNode(row, col))\n\n # adds paths to the graph\n graph = add_adj_to_graph(graph, pos, lines, diagonals)\n\n nodes.append(node_row)\n\n wid = len(nodes[0])\n hei = len(nodes)\n\n while True:\n start_col = end_col = random.randint(0, wid-1)\n start_row = end_row = random.randint(0, hei-1)\n if lines[start_row][start_col] == ' ':\n break\n\n min_dist = min(wid, hei)/2\n cur_dist = 0\n while cur_dist < min_dist:\n start_col = random.randint(0, wid-1)\n start_row = random.randint(0, hei-1)\n if lines[start_row][start_col] == '#':\n continue\n cur_dist = abs(start_row - end_row) + abs(start_col - end_col)\n\n for line in nodes:\n for node in line:\n if node is not None:\n node.update_dist_to_end((end_row, end_col))\n\n return graph, nodes, nodes[start_row][start_col], nodes[end_row][end_col]", "def write_and_filter_paths(self, source, target, relation, label, paths):\n file_dir = os.path.join(self.save_dir, relation + \"_\" + str(self.maximum_length) + \"_\" + str(self.remaining_percentage) + \"_\" + str(self.random_seed) + \".txt\")\n with open(file_dir, \"a\") as fh:\n fh.write(str(label) + \"\\t\" + str(source) + \"\\t\" + str(target) + \"\\t\")\n for pdx, path in enumerate(paths):\n if not self.include_entity:\n if len(path) == 1:\n continue\n for rdx, rel_idx in enumerate(path):\n fh.write(self.idx_to_relation[rel_idx])\n if rdx != len(path)-1:\n fh.write(\"|\")\n if pdx != len(paths)-1:\n fh.write(\"###\")\n else:\n if len(path) == 3:\n continue\n fh.write(self.idx_to_node[path[0]].get_name())\n fh.write(\"|\")\n for rdx in range(0, (len(path)-1)/2):\n fh.write(self.idx_to_relation[path[rdx*2+1]])\n fh.write(\"|\")\n fh.write(self.idx_to_node[path[rdx*2+2]].get_name())\n if rdx*2+2 != len(path)-1:\n fh.write(\"|\")\n if pdx != len(paths)-1:\n fh.write(\"###\")\n fh.write(\"\\n\")", "def Find_Path(self):\n closed_nodes_map = [] # map of 
closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def _build_sparse_table(self):\n self._table = {}\n\n for p in self._jump_nodes:\n self._table[p.index()] = [self._tree.parent(p)] # table[p][0] = parent(p)\n\n l = 0\n while l < self._logsize:\n u = self._table[p.index()][l]\n\n if u is None:\n break\n\n if self._ind[u.index()] < self._pow[l]: # 
incomplete ladder\n break\n\n i = self._path[u.index()] # u belongs to path_i\n j = self._ind[u.index()] # path_i[j] = u\n w = self._ladders[i][j - self._pow[l]]\n self._table[p.index()].append(w)\n l += 1", "def write_edgelist(H, path, delimiter=\" \", encoding=\"utf-8\"):\n with open(path, \"wb\") as file:\n for line in generate_edgelist(H, delimiter):\n line += \"\\n\"\n file.write(line.encode(encoding))", "def append_step(path, neighbours_list):\n index = neighbours_list.index(8)\n directions = ['L', 'U', 'R', 'D']\n return path + directions[index]", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n \"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n while num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] 
}\n\n return car_path, drop_off_dict", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms == []:\n return\n \n row = len(rooms)\n column = len(rooms[0])\n visited = [[False for i in range(column)] for j in range(row)]\n def valid(row_index, column_index):\n if row_index < row and row_index >= 0 and column_index< column and column_index >= 0:\n return True\n return False\n \n \n def bfs_traverse(row_index, column_index, distance):\n if valid(row_index, column_index) == False or rooms[row_index][column_index] < distance:\n return\n else:\n # if rooms[row_index] [column_index] != -1 and rooms[row_index] [column_index] != 0:\n if distance < rooms[row_index][column_index]:\n\n rooms[row_index][column_index] = distance\n if rooms[row_index] [column_index] != -1:\n if valid(row_index+1, column_index):\n bfs_traverse(row_index+1, column_index, distance+1)\n if valid(row_index, column_index+1):\n bfs_traverse(row_index, column_index +1 , distance+1)\n if valid(row_index-1, column_index):\n bfs_traverse(row_index-1, column_index, distance+1)\n if valid(row_index, column_index-1):\n bfs_traverse(row_index, column_index-1, distance+1)\n \n \n for row_index in range(row):\n for column_index in range(column):\n if rooms[row_index][column_index] == 0:\n bfs_traverse(row_index, column_index, 0)", "def cluster_anls(cluster_labels,fingerprints,coords_id,coords_list):\n import xlwt\n workbook = xlwt.Workbook()\n '''Analyse the cluster each RP belongs to'''\n sheet1 = workbook.add_sheet(u'sheet_1',cell_overwrite_ok=True)\n sheet1.write(0,0,'RP Coordinate')\n sheet1.write(0,1,'Cluster labels')\n\n uniq_coords_id = np.unique(coords_id)\n i = 0\n for coord_id in uniq_coords_id:\n i+=1\n sheet1.write(i,0,str(tuple(coords_list.tolist()[int(coord_id)])))\n sheet1.write(i,1,str(np.unique(\\\n cluster_labels[np.where(coords_id == coord_id)]\\\n .tolist())))\n\n '''Analyse the set of RPs each cluster contains'''\n sheet2 = workbook.add_sheet(u'sheet_2',cell_overwrite_ok=True)\n sheet2.write(0,0,'Cluster')\n sheet2.write(0,1,'Set of coordinates')\n\n uniq_clst_labels = np.unique(cluster_labels)\n start = 1\n end = 1\n for clst_label in uniq_clst_labels:\n coord_set = coords_list[coords_id[np.where(cluster_labels == clst_label)],:].tolist()\n coord_set = list(set([tuple(coord) for coord in coord_set]))\n for j in range(len(coord_set)):\n end = start+j\n sheet2.write(end,1,str(coord_set[j]))\n sheet2.write_merge(start,end,0,0,str(clst_label))\n start = end+1\n workbook.save('./Data_Statistics/Rp_Cluster_Relation/rp_cluster_analysis.xls')\n print ('统计信息生成完毕!')", "def median_path(self,fl):\n nodes = {}\n edges = self.fl2prediction[fl]\n for i in range(len(edges)):\n if edges[i] == 1:\n node_tup = self.edge_index2tuple[i]\n nodes[node_tup[0]] = True\n nodes[node_tup[1]] = True\n fn_prefix = \"psdd/paths/may14/median_%d_%d_%d_%d\" % (self.rows,self.cols,fl[0],fl[1])\n out_fn = \"%s_coords.txt\" % fn_prefix\n with open(out_fn,'w') as outfile:\n for node in nodes.keys():\n outfile.write(\"%s,%s\\n\" % ('0',str(self.node2median[node])[1:-1]))" ]
[ "0.55288064", "0.52756536", "0.5219858", "0.5188992", "0.5051993", "0.5038808", "0.5035424", "0.5009456", "0.50076455", "0.4981707", "0.4980944", "0.49535435", "0.49374443", "0.4930359", "0.48791504", "0.48749682", "0.48330376", "0.48256785", "0.48253503", "0.480284", "0.4799396", "0.4797914", "0.47966322", "0.4781978", "0.47783983", "0.47715053", "0.47630218", "0.4760809", "0.47392908", "0.47213802" ]
0.565996
0
Generate a random input of n locations and write it to fileName. Each vertex is a home with probability p.
def generate_random_input(n, p, fileName):
    max_x = 1000
    L = []
    H = []
    E = []
    x = []  # non-negative x-coordinate of vertices

    for i in range(n):
        L.append('location' + str(i))
        rand = round(random.random() * max_x) + 1
        while rand in x:
            rand = round(random.random() * max_x) + 1
        x.append(rand)

    for i in range(n):
        if random.random() < p and len(H) < n / 2:  # vertex is a home with probability p
            H.append(i)

    for i in range(n):
        E.append([])
        for j in range(0, i):
            # E[i][j] = absolute value of the difference in x-coordinates of vertex i and
            # vertex j, used as the weight to ensure the triangle inequality
            E[i].append(abs(x[i] - x[j]))
        E[i].append('x')  # no self-edges

    for i in range(n):
        for j in range(i + 1, n):
            E[i].append(E[j][i])

    starting_index = int((random.random() * (len(L) - 1)) // 1)
    s = L[starting_index]
    print_input(L, E, H, s, fileName)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateRandomInput(filename, num_people, travel_db):\n import random\n routes = []\n for i in range(num_people):\n route = travel_db.randomRoute()\n route.insert(0,\"Person \" + str(i)) # Add a name for each route.\n routes.append(route)\n if FileHandler.writeRoutesCSV(filename,routes): # If it's successful writing the file\n print(\"File {0} created successfully with {1} people.\".format(filename, num_people))\n else:\n print(\"File {0} could not be created.\".format(filename))", "def gen_int(filename):\n random.seed()\n random.randint(-100,100)\n with open(filename, \"w\") as f:\n for i in range(1000):\n f.write(str(random.randint(-100,100)))\n f.write(\" \")\n # f.write(\"hello\")", "def generate_nums(filename, n):\n text = ''\n for i in range(n):\n num = random.randrange(0, 100)\n text += (str(num) + '\\n')\n f = open(filename, 'w')\n f.write(text)\n f.close()\n return", "def write_numbers(file_path):\n count = random.randint(20, 40)\n try:\n with open(file_path, 'w') as f:\n for _ in range(count):\n f.write(' '.join([str(x) for x in random.sample(range(10, 90), random.randint(4, 12))]))\n f.write('\\n')\n except Exception as err:\n print('Unexpected error:', err)", "def generate(count):\n lst = []\n with open('data.txt', 'w+') as f:\n for i in range(0, count):\n st = str(random.random())\n f.write(st+\"\\n\")\n lst.append(st)\n return lst", "def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations", "def generate_random_testing(file_name, nb_points):\n file_name = _format_file_extension(file_name)\n acoustic_data = _generate_random_acoustic(nb_points)\n data = pd.DataFrame(acoustic_data, columns=[fmd.COLUMN_NAME[0]])\n data.to_csv(file_name, index=False)", "def generate_random_training(file_name, nb_points):\n file_name = _format_file_extension(file_name)\n acoustic_data = _generate_random_acoustic(nb_points)\n acoustic_data = np.concatenate((acoustic_data, np.ones((nb_points, 1))), axis=1)\n data = pd.DataFrame(acoustic_data, columns=fmd.COLUMN_NAME)\n data.to_csv(file_name, index=False)", "def generate_triangle(seed, num_points=200):\n points = {\n 0: 750,\n 750: 0,\n 1500: 751\n }\n random.seed(seed)\n while len(points) < num_points:\n y_coord = (random.randrange(500) or 1) + 200\n x_coord = random.randrange(round(y_coord*4/3)) + round((500 - y_coord)*(3/4)) + 400\n if (not points.get(x_coord)) and (x_coord != 750):\n points[x_coord] = y_coord\n\n os.makedirs(os.path.join(DATA_DIR, seed), exist_ok=True)\n filepath = os.path.join(DATA_DIR, '{}/triangle.node'.format(seed))\n\n # creates the input nodes used by triangle to create delauney graph\n with open(filepath, 'w') as node_file:\n header = \"{} 2 0 0\\n\".format(len(points))\n node_file.write(header)\n i = 1\n for x_coord, y_coord in points.items():\n node_file.write(\" {} {} {}\\n\".format(i, x_coord, y_coord))\n i += 1\n node_file.close()\n\n call(['triangle', '-e', filepath])", "def random(xl, xr, yl, yu, landclasses, cellsize=30, p=None, path=None):\n # Determine number of columns\n ncol = int(math.ceil((xr-xl)/cellsize)) + 1\n # Determine number of rows\n nrow = int(math.ceil((yu-yl)/cellsize)) + 1\n if path is None:\n path = os.getcwd()\n with open(os.path.join(path, 'p_struct.txt'), 'w') as fid:\n fid.write(str(p))\n return np.random.choice(landclasses, (nrow, ncol), True, p)", "def make_kosaraju(filename, number_of_nodes, number_of_clusters, smallest_degree):\n\n file = open(filename, 'w')\n tmp = generate_graph(number_of_nodes, 
number_of_clusters, smallest_degree)\n for i in tmp:\n for j in tmp[i]:\n file.write(\"{} {}\\n\".format(i, j))", "def _make_random_file(self, dir, num_chars=10000):\n filename = os.path.join(dir, \"f-%d\" % random.randint(1, 2**63 - 1))\n content = \"\".join([random.choice(\"0123456789abcdefghijklmnopqrstuvwxyz\\n\") for _ in range(num_chars)])\n with open(filename, \"w\") as f:\n f.writelines(content)\n return filename", "def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])", "def generatePositivePHASLoci(options,whole_mapped_data,phase,cycle):\n out_filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".positive_phase_loci\"\n fhw=open(out_filename,\"w\")\n for chromosome in sorted(whole_mapped_data):\n filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest.concentrated\"\n try:\n fhr=open(filename,\"r\")\n except FileNotFoundError:\n continue\n flag_reg=1000\n window_start,window_end=0,0\n for line in fhr:\n \"\"\"pvalue=float(line.strip().split()[-1])\n if pvalue>=options.pvalue_cutoff:continue\"\"\"\n register,start,end=map(int,line.strip().split()[:3])\n if register==flag_reg:\n if window_end>start:\n window_end=end\n else:\n fhw.write(chromosome+\"\\t\"+str(window_start)+\"\\t\"+str(window_end)+\"\\n\")\n window_start=start\n window_end=end\n else:\n if flag_reg!=1000:\n fhw.write(chromosome+\"\\t\"+str(window_start)+\"\\t\"+str(window_end)+\"\\n\")\n window_start=start\n window_end=end\n flag_reg=register\n fhr.close()\n fhw.write(chromosome+\"\\t\"+str(window_start)+\"\\t\"+str(window_end)+\"\\n\")\n fhw.close()", "def generate_file(name, size):\n print('=> Generating %s file' % name)\n with open(DATASET_DIR+name+DATASET_EXTENSION, 'wb+') as fout:\n fout.write(os.urandom(size))", "def generate(seq_sz, num, offset, filename):\n\n # generator of data\n label = '>test_rand_' \n\n gen_dna = lambda rng: (choice(symbols) \\\n for _ in range(seq_sz + randint(-(rng / 2), rng / 2)))\n data = ((gen_dna(offset)) for _ in range(num))\n\n # write generated data to a file\n with open(filename, 'w') as f:\n for i, t in enumerate(data):\n f.write(label + str(i) + '\\n')\n f.write(''.join(t) + '\\n')", "def generate_output(input_filename: str, output_filename: str, goal_node: Node,\n generated: set) -> None:\n\n input_stream = io.open(input_filename, 'r', encoding='utf-8', errors='ignore',\n newline='\\n')\n with open(output_filename, 'w') as out_file:\n for i in range(0, 10):\n out_file.write(input_stream.readline().rstrip())\n out_file.write('\\n')\n \"\"\" The first ten lines of the output file are identical to those in the \n input file. 
The tenth line should be skipped because it's blank.\"\"\"\n out_file.write(str(goal_node.path_cost) + '\\n')\n # Line 11 of the output, the depth level d\n out_file.write(str(len(generated)) + '\\n')\n # Line 12 of the output, the total number of nodes generated\n\n # Writing Line 13 of the output, the sequence of moves\n length = len(goal_node.path_history)\n for i in range(length - 1):\n out_file.write(goal_node.path_history[i] + ' ')\n out_file.write(goal_node.path_history[length - 1] + '\\n')\n\n # Writing Line 14 of the output, the f(n) values\n f_line = str(goal_node.f) + ' '\n parent = goal_node.parent\n while parent: # Loop stops when parent == None\n f_line += (str(parent.f) + ' ')\n parent = parent.parent\n f_list = f_line.split(' ')\n # Breaks down the string to the integers it contains\n reverse = ''\n for i in range(len(f_list) - 2, -1, -1):\n # f_line[len(f_line)-1] is an extra whitespace character and\n # thus shouldn't be copied\n reverse += str(f_list[i])\n if i != 0:\n reverse += ' '\n \"\"\" The order of the f(n) values in f_line is from goal node \n to root node. The four lines above reverse the order, which \n is what the output format expects.\"\"\"\n out_file.write(reverse)\n\n out_file.close()", "def generate_index(file_name):\n count = num_lines(file_name)\n index = random.randint(0, count - 1)\n return index", "def _prepare_input_file(self, filename, numlines, maxvalue):\n with open(filename, 'a') as f:\n for _ in range(numlines):\n f.write(str(randrange(maxvalue)) + '\\n')\n self.filepath = f.name", "def create_file_empty_particles( self, fullpath, iteration,\n time, dt, select_nglobal_dict=None ):\n # Create the file (can be done by one proc or in parallel)\n f = self.open_file( fullpath,\n parallel_open=self.write_metadata_parallel )\n\n # Setup the different layers of the openPMD file\n # (f is None if this processor does not participate is writing data)\n if f is not None:\n\n # Setup the attributes of the top level of the file\n self.setup_openpmd_file( f, iteration, time, dt )\n # Setup the meshes group (contains all the particles)\n f.attrs[\"particlesPath\"] = np.string_(\"particles/\")\n particle_path = \"/data/%d/particles/\" %iteration\n particle_grp = f.require_group(particle_path)\n # Loop through all particle species\n for species_name in sorted(self.species_dict.keys()):\n species = self.species_dict[species_name]\n\n # Check the number of particles to write\n if select_nglobal_dict is not None:\n N = select_nglobal_dict[species_name]\n else:\n N = None\n\n # Create and setup the h5py.Group species_grp\n species_path = particle_path+\"%s/\" %(species_name)\n species_grp = f.require_group( species_path )\n self.setup_openpmd_species_group( species_grp, species, N=N )\n\n # Loop over the different quantities that should be written\n # and setup the corresponding datasets\n for particle_var in self.particle_data:\n\n # Vector quantities\n if particle_var in [\"position\", \"momentum\", \"E\", \"B\"]:\n # Setup the dataset\n quantity_path=species_path+ \"%s/\" %particle_var\n quantity_grp = f.require_group(quantity_path)\n for coord in [\"x\",\"y\",\"z\"]:\n # Create the dataset (fixed size or appendable)\n if N is not None:\n dset = quantity_grp.create_dataset(\n coord, (N,), dtype='f8')\n else:\n dset = quantity_grp.create_dataset(\n coord, (0,), maxshape=(None,), dtype='f8')\n self.setup_openpmd_species_component( dset )\n self.setup_openpmd_species_record( quantity_grp,\n particle_var)\n\n # Scalar quantity\n elif particle_var in [\"weighting\", 
\"id\", \"t\"]:\n # Choose the type of the output\n if particle_var == \"id\":\n dtype = 'uint64'\n else:\n dtype = 'f8'\n # Create the dataset (fixed size or appendable)\n if N is not None:\n dset = species_grp.create_dataset(\n particle_var, (N,), dtype=dtype )\n else:\n dset = species_grp.create_dataset( particle_var,\n (0,), maxshape=(None,), dtype=dtype)\n self.setup_openpmd_species_component( dset )\n self.setup_openpmd_species_record( dset, particle_var )\n\n # Unknown field\n else:\n raise ValueError(\n \"Invalid string in particletypes: %s\" %particle_var)\n\n # Close the file\n f.close()", "def save_samples(samples, output_prefix=\"sample\"):\n\n for (i, vertices) in enumerate(samples):\n vertex_fname = \"{pref}{i}_vertices.ply\".format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == \"\":\n vertex_fname = \"./\" + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)", "def generate_submission_sample(map_path, sample_path):\n with open(sample_path, 'wb') as output:\n output.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n output.write('<osm>\\n ')\n\n # Write every 10th top level element\n for i, element in enumerate(get_element(map_path)):\n if i % 10 == 0:\n output.write(ET.tostring(element, encoding='utf-8'))\n\n output.write('</osm>')", "def generate_N_doping(path, N_graphitic, N_pyridinic, N_pyrrolic, filename1):\n global bond_list\n bond_list = bond_list_1 + bond_list_3\n atom_list = read_in_graphene(path)\n rings = find_rings(atom_list)\n bond_list = bond_list_1 + bond_list_3\n map_3, map_2, map_2n = filter_carbon_atoms(atom_list, rings)\n graphitic = N_graphitic \n pyridinic = N_pyridinic\n pyrrolic = N_pyrrolic\n attempt = len(atom_list) / 10\n choices = [1, 2, 3]\n while (((N_graphitic > 0) or (N_pyridinic > 0) or (N_pyrrolic > 0)) and (attempt > 0)):\n print(\"Left to add: \", \"N_graphitic \", N_graphitic, \"N_pyridinic \", N_pyridinic, \"N_pyrrolic \", N_pyrrolic)\n if (N_graphitic == 0):\n try:\n choices.remove(1)\n except:\n pass\n if (N_pyridinic == 0):\n try:\n choices.remove(2)\n except:\n pass\n if (N_pyrrolic == 0):\n try:\n choices.remove(3)\n except:\n pass\n choice = random.choice(choices)\n if (choice == 1):\n while ((N_graphitic > 0) and (len(map_3) > 0)):\n random_atom = random.choice(map_3)\n N_graphitic -= 1\n N = Atom(random_atom.atom_number, \"N3\", \"N3A\", str(graphitic - N_graphitic), float(\"{0:.3f}\".format(random_atom.x)), float(\"{0:.3f}\".format(random_atom.y)), float(\"{0:.3f}\".format(random_atom.z)))\n if ((len(identify_bonds(random_atom, atom_list)) == 3) and ((identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CY\")) and ((identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CX\") or identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CY\") and ((identify_bonds(random_atom, atom_list)[2][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[2][0].atom_name == \"CY\"))):\n for ring in rings:\n if (random_atom in ring):\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n try:\n atom_list.remove(random_atom)\n except:\n pass\n atom_list.append(N)\n else:\n attempt -= 1\n elif (choice == 2):\n while ((N_pyridinic > 0) and (len(map_2) > 0)): \n random_atom = random.choice(map_2)\n N_pyridinic -= 1\n N = Atom(random_atom.atom_number, \"N2\", \"N2A\", str(pyridinic - N_pyridinic), 
float(\"{0:.3f}\".format(random_atom.x)), float(\"{0:.3f}\".format(random_atom.y)), float(\"{0:.3f}\".format(random_atom.z)))\n if ((len(identify_bonds(random_atom, atom_list)) == 2) and ((identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CY\")) and ((identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CX\") or identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CY\") ):\n found = False\n for ring in rings:\n if (random_atom in ring):\n found = True\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n if (found == False):\n try:\n map_3.remove(random_atom)\n except:\n pass\n try:\n map_2.remove(random_atom)\n except:\n pass\n try:\n map_2n.remove(random_atom)\n except:\n pass\n atom_list.remove(random_atom)\n atom_list.append(N)\n else:\n attempt -= 1\n else: \n attempt -= 1\n elif (choice == 3):\n while ((N_pyrrolic > 0) and (len(map_2n) > 0)):\n random_atom_1 = random.choice(map_2n)\n for neighbour in identify_bonds(random_atom_1, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n random_atom_2 = neighbour[0]\n break\n for ring in rings:\n if (random_atom_1 in ring):\n center_6 = {}\n center_6['x'] = 0\n center_6['y'] = 0\n center_6['z'] = 0\n center_4 = {}\n center_4['x'] = 0\n center_4['y'] = 0\n center_4['z'] = 0\n for atom in ring:\n center_6['x'] += atom.x\n center_6['y'] += atom.y\n center_6['z'] += atom.z\n if ((atom != random_atom_1) and (atom != random_atom_2)):\n center_4['x'] += atom.x\n center_4['y'] += atom.y\n center_4['z'] += atom.z\n center_6['x'] /= 6\n center_6['y'] /= 6\n center_6['z'] /= 6\n center_4['x'] /= 4\n center_4['y'] /= 4\n center_4['z'] /= 4\n N_pyrrolic -= 1\n p = 0.6\n limit = 0.3\n if ((-limit < center_4['x'] - center_6['x'] < limit) and (-limit < center_4['y'] - center_6['y'] < limit)): \n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((-limit < center_4['x'] - center_6['x'] < limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((-limit < center_4['x'] - center_6['x'] < limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (-limit < center_4['y'] - center_6['y'] < limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (center_4['y'] - center_6['y'] > limit)):\n 
N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (-limit < center_4['y'] - center_6['y'] < limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n for ring in rings:\n if (random_atom_1 in ring):\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n for mol in identify_bonds(atom, atom_list):\n try:\n map_2n.remove(mol[0])\n except:\n pass\n try:\n atom_list.remove(random_atom_1)\n atom_list.remove(random_atom_2)\n except:\n pass\n atom_list.append(N)\n else:\n attempt -= 1\n attempt -= 1\n writepdb(atom_list, filename1)\n print(\"done.\")\n return 'done.'", "def output_files(self,positions, num_trials):\r\n output_text = open('results.txt', 'w')\r\n result = self.simulation(positions, num_trials)\r\n for pos in positions:\r\n position_value = 1000 / pos\r\n mean = np.mean(result[pos])\r\n std = np.std(result[pos])\r\n plt.hist(result[pos],100,range=[-1,1])\r\n plt.savefig(\"histogram_\"+str(pos).zfill(4)+\"_pos.pdf\")\r\n plt.close()\r\n output_text.write('For position : {0} with position Value: {1} '.format(pos,position_value))\r\n output_text.write(' The mean is: {0} The standard deviation: {1} \\n'.format(mean,std))\r\n output_text.close()", "def write_file(f_name, size, best, best_tour):\n new_file = open(\"Checking Tours/Genetic/TourfileA/tourNEW\"+f_name+\".txt\", 'w+')\n new_file.write(\"NAME = \" + f_name + \",\")\n new_file.write(\"\\nTOURSIZE = \" + str(size) + \",\")\n new_file.write(\"\\nLENGTH = \" + str(best) + \",\\n\")\n for i in range(size):\n new_file.write(str(best_tour[i]+1)+\",\")\n new_file.close()", "def generate_seed_file(kb_mapping, seed_file):\n r_file = open(kb_mapping, 'r')\n s_file = open(seed_file, 'w+')\n\n for line in r_file:\n values = line.strip().split(\"\\t\")\n relations = values[1].split(\" \")\n subsumptions = values[2].split(\" \")\n for subsumption in subsumptions:\n if subsumption == \"concept:relatedto\":\n continue\n for relation in relations:\n s_file.write(\"%s\\t%s\\t1.0\\n\" %(relation, subsumption))\n\n r_file.close()\n s_file.close()", "def generate_random_walks(num_walks, walk_length, workers, vertices):\r\n\r\n logging.info('Loading distances_nets on disk...')\r\n\r\n\r\n\r\n graphs = restore_variable_from_disk('distances_nets_graphs')\r\n\r\n alias_method_j = restore_variable_from_disk('nets_weights_alias_method_j')\r\n\r\n alias_method_q = restore_variable_from_disk('nets_weights_alias_method_q')\r\n\r\n amount_neighbours = 
restore_variable_from_disk('amount_neighbours')\r\n\r\n\r\n\r\n logging.info('Creating RWs...')\r\n\r\n t0 = time()\r\n\r\n\r\n\r\n walks = deque()\r\n\r\n\r\n\r\n if workers > num_walks:\r\n\r\n workers = num_walks\r\n\r\n\r\n\r\n with ProcessPoolExecutor(max_workers=workers) as executor:\r\n\r\n futures = {}\r\n\r\n for walk_iter in range(num_walks):\r\n\r\n random.shuffle(vertices)\r\n\r\n job = executor.submit(exec_random_walks_for_chunk, vertices, graphs, alias_method_j, alias_method_q,\r\n\r\n walk_length, amount_neighbours)\r\n\r\n futures[job] = walk_iter\r\n\r\n logging.info(\"Receiving results...\")\r\n\r\n for job in as_completed(futures):\r\n\r\n walk = job.result()\r\n\r\n r = futures[job]\r\n\r\n logging.info(\"Iteration {} executed.\".format(r))\r\n\r\n walks.extend(walk)\r\n\r\n del futures[job]\r\n\r\n\r\n\r\n t1 = time()\r\n\r\n logging.info('RWs created. Time: {}m'.format((t1 - t0) / 60))\r\n\r\n logging.info(\"Saving Random Walks on disk...\")\r\n\r\n save_random_walks(walks)", "def generateData(N, k):\n n = float(N) / k\n X = []\n for i in range(k):\n c = (random.uniform(-1, 1), random.uniform(-1, 1))\n s = random.uniform(0.05, 0.5)\n x = []\n while len(x) < n:\n a, b = np.array([np.random.normal(c[0], s), np.random.normal(c[1], s)])\n # Continue drawing points from the distribution in the range [-1,1]\n if abs(a) < 1 and abs(b) < 1:\n x.append([a, b])\n X.extend(x)\n X = np.array(X)[:N]\n # Write list to file for later use\n f = open('./dataset_N' + str(N) + '_K' + str(k) + '.txt', 'w')\n for x in X:\n f.write(str(x[0]) + \" \" + str(x[1]) + '\\n')\n\n f.close();\n\n return X", "def generate(self):\n super().generate()\n records = random.random((self._dimension, self._dimension, self.num_samples))\n record_labels = [0] * self.num_samples\n prev_out_spec =\"\"\n count = 0\n for i in range(0, int(self.num_files)):\n if i % self.comm_size == self.my_rank:\n progress(i+1, self.num_files, \"Generating NPZ Data\")\n out_path_spec = \"{}_{}_of_{}.npz\".format(self._file_prefix, i, self.num_files)\n if count == 0:\n prev_out_spec = out_path_spec\n if self.compression != Compression.ZIP:\n np.savez(out_path_spec, x=records, y=record_labels)\n else:\n np.savez_compressed(out_path_spec, x=records, y=record_labels)\n count += 1\n else:\n copyfile(prev_out_spec, out_path_spec)", "def gen_simple_test():\n count = 1\n mdict = {\n 'operating_frequency': 3e8,\n 'sample_rate': 8e3,\n 'signal': [1] * 5,\n 'origin_pos': [1000, 0, 0],\n 'dest_pos': [300, 200, 50],\n 'origin_vel': [0] * 3,\n 'dest_vel': [0] * 3,\n }\n io.savemat('{}{}_input'.format(tests_path, count), mdict)" ]
[ "0.67131954", "0.62345254", "0.6159704", "0.61301774", "0.6034666", "0.5965056", "0.58956575", "0.5811915", "0.5804421", "0.5698664", "0.5690434", "0.5683697", "0.56731087", "0.56668985", "0.5614566", "0.55891496", "0.5583429", "0.5572517", "0.55605453", "0.5547967", "0.5526433", "0.55072814", "0.5501364", "0.54855424", "0.5474924", "0.547097", "0.54624915", "0.5460049", "0.5458242", "0.5454548" ]
0.77192765
0
Gets the feature of this PaymentConnectorFeature.
def feature(self): return self._feature
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, feature_name):\n return getattr(self, feature_name)", "def get(cls, feature_name):\n try:\n return cls.feature_registry[feature_name]\n except KeyError:\n raise FeatureNotFound(feature_name)", "def config(self, feature):\n return self._config.get(feature, base_config.BaseConfig())", "def get_bluetooth(self):\n return self._bluetooth", "def get_feature(self, feature: FeatureName) -> FeatureInfo:\n if feature not in self.feature_map:\n state = FeatureState.Unsupported\n else:\n state = self.feature_map[feature]\n return FeatureInfo(state=state)", "def _get_feature(self, layer_name):\n if (\n layer_name in self.config[\"layers\"]\n and \"feature\" in self.config[\"layers\"][layer_name]\n ):\n return self.config[\"layers\"][layer_name][\"feature\"]\n else:\n return 0", "def get(self, name):\n try:\n return(self._d_features[name])\n except:\n log.error(\"Can't get feature '%s'\" % name)\n return", "def get_feature(self, feature_name: FeatureName) -> interface.FeatureInfo:\n if feature_name == FeatureName.PushUpdates:\n # Multiple protocols can register a push updater implementation, but only\n # one of them will ever be used (i.e. relaying is not done on method\n # level). So if at least one push updater is available, then we can return\n # \"Available\" here.\n if self._push_updater_relay.count >= 1:\n return interface.FeatureInfo(FeatureState.Available)\n if feature_name in self._feature_map:\n return self._feature_map[feature_name][1].get_feature(feature_name)\n return interface.FeatureInfo(FeatureState.Unsupported)", "def getFeatureData(self, feature):\n return self.data[:,self._getFIdx(feature)]", "def getFeatureGenerator(self):\n\t\treturn self.feature_generator", "def get_feature_flag(self, account, flag, signing_account=None):\n account = Account(account, hive_instance=self.hive)\n return self._conveyor_method(account, signing_account,\n \"conveyor.get_feature_flag\",\n [account['name'], flag])", "def featureByName(self, name):\n for feature in self.features:\n if feature.name == name:\n return feature\n return None", "def getFeature(self, featureName):\n # loop through all the existing features\n for feature in self.features:\n # when we have a match with the name\n if featureName == feature.name:\n # return the value in the solution\n return feature\n # feature doesn't exist\n return None", "def get_feature_of(self, name, field_):\n return self.get_person(name)[field_]", "def get_feature(\n self,\n ) -> Callable[[featurestore_service.GetFeatureRequest], Awaitable[feature.Feature]]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_feature\" not in self._stubs:\n self._stubs[\"get_feature\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature\",\n request_serializer=featurestore_service.GetFeatureRequest.serialize,\n response_deserializer=feature.Feature.deserialize,\n )\n return self._stubs[\"get_feature\"]", "def get_currency_feature_value(self):\n curr_feature = AssetFeature.Standard.CURRENCY.get_object()\n return AssetFeatureValue.objects.get_or_create(name=self.currency, feature=curr_feature)[0]", "def TableFeatureConfig(self):\n\t\treturn self._get_attribute('tableFeatureConfig')", "def as_feature(self) -> Feature:\n if self.geometry:\n feat = Feature(geometry=self.geometry, attributes=self._get_feature_attributes())\n else:\n feat = 
Feature(attributes=self._get_feature_attributes())\n return feat", "def __call__(self, feature):\n return self.is_enabled(feature)", "def get(self, feature, reference):\n keyindex = self.rows[feature]\n refindex = self.columns[reference]\n return self.data[refindex][keyindex]", "def getChild(self):\n return self.features[0]", "def feature_set(self) -> Optional[pulumi.Input['OrganizationFeatureSet']]:\n return pulumi.get(self, \"feature_set\")", "def get_feature_by_name(self, feature_name):\n feature_index = self.feature_name_index.get(feature_name,-1)\n if feature_index > -1:\n return self.features[feature_index]\n else:\n logger.error(\"{} does not exist!\".format(feature_name))\n return None", "def get_features(self):\n return self._features", "def get_feature(self, wf, feature_name):\n\n # 1) Do logging - NYI\n # What is the easiest way to initialize without forcing a init super call?\n # NOTE: We could also support returning all depdencies, in which we get\n # the dependencies of the parent and add those as well\n if hasattr(self, 'dependencies'):\n self.dependencies.append(feature_name)\n else:\n self.dependencies = [feature_name]\n\n # 2) Make the call to WormFeatures\n # Note, we call wf.get_features rather than the spec to ensure that wf\n # is aware of the new features that have been computed\n return wf._get_and_log_feature(feature_name, internal_request=True)", "def get_gateway(self):\n return self.gateway", "def feature_set(self) -> pulumi.Output[Optional['OrganizationFeatureSet']]:\n return pulumi.get(self, \"feature_set\")", "def project(self, feature):\n return feature", "def get_other_features(self):\n return self.other_features", "def get_region_feature_value(self):\n return self._get_region_feature(self.region.name)" ]
[ "0.63052654", "0.58349454", "0.58330095", "0.5801297", "0.57597953", "0.574794", "0.56846553", "0.5595365", "0.5569588", "0.55606323", "0.5526605", "0.550877", "0.5505963", "0.55038595", "0.548417", "0.5483593", "0.54776543", "0.54644096", "0.53197587", "0.53013575", "0.52925277", "0.52119666", "0.5206888", "0.5201887", "0.5177826", "0.5169276", "0.51236594", "0.5114398", "0.5090802", "0.5081027" ]
0.7268537
0
Sets the feature of this PaymentConnectorFeature.
def feature(self, feature): self._feature = feature
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_cur_feature(self, feature):\n self.cur_feature = feature", "def setFeature(self, name, value=1):\n self.features[self.featureSet.getId(self.tag+name)] = value", "async def async_set_features(self, features):\n self._features = features", "def attach_feature(self, feature):\r\n\r\n # Filter out literally identical features\r\n if feature in self._features:\r\n return # the feature is already present\r\n\r\n # Filter out functionally identical features.\r\n # Features may use their on_attach method to raise\r\n # toolbox.AlreadyThere if they detect that some\r\n # installed feature does the same thing already\r\n attach = getattr(feature, 'on_attach', None)\r\n if attach is not None:\r\n try:\r\n attach(self)\r\n except toolbox.AlreadyThere:\r\n return\r\n self.execute_callbacks_times.setdefault(feature, 0)\r\n #it would be nice if we could require a specific class instead of\r\n #a \"workalike\" so we could do actual error checking\r\n #if not isinstance(feature, toolbox.Feature):\r\n # raise TypeError(\"Expected gof.toolbox.Feature instance, got \"+\\\r\n # str(type(feature)))\r\n\r\n # Add the feature\r\n self._features.append(feature)", "def set_features(self, features: list):\n self._features = features", "def update_feature(self, dataset, fid, feature):\n uri = URITemplate(\n self.baseuri + '/{owner}/{did}/features/{fid}').expand(\n owner=self.username, did=dataset, fid=fid)\n return self.session.put(uri, json=feature)", "def set_features(self, features: np.ndarray):\n self.features = features", "def __setitem__(self, feature, value):\n setattr(self, feature, value)", "def set_features(self, features):\n self.features_ = list(features)", "def set(self, X, y, feature_name):\n pass", "def _setStringFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureStringSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)", "def features(self, features):\n\n self._features = features", "def register_feature(self, feature_name):\n self.disco_info.add_feature(feature_name)", "def _setIntFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureIntSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)", "def _set(self, driver: AbstractHasFeatures, value: Any):\n with driver.lock:\n set_chain(self, driver, value)", "def _setBoolFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureBoolSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)", "def set_flag(self, set_flag):\n\n self._set_flag = set_flag", "def enable_feature(self, feature_name, callback=None):\n logger.info(\"enable_feature {} called\".format(feature_name))\n self.feature_enabled[feature_name] = True\n\n def pipeline_callback(call):\n if call.error:\n # TODO we need error semantics on the client\n exit(1)\n if callback:\n callback()\n\n self._pipeline.run_op(\n pipeline_ops_base.EnableFeature(feature_name=feature_name, callback=pipeline_callback)\n )", "def integration_setting(self, integration_setting):\n\n self._integration_setting = integration_setting", "def __delattr__(self, feature):\n setattr(self, feature, None)", "def __init__(__self__, *,\n feature_set: Optional[pulumi.Input['OrganizationFeatureSet']] = None):\n if feature_set is not None:\n pulumi.set(__self__, \"feature_set\", feature_set)", "def provide(self, feature, provider, suspend_callable=False, *args, **kwargs):\n if not self.allow_replace:\n assert feature not in self.providers, \"Duplicate feature: %r\" % 
feature\n if callable(provider) and not suspend_callable:\n def call():\n return provider(*args, **kwargs)\n else:\n def call():\n return provider\n self.providers[feature] = call", "def add(self, feature):\n \n if self.bo is not None:\n feature.attach(self.bo)\n \n bo_feature_name = feature.name\n \n if bo_feature_name not in self._d_features:\n log.info(\"Add feature '%s'\" % feature)\n self._d_features[bo_feature_name] = feature\n return(True)\n else:\n log.error(\"Feature name '%s' ever exists - you must delete it before\" % bo_feature_name)\n return(False)", "def register(cls, feature_name, feature):\n if feature_name in cls.feature_registry:\n raise FeatureAlreadyRegistered(feature_name)\n cls.feature_registry[feature_name] = feature", "def featuresets(self, featuresets):\n\n self._featuresets = featuresets", "def delete_feature(self, feature):\r\n cmd = DeleteFeatureCommand(self._delete_feature, self._set_features, self._features, feature)\r\n self.get_invoker().store_and_execute(cmd)", "def advanced_features(self, advanced_features):\n\n self._advanced_features = advanced_features", "def set_feature_mask(self, feature_mask):\n self.feature_mask = feature_mask", "def set_frame(self, frame_next):\n self.featureDesA = self.featureDesB\n self.featureFrameA = self.featureFrameB\n self.frameA = self.frameB\n self.frameB = frame_next", "def setHotplug(self, hotplug):\n # type: (bool)->None\n\n msg = \"hotplug key will be renamed into allow-hotplug in 4.0\"\n warnings.warn(msg, DeprecationWarning)\n\n self._validator.validate_one(\n 'hotplug', VALID_OPTS['hotplug'], hotplug)\n self._ifAttributes['hotplug'] = hotplug" ]
[ "0.61582905", "0.5667619", "0.5464145", "0.5331207", "0.52944213", "0.5291586", "0.52516097", "0.52505726", "0.52294904", "0.521491", "0.51534677", "0.51327854", "0.51170415", "0.5108358", "0.50498706", "0.50097615", "0.4988337", "0.4987762", "0.49632746", "0.49425036", "0.49282613", "0.49124885", "0.488279", "0.4862397", "0.48435387", "0.48284522", "0.4807565", "0.47711462", "0.47484973", "0.4708661" ]
0.7179533
0
repo_url is formatted as AUTHOR/REPO. user and passwd are required to get the 5000 request/h rate limit.
def get_pullReq(repo_url, user, passwd):
    # auth for 5000 request/h limit
    print("\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT")
    if user == '' or passwd == '':
        user = input('username : ')
        passwd = input('passwd : ')

    # repo url
    github_pullReq_url = "https://api.github.com/repos/{}/pulls?state=all&per_page=100&page="
    url = github_pullReq_url.format(repo_url)

    # fetch all pages
    pullReq = []
    i = 1
    eop = False
    while not eop:
        print("\n\nFECTHING PAGE {}".format(i))
        data = get_requests(url + str(i), user, passwd)
        pullReq = pullReq + data
        i += 1
        if len(data) != 100:
            eop = True

    return pullReq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pullReq_commits(pullreq_url, user, passwd):\n \n #auth for 5000 request/h limitprint(\"\\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT\")\n if user=='' or passwd=='':\n user = input('username : ')\n passwd = input('passwd : ')\n\n #fetch 250 max commits\n pullReq_commits = get_requests(pullreq_url, user, passwd)\n\n return pullReq_commits", "def test_repo_config_userpass_port() -> None:\n password = \"pa55word\" # noqa: S105\n repo = RepositoryConfiguration(\n name=\"pypi\",\n base_url=\"https://private.repo.org:8080/pypi\",\n username=\"fred\",\n password=password,\n )\n assert (\n repo.get_access_url() == f\"https://fred:{password}@private.repo.org:8080/pypi\"\n )", "def main():\n# logging.basicConfig(level=logging.DEBUG)\n try:\n user = sys.ARGV[1]\n except:\n user = 'hmm01i'\n repos = getRepos(user)\n print(\"%i Personal Repos\" % len(repos))\n logging.debug(repos)\n #print(\"Repo,[size, last update]\")\n #for k in repos.keys():\n # print(str(k),repos[k])", "def get_forks(repo_url, user, passwd):\n \n #auth for 5000 request/h limitprint(\"\\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT\")\n if user=='' or passwd=='':\n user = input('username : ')\n passwd = input('passwd : ')\n\n #repo url\n github_fork_url = \"https://api.github.com/repos/{}/forks?sort=stargazers&per_page=100&page=\"\n url = github_fork_url.format(repo_url)\n\n #fetch all pages\n forks = []\n i=1\n eop = False\n while not eop:\n print(\"\\n\\nFECTHING PAGE {}\".format(i))\n data = get_requests(url+str(i), user, passwd)\n forks = forks + data\n i+=1\n if len(data) != 100:\n eop = True\n \n #reject private ones\n temp = forks\n for fork in temp:\n if fork['private'] == True:\n forks.remove(fork)\n print(\"{} private forks\".format(len(temp)-len(forks)))\n\n return forks", "def user_repos(self, username: str) -> requests.Response:\n\n api_url = 'https://api.github.com/users/{username}/repos'\n url = api_url.format(username=username)\n response = requests.get(url)\n return response\n\n\n\n #user_url = self.user_info(username=username)\n #repos_url = user_url\n #pprint.pprint(repos_url)\n #url = repos_url['repos_url']\n #response = requests.get(url)\n #return response", "def make_req_url(user, repo, endpoint, limit=50, queries=None):\n url = \"%s%s/%s/%s\" % (API_BASE_URL, user, repo, endpoint)\n\n # Set limit is given and is above 50, set limit to 50\n if limit and limit > 50:\n limit = 50\n url += \"?limit=%d\" % limit\n\n # Add additional query parameters\n if queries:\n for key in queries:\n url += \"&%s=%s\" % (key, queries[key])\n return url", "def test_repo_config_userpass() -> None:\n password = \"pa55word\" # noqa: S105\n repo = RepositoryConfiguration(\n name=\"pypi\",\n base_url=\"https://private.repo.org/pypi\",\n username=\"fred\",\n password=password,\n )\n assert repo.get_access_url() == f\"https://fred:{password}@private.repo.org/pypi\"", "def test_repo_config_token_port() -> None:\n token = \"clksd88sadh4HhJ\" # noqa: S105\n repo = RepositoryConfiguration(\n name=\"pypi\",\n base_url=\"https://private.repo.org:8080/pypi\",\n username=\"fred\",\n token=token,\n )\n assert repo.get_access_url() == f\"https://{token}@private.repo.org:8080/pypi\"", "def main():\n\n # get all repos a user has access to\n gh = Github(options.username, options.pat)\n user = gh.get_user()\n # filter for those under the user account\n userrepos = {\n repo.name : repo.git_url for repo in user.get_repos() \\\n if repo.git_url.startswith(\"git://github.com/\" + options.username)\n }\n # create a backup dir\n dirname = 
datetime.today().strftime(\"%Y%m%d-%H%M%S\")\n os.makedirs(\"./backup/\" + dirname)\n # clone all user repos\n for k, v in userrepos.items():\n url = \"https://\" + options.pat + \"@\" + v.removeprefix(\"git://\")\n subprocess.check_call([\n \"git\",\n \"clone\",\n url,\n \"./backup/\" + dirname + \"/\" + k\n ])", "def __init__(self, user, proj):\n auth_hdr = {\"Authorization\" : \"token \" + input(\"Enter PA token: \")}\n self._session = requests.Session()\n self._session.headers.update(auth_hdr)\n self._base = self.API_ROOT + \"/repos/{}/{}\".format(user, proj)", "def run(organization, top_n, username, pat):\n print()\n try:\n raw_repos = get_repos(organization, username=username, pat=pat)\n except Exception as ex:\n click.echo('Error collecting repos')\n sys.exit(1)\n\n repos = []\n\n with Halo(text='Retrieving repos...', spinner='dots'):\n for raw_repo in raw_repos:\n repos.append(Repo(raw_repo))\n\n if len(repos) == 0:\n print('No public repos were found')\n sys.exit(0)\n\n with Halo(text='Retrieving pull requests...', spinner='dots'):\n try:\n with ThreadPoolExecutor(max_workers=5) as executor:\n future_to_repo = {executor.submit(get_prs, repo.pr_url, username, pat): repo for repo in repos}\n for future in as_completed(future_to_repo):\n repo = future_to_repo[future]\n\n repo.pr_count = future.result()\n except Exception as exc:\n print('%r generated an exception: %s' % (repo.name, exc))\n sys.exit(1)\n\n top_star = sorted(repos, key=lambda repo: repo.stars, reverse=True)[:top_n]\n top_fork = sorted(repos, key=lambda repo: repo.forks, reverse=True)[:top_n]\n top_prs = sorted(repos, key=lambda repo: repo.pr_count, reverse=True)[:top_n]\n top_contrib = sorted(repos, key=lambda repo: repo.contrib, reverse=True)[:top_n]\n\n print_stars(top_star, top_n)\n print_forks(top_fork, top_n)\n print_prs(top_prs, top_n)\n print_contrib(top_contrib, top_n)", "def test_repo_config_token() -> None:\n token = \"clksd88sadh4HhJ\" # noqa: S105\n repo = RepositoryConfiguration(\n name=\"pypi\", base_url=\"https://private.repo.org/pypi\", token=token,\n )\n assert repo.get_access_url() == f\"https://{token}@private.repo.org/pypi\"", "def get_repo_options(account, **kwargs):\n client = AsyncHTTPClient()\n uri = \"https://api.github.com/user/repos?per_page=100\"\n data = []\n while uri is not None:\n req = account.get_request(uri, headers={\"Accept\": \"application/vnd.github.moondragon+json\"})\n response = yield client.fetch(req)\n response_object = json.loads(response.body.decode('utf-8'))\n data += response_object\n links = parse_link_header(response.headers.get('Link', ''))\n uri = links.get('next', None)\n return [{\"title\": repo['full_name'], \"value\": repo['full_name']}\n for repo in data]", "def query_repo_url(repo_name):\n return buildapi.query_repo_url(repo_name)", "def api_scrape_url():\n if 'working_repo' in session:\n meta_data = get_tags(request.args['url'])\n return jsonify(msg=\"success\", data=meta_data)\n else:\n return jsonify(msg=\"failure, unauthorized\"), 401", "def clone_repo(start=0,end=100000):\n repo_list=repo_url['URLs']\n count=0\n\n for url in repo_list[start:end]:\n url=str(url)\n name=url.rsplit('/', 2) #get the repo name (last 2 part) of the repository url\n last=name[-2]+'-'+name[-1]\n try:\n if not os.path.exists(last):\n os.mkdir(last) #Make folder for a repo if it does not exist\n repo=str(url) + '.git'\n folder= r'repos'\n Repo.clone_from(repo,last)\n count+=1\n print('cloned ' , repo)\n except:\n continue\n return count", "def urls(gh, user):\n return [repo.url for 
repo in getuserrepos(gh, user)]", "def getuserrepos(gh, user):\n repos = list()\n pages = int(math.ceil(n_public_repos(gh, user) / float(R_PAGE)))\n for i in range(pages):\n # github index their pages from 1, hence the +1\n qs = user + \"/repos?page=\" + str(i + 1)\n repos.extend(gh.users(qs).get())\n return repos", "def __init__(self, repo_url, creds, branch, repo_path=None, validate=True):\n parsed = urlparse(repo_url)\n self.scheme = parsed.scheme\n self.hostname = parsed.hostname\n self.org, self.repo = parsed.path.strip('/').split('/')\n self.creds = creds\n self.branch = branch\n self.repo_path = repo_path\n self.git_repo = None\n self.validate = validate", "def repo_info():\n return TEST_REPOS_INFO[0]", "def digest_repo(repo_url, GProfile):\n r = gf.get_request('%s' % repo_url)\n if r.ok:\n repoItems = json.loads(r.text or r.content)\n\n signal.signal(signal.SIGALRM, timeout_handler)\n for item in repoItems:\n signal.alarm(10) # skip file if takes more than 10 seconds\n\n try:\n if item['type'] == 'file' and item['name'][-3:] == '.py':\n GProfile.n_pyfiles += 1\n print(item['download_url'])\n get_metrics_per_file(item, GProfile)\n elif item['type'] == 'dir':\n digest_repo(item['url'], GProfile)\n except TimeoutException:\n print('%s timed out, skipping!' % item['download_url'])", "def test_repo_config_basic_port() -> None:\n repo = RepositoryConfiguration(name=\"pypi\", base_url=\"https://pypi.org:443/pypi\")\n assert repo.get_access_url() == \"https://pypi.org:443/pypi\"\n assert repo.name == \"pypi\"", "def run(self, repo_url):\n\n # only supports git.door43.org\n print('* Checking the repository URL...', end=' ')\n if 'git.door43.org' not in repo_url:\n self.errors.append('Only git.door43.org repositories are supported.')\n print('')\n return False\n\n # get gogs user and repository name\n pos = repo_url.find('https://git.door43.org/')\n if pos != 0:\n self.errors.append('Invalid repository URL: {0}'.format(repo_url))\n print('')\n return False\n\n parts = filter(bool, repo_url[23:].split('/'))\n if len(parts) != 2:\n self.errors.append('Not able to determine user and project: {0}'.format(repo_url))\n print('')\n return False\n\n gogs_user = parts[0]\n repo_name = parts[1]\n print('finished.')\n\n # get most recent commit\n print('* Getting the most recent commit...', end=' ')\n commits_html = get_url(join_url_parts(repo_url, 'commits', 'master'))\n\n # parse the dom\n commits_dom = BeautifulSoup(commits_html, 'html.parser')\n commit_row = commits_dom.body.find('table', {'id': 'commits-table'}).find('tbody').find('tr')\n if not commit_row:\n self.errors.append('Commit data was not found for {0}'.format(repo_url))\n\n # commit values: 0=author, 1=sha_and_message, 2=date\n commit_values = commit_row.find_all('td')\n sha_a_tag = commit_values[1].find('a')\n short_sha = sha_a_tag.text\n print('finished.')\n\n # check the meta data\n\n # if not tS, check the usfm directory (1 file per book)\n\n # if tS, check the chapter directories (1 directory per chapter, 1 file per chunk)\n\n # check live.door43.org\n live_url = join_url_parts('https://live.door43.org/u', gogs_user, repo_name, short_sha)\n\n # first, check if the page exists\n print('* Verifying that the output file exists...', end=' ')\n try:\n get_url(live_url)\n except HTTPError as err:\n self.errors.append('Not able to open {0}, {1}'.format(live_url, str(err)))\n print('')\n return False\n print('finished.')\n\n # next, validate the HTML\n print('* Validating the generated HTML...', end=' ')\n validator_url = 
'https://validator.nu/?out=json&charset=UTF-8&parser=html5&doc={0}'.format(\n urllib.quote(live_url))\n friendly_url = 'https://validator.nu/?charset=UTF-8&parser=html5&doc={0}'.format(\n urllib.quote(live_url))\n validator_results = json.loads(get_url(validator_url))\n\n html_warnings = [m for m in validator_results['messages'] if m['type'] == 'info' and m['subType'] == 'warning']\n if html_warnings:\n for html_warning in html_warnings:\n self.warnings.append('HTML Validation Warning: {0}'.format(html_warning['message']))\n self.warnings.append('For details check {0}'.format(friendly_url))\n\n html_errors = [m for m in validator_results['messages'] if m['type'] == 'error']\n if html_errors:\n for html_error in html_errors:\n self.errors.append('HTML Validation Error: {0}'.format(html_error['message']))\n self.errors.append('For details check {0}'.format(friendly_url))\n print('')\n return False\n print('finished.')\n\n return True", "def repo_url(self, repo_url):\n\n self._repo_url = repo_url", "def n_public_repos(gh, user):\n return getuser(gh, user).public_repos", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def get_repo_url(repo, access_protocol, github_login):\n prop = {\n 'https': repo.clone_url,\n 'ssh': repo.ssh_url\n }[access_protocol]\n if access_protocol == 'https' and github_login:\n # we were provided explicit github login. For ssh access it is\n # impossible to specify different login within ssh RI, but it is\n # possible to do so for https logins\n url = URL(prop)\n assert url.scheme in ('http', 'https')\n url.username = github_login\n prop = url.as_str()\n return prop", "def change_config(self, repo):\n with repo.config_writer() as config:\n url = ('https://' + str(self.user.username) + ':' +\n str(self.get_user_token()) + '@github.com/' +\n str(self.user.username) + '/' + self.repo + '.git')\n config.set_value('remote \"origin\"', 'url', url)\n config.set_value('user', 'email', '[email protected]')\n config.set_value('user', 'name', 'Ranvir Singh')\n return config", "def svc_protected_repo(svc_client, identity_headers, it_protected_repo_url):\n from renku.core.models.git import GitURL\n\n payload = {\n \"git_url\": it_protected_repo_url,\n \"depth\": 0,\n }\n\n response = svc_client.post(\"/cache.project_clone\", data=json.dumps(payload), headers=identity_headers)\n\n data = {\n \"project_id\": response.json[\"result\"][\"project_id\"],\n \"skip_template_update\": True,\n \"skip_docker_update\": True,\n }\n svc_client.post(\"/cache.migrate\", data=json.dumps(data), headers=identity_headers)\n\n url_components = GitURL.parse(it_protected_repo_url)\n\n with integration_repo(identity_headers, response.json[\"result\"][\"project_id\"], url_components) as repo:\n with _mock_cache_sync(repo):\n yield svc_client, identity_headers, payload, response", "def get_repos_user(user='xmonader'):\n u = ghclient.get_user(login=user)\n repos = u.get_repos()\n repos_list = []\n for i in range(20):\n page = repos.get_page(i)\n if len(page) == 0:\n break\n repos_list.extend(repos.get_page(i))\n return repos_list" ]
[ "0.7222348", "0.6270832", "0.61279446", "0.61235946", "0.60410225", "0.60330355", "0.59770346", "0.5941654", "0.5870572", "0.5806155", "0.575964", "0.5754485", "0.5734286", "0.56632453", "0.56577796", "0.56416154", "0.5631557", "0.56242985", "0.5606981", "0.56014514", "0.55845004", "0.5526618", "0.5483853", "0.545851", "0.54471195", "0.5445943", "0.54407525", "0.5437581", "0.5431455", "0.53846824" ]
0.72966975
0
pullreq_url is from the dictionary outputted by get_pullreq: dict['_links']['commits']['href']. user and passwd are required to have the 5000 request/h limit.
def get_pullReq_commits(pullreq_url, user, passwd):

    #auth for 5000 request/h limit
    print("\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT")
    if user=='' or passwd=='':
        user = input('username : ')
        passwd = input('passwd : ')

    #fetch 250 max commits
    pullReq_commits = get_requests(pullreq_url, user, passwd)

    return pullReq_commits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pullReq(repo_url, user, passwd):\n \n #auth for 5000 request/h limitprint(\"\\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT\")\n if user=='' or passwd=='':\n user = input('username : ')\n passwd = input('passwd : ')\n\n #repo url\n github_pullReq_url = \"https://api.github.com/repos/{}/pulls?state=all&per_page=100&page=\"\n url = github_pullReq_url.format(repo_url)\n\n #fetch all pages\n pullReq = []\n i=1\n eop = False\n while not eop:\n print(\"\\n\\nFECTHING PAGE {}\".format(i))\n data = get_requests(url+str(i), user, passwd)\n pullReq = pullReq + data\n i+=1\n if len(data) != 100:\n eop = True\n \n return pullReq", "def infocalypse_pull(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n\n if opts['hash']:\n # Use FMS to lookup the uri from the repo hash.\n if opts['uri'] != '':\n ui_.warn(\"Ignoring --uri because --hash is set!\\n\")\n if len(opts['hash']) != 1:\n raise util.Abort(\"Only one --hash value is allowed.\")\n params['FMSREAD_HASH'] = opts['hash'][0]\n params['FMSREAD_ONLYTRUSTED'] = bool(opts['onlytrusted'])\n request_uri = get_uri_from_hash(ui_, repo, params, stored_cfg)\n else:\n request_uri = opts['uri']\n\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --uri option.\\n\")\n return\n\n params['REQUEST_URI'] = request_uri\n # Hmmmm... can't really implement rev.\n execute_pull(ui_, repo, params, stored_cfg)", "def get_pull_requests():\n pull_requests = []\n url_base = f\"https://github.com/{GITHUB_OWNER}/{GITHUB_REPO}/pull/\"\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n pulls = repo.get_pulls(base=\"main\", state=\"closed\")\n last_release_date = repo.get_latest_release().published_at\n for pull in pulls:\n if not pull.draft and pull.closed_at > last_release_date and pull.merged:\n log_line = f\"* {pull.title} [#{pull.number}]({url_base}{pull.number})\"\n pull_requests.append(log_line)\n return pull_requests", "def do_the_pulls(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp_dir:\n path = os.path.join(tmp_dir, \"{}_{}_pulls.txt\".format(repo_id, user_id)\n )\n\n # the first request for pull\n the_url = get_initial_url_pulls(user_id, repo_id)\n resp_obj = requests.get(the_url, headers=headers)\n pull_requests = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for a_pull_request in pull_requests:\n print(a_pull_request, file=out_stream)\n\n # prase the initial request.\n rsp_json = json.loads(resp_obj.text)\n print(\"the len of resp is {}\".format(len(rsp_json)))\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n\n # subsequent requests for pull\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n pull_requests = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for a_pull_request in pull_requests:\n print(a_pull_request, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, basename(path))", "def get_pull_requests_count(self):\n repo_details = self.repo_url.strip().split('/')[-2:]\n pull_requests = 0\n i = 1\n while True:\n args = {'state': 'open', 'page': i, 'per_page': 100}\n api_url = \"https://api.github.com/repos/{}/{}/pulls?{}\".format(repo_details[0], repo_details[1],\n 
urllib.parse.urlencode(args))\n response = requests.request(\"GET\", api_url)\n response = json.loads(response.content)\n if not response:\n return pull_requests\n else:\n pull_requests += len(response)\n i += 1", "def pull_requests_model(self, entry_info, repo_id):\n github_url = entry_info['given']['github_url']\n\n logging.info('Beginning collection of Pull Requests...\\n')\n logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\\n')\n record_model_process(self, repo_id, 'pull_requests')\n\n owner, repo = self.get_owner_repo(github_url)\n\n url = (f'https://api.github.com/repos/{owner}/{repo}/pulls?state=all&' +\n 'direction=asc&per_page=100&page={}')\n\n # Get pull requests that we already have stored\n # Set pseudo key (something other than PK) to \n # check dupicates with\n table = 'pull_requests'\n table_pkey = 'pull_request_id'\n update_col_map = {'pr_src_state': 'state'} \n duplicate_col_map = {'pr_src_id': 'id'}\n\n #list to hold pull requests needing insertion\n prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, \n where_clause='WHERE repo_id = {}'.format(repo_id),\n value_update_col_map={'pr_augur_contributor_id': float('nan')})\n\n # Discover and remove duplicates before we start inserting\n logging.info(\"Count of pull requests needing update or insertion: \" + str(len(prs)) + \"\\n\")\n\n for pr_dict in prs:\n\n pr = {\n 'repo_id': repo_id,\n 'pr_url': pr_dict['url'],\n 'pr_src_id': pr_dict['id'],\n 'pr_src_node_id': None,\n 'pr_html_url': pr_dict['html_url'],\n 'pr_diff_url': pr_dict['diff_url'],\n 'pr_patch_url': pr_dict['patch_url'],\n 'pr_issue_url': pr_dict['issue_url'],\n 'pr_augur_issue_id': None,\n 'pr_src_number': pr_dict['number'],\n 'pr_src_state': pr_dict['state'],\n 'pr_src_locked': pr_dict['locked'],\n 'pr_src_title': pr_dict['title'],\n 'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']),\n 'pr_body': pr_dict['body'],\n 'pr_created_at': pr_dict['created_at'],\n 'pr_updated_at': pr_dict['updated_at'],\n 'pr_closed_at': pr_dict['closed_at'],\n 'pr_merged_at': pr_dict['merged_at'],\n 'pr_merge_commit_sha': pr_dict['merge_commit_sha'],\n 'pr_teams': None,\n 'pr_milestone': pr_dict['milestone']['title'] if pr_dict['milestone'] else None,\n 'pr_commits_url': pr_dict['commits_url'],\n 'pr_review_comments_url': pr_dict['review_comments_url'],\n 'pr_review_comment_url': pr_dict['review_comment_url'],\n 'pr_comments_url': pr_dict['comments_url'],\n 'pr_statuses_url': pr_dict['statuses_url'],\n 'pr_meta_head_id': None,\n 'pr_meta_base_id': None,\n 'pr_src_issue_url': pr_dict['issue_url'],\n 'pr_src_comments_url': pr_dict['comments_url'], # NOTE: this seems redundant\n 'pr_src_review_comments_url': pr_dict['review_comments_url'], # this too\n 'pr_src_commits_url': pr_dict['commits_url'], # this one also seems redundant\n 'pr_src_statuses_url': pr_dict['statuses_url'],\n 'pr_src_author_association': pr_dict['author_association'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': 'GitHub API'\n }\n\n if pr_dict['flag'] == 'need_insertion':\n logging.info(f'PR {pr_dict[\"id\"]} needs to be inserted\\n')\n\n result = self.db.execute(self.pull_requests_table.insert().values(pr))\n logging.info(f\"Added Pull Request: {result.inserted_primary_key}\")\n self.pr_id_inc = int(result.inserted_primary_key[0])\n\n elif pr_dict['flag'] == 'need_update':\n result = self.db.execute(self.pull_requests_table.update().where(\n self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr))\n 
logging.info(\"Updated tuple in the pull_requests table with existing pr_src_id: {}\".format(\n pr_dict['id']))\n self.pr_id_inc = pr_dict['pkey']\n\n else:\n logging.info(\"PR does not need to be inserted. Fetching its id from DB\")\n pr_id_sql = s.sql.text(\"\"\"\n SELECT pull_request_id FROM pull_requests\n WHERE pr_src_id={}\n \"\"\".format(pr_dict['id']))\n\n self.pr_id_inc = int(pd.read_sql(pr_id_sql, self.db).iloc[0]['pull_request_id'])\n\n self.query_labels(pr_dict['labels'], self.pr_id_inc)\n self.query_pr_events(owner, repo, pr_dict['number'], self.pr_id_inc)\n self.query_pr_comments(owner, repo, pr_dict['number'], self.pr_id_inc)\n self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc)\n self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc)\n\n logging.info(f\"Inserted PR data for {owner}/{repo}\")\n self.results_counter += 1\n\n register_task_completion(self, entry_info, repo_id, 'pull_requests')", "def pull_request_commits_model(self, task_info, repo_id):\n\n # query existing PRs and the respective url we will append the commits url to\n pr_url_sql = s.sql.text(\"\"\"\n SELECT DISTINCT pr_url, pull_requests.pull_request_id\n FROM pull_requests--, pull_request_meta\n WHERE repo_id = {}\n \"\"\".format(repo_id))\n urls = pd.read_sql(pr_url_sql, self.db, params={})\n\n for pull_request in urls.itertuples(): # for each url of PRs we have inserted\n commits_url = pull_request.pr_url + '/commits?page={}'\n table = 'pull_request_commits'\n table_pkey = 'pr_cmt_id'\n duplicate_col_map = {'pr_cmt_sha': 'sha'}\n update_col_map = {}\n\n # Use helper paginate function to iterate the commits url and check for dupes\n pr_commits = paginate(self, commits_url, duplicate_col_map, update_col_map, table, table_pkey, \n where_clause=\"where pull_request_id = {}\".format(pull_request.pull_request_id))\n\n for pr_commit in pr_commits: # post-pagination, iterate results\n if pr_commit['flag'] == 'need_insertion': # if non-dupe\n pr_commit_row = {\n 'pull_request_id': pull_request.pull_request_id,\n 'pr_cmt_sha': pr_commit['sha'],\n 'pr_cmt_node_id': pr_commit['node_id'],\n 'pr_cmt_message': pr_commit['commit']['message'],\n # 'pr_cmt_comments_url': pr_commit['comments_url'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': 'GitHub API',\n }\n result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row))\n logging.info(f\"Inserted Pull Request Commit: {result.inserted_primary_key}\\n\")\n\n register_task_completion(self, task_info, repo_id, 'pull_request_commits')", "def get_pr_data_for_repo(owner, repo, client_id, client_secret):\n prs = []\n is_last = False\n url_params = {'owner': owner, 'repo': repo}\n url_pat = 'https://api.github.com/repos/%(owner)s/%(repo)s/pulls'\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'state': 'closed',\n }\n next_url = None\n first = True\n resp = requests.get(url_pat % url_params, params=payload)\n while not is_last:\n # Request next_url if this is not the first request\n if first:\n first = False\n else:\n resp = requests.get(next_url)\n print(resp.url, file=sys.stderr)\n\n # Abort if the return is an error\n out = resp.json()\n if 'message' in out:\n pprint.pprint(out, file=sys.stderr)\n raise Exception(resp.text)\n\n # Process the PRs\n for pr in resp.json():\n if pr['merged_at']:\n # Record the PR\n pr_obj = PullRequest(owner, repo, pr)\n prs.append((owner, repo, pr_obj.number, pr_obj))\n\n # Process the links and get the next URL\n links 
= get_links(resp.headers['Link'])\n next_url = links.get('next')\n is_last = next_url is None\n\n prs.sort()\n return prs", "def get_forks(repo_url, user, passwd):\n \n #auth for 5000 request/h limitprint(\"\\nINPUT GITHUB AUTH TO GET BETTER REQUEST LIMIT\")\n if user=='' or passwd=='':\n user = input('username : ')\n passwd = input('passwd : ')\n\n #repo url\n github_fork_url = \"https://api.github.com/repos/{}/forks?sort=stargazers&per_page=100&page=\"\n url = github_fork_url.format(repo_url)\n\n #fetch all pages\n forks = []\n i=1\n eop = False\n while not eop:\n print(\"\\n\\nFECTHING PAGE {}\".format(i))\n data = get_requests(url+str(i), user, passwd)\n forks = forks + data\n i+=1\n if len(data) != 100:\n eop = True\n \n #reject private ones\n temp = forks\n for fork in temp:\n if fork['private'] == True:\n forks.remove(fork)\n print(\"{} private forks\".format(len(temp)-len(forks)))\n\n return forks", "def get_pull_request(project, num, github_api=3):\r\n if github_api==2 :\r\n url = \"http://github.com/api/v2/json/pulls/{project}/{num}\".format(project=project, num=num)\r\n elif github_api == 3:\r\n url = \"https://api.github.com/repos/{project}/pulls/{num}\".format(project=project, num=num)\r\n response = requests.get(url)\r\n response.raise_for_status()\r\n if github_api == 2 :\r\n return json.loads(response.text)['pull']\r\n return json.loads(response.text)", "async def get_fetch_updates_for(github: GitHubAPI):\n if (limit := await remaining(github)) is None:\n return None\n\n if limit - RATE_LIMIT_THRESHOLD <= CALLS_PR_REPOSITORY:\n return 0\n return math.floor((limit - RATE_LIMIT_THRESHOLD) / CALLS_PR_REPOSITORY)", "async def pr(ctx, number: Option(int, \"Pull request number\")):\n url = f\"{repo}/issues/{number}\"\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"View Pull Request\", url=url))\n await ctx.respond(f\"Here's a link\", view=view)", "def make_req_url(user, repo, endpoint, limit=50, queries=None):\n url = \"%s%s/%s/%s\" % (API_BASE_URL, user, repo, endpoint)\n\n # Set limit is given and is above 50, set limit to 50\n if limit and limit > 50:\n limit = 50\n url += \"?limit=%d\" % limit\n\n # Add additional query parameters\n if queries:\n for key in queries:\n url += \"&%s=%s\" % (key, queries[key])\n return url", "def pullrequest(self, number):\r\n return pullrequests.PullRequest(self, number)", "def create_pull_requests(self, repos, key, msrp, summary, cred_hash, qa_title):\n response = {'status': True, 'data': []}\n\n for repo in repos:\n repo_name = repo['repositoryName']\n reviewed_branch = repo['reviewedBranch']\n base_branch = repo['baseBranch']\n\n json_data = {\n \"title\": qa_title,\n \"description\": summary,\n \"state\": \"OPEN\",\n \"open\": True,\n \"closed\": False,\n \"fromRef\": {\n \"id\": f\"refs/heads/{reviewed_branch}\",\n \"repository\": {\n \"slug\": repo_name,\n \"name\": None,\n \"project\": {\n \"key\": self.code_cloud_api.project_name\n }\n }\n },\n \"toRef\": {\n \"id\": f\"refs/heads/{base_branch}\",\n \"repository\": {\n \"slug\": repo_name,\n \"name\": None,\n \"project\": {\n \"key\": self.code_cloud_api.project_name\n }\n }\n },\n \"locked\": False,\n \"reviewers\": [],\n \"links\": {\"self\":[None]}\n }\n\n url = f'{self.code_cloud_api.branch_api}/{repo_name}/pull-requests'\n pull_response = self.code_cloud_api.post_json(\n url=url, \n json_data=json_data, \n cred_hash=cred_hash\n )\n\n if not pull_response['status']:\n response['data'].append({\n 'error': pull_response['data']['errors'][0]['message'],\n 'repo': 
repo_name\n })\n else:\n response['data'].append({\n 'link': pull_response['data']['links']['self'][0]['href'],\n 'repo': repo_name\n })\n\n return response", "async def _get_pull_requests_for_issue(self, session: ClientSession, issue: dict, data: dict) -> dict:\n url = self._build_url('dev-status/1.0/issue/detail')\n async with session.get(url, params=data) as resp:\n response = await resp.json()\n response['detail'][0].update(issue)\n return response", "def github_issue(issue_url: str, check_rate_limit: bool = True) -> Dict[str, Any]:\n user_agent = os.environ.get('THUMBSUP_USER_AGENT', 'thumbsup')\n headers = {\n 'Accept': GITHUB_ACCEPT_HEADER,\n 'User-Agent': user_agent,\n }\n github_token_file = os.environ.get('THUMBSUP_GITHUB_TOKEN_FILE')\n if github_token_file is not None:\n with open(github_token_file, 'r') as ifp:\n github_token = ifp.read().strip()\n if github_token != UNAUTHENTICATED and github_token != '':\n headers['Authorization'] = f'token {github_token}'\n\n if check_rate_limit:\n r = requests.get(GITHUB_RATE_LIMIT_URL, headers=headers)\n rate_limit_response = r.json()\n remaining_rate_limit = (\n rate_limit_response.get('resources', {}).get('core', {}).get('remaining', 0)\n )\n rate_limit_bound = os.environ.get('THUMBSUP_RATE_LIMIT_BOUND', '10')\n rate_limit_bound = int(rate_limit_bound)\n if remaining_rate_limit < rate_limit_bound:\n raise RateLimitError(f'Remaining GitHub rate limit too low: {remaining_rate_limit}')\n\n match = RE_GITHUB_ISSUE_EXTRACTOR.search(issue_url)\n if not match:\n raise SummaryError(f'URL does not match form of link to GitHub Issue: {issue_url}')\n\n owner, repo, issue_number = match.groups()\n\n issue_url = f'https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}'\n r = requests.get(issue_url, headers=headers)\n issue_response = r.json()\n\n comments_url = f'{issue_url}/comments'\n r = requests.get(comments_url, headers=headers)\n comment_response = r.json()\n comment_response.sort(\n key=lambda c: github_num_reactions(c) + github_num_positive_reactions(c),\n reverse=True\n )\n return {\n 'summarizer': 'github_issue',\n 'issue': issue_response,\n 'comments': comment_response,\n 'emojis': GITHUB_EMOJIS\n }", "async def get_pull_requests(self, issues: list) -> list:\n tasks = []\n async with ClientSession(auth=BasicAuth(*self.auth)) as session:\n for issue in issues:\n data = {\n 'issueId': issue['id'],\n 'applicationType': 'bitbucket',\n 'dataType': 'pullrequest',\n }\n tasks.append(\n asyncio.create_task(\n self._get_pull_requests_for_issue(session, issue, data),\n ),\n )\n\n pull_requests = await asyncio.gather(*tasks)\n\n return self._parser.parse_pull_request_info(pull_requests)", "def getRelevantPRData():\n prInfoFromAPI = getPRsFromAPI()\n diffHeader = headers.copy()\n diffHeader['Accept'] = \"application/vnd.github.v3.diff\"\n textForReviewPRs = []\n\n for PR in prInfoFromAPI:\n labels = [label[\"name\"] for label in PR['labels']]\n if \"Text for Review\" in labels:\n diffResponse = requests.get(PR[\"url\"], headers=diffHeader)\n diff = diffResponse.text\n # Add the info the list\n textForReviewPRs.append({\n \"pull_request_link\": PR[\"html_url\"],\n \"diff\": diff\n })\n if int(diffResponse.headers[\"X-RateLimit-Remaining\"]) <= 2:\n print('GitHub api rate limit will be exceeded; the GITHUB_TOKEN env variable needs to be set.')\n break\n return textForReviewPRs", "def on_pull_request(self, payload):\n pass", "def getPullRequest(upstream, ref, user=None, fork=None, target=None):\r\n\r\n if user is not None:\r\n user = 
user.login\r\n\r\n for p in upstream.get_pulls():\r\n # Check candidate request against specified criteria\r\n if p.head.ref != ref:\r\n continue\r\n\r\n if user is not None and p.head.user.login != user:\r\n continue\r\n\r\n if fork is not None and p.head.repo.url != fork.url:\r\n continue\r\n\r\n if target is not None and p.base.ref != target:\r\n continue\r\n\r\n # If we get here, we found a match\r\n return p\r\n\r\n # No match\r\n return None", "def get_open_pull_requests(self, required_labels=None):\n json = self.get_all('repos/%(owner)s/%(repo)s/pulls' % {\n 'owner': self.repo_owner,\n 'repo': self.repo_name,\n }, params={\n 'direction': 'asc',\n })\n required_labels = required_labels or []\n\n prs = []\n for pr_data in json:\n # Construct the pull request and issue data structures\n pr_id = pr_data['number']\n issue_obj = self.get_issue(pr_id)\n pr_obj = PullRequest(pr_id, pr_data['title'], pr_data['body'], pr_data['head']['ref'],\n pr_data['html_url'], pr_data['created_at'], pr_data['updated_at'], issue_obj)\n\n # Check if the PR survives the filters\n include_pr = True\n for required_label in required_labels:\n if not pr_obj.has_label(required_label):\n include_pr = False\n break\n if include_pr:\n prs.append(pr_obj)\n\n return prs", "def run(organization, top_n, username, pat):\n print()\n try:\n raw_repos = get_repos(organization, username=username, pat=pat)\n except Exception as ex:\n click.echo('Error collecting repos')\n sys.exit(1)\n\n repos = []\n\n with Halo(text='Retrieving repos...', spinner='dots'):\n for raw_repo in raw_repos:\n repos.append(Repo(raw_repo))\n\n if len(repos) == 0:\n print('No public repos were found')\n sys.exit(0)\n\n with Halo(text='Retrieving pull requests...', spinner='dots'):\n try:\n with ThreadPoolExecutor(max_workers=5) as executor:\n future_to_repo = {executor.submit(get_prs, repo.pr_url, username, pat): repo for repo in repos}\n for future in as_completed(future_to_repo):\n repo = future_to_repo[future]\n\n repo.pr_count = future.result()\n except Exception as exc:\n print('%r generated an exception: %s' % (repo.name, exc))\n sys.exit(1)\n\n top_star = sorted(repos, key=lambda repo: repo.stars, reverse=True)[:top_n]\n top_fork = sorted(repos, key=lambda repo: repo.forks, reverse=True)[:top_n]\n top_prs = sorted(repos, key=lambda repo: repo.pr_count, reverse=True)[:top_n]\n top_contrib = sorted(repos, key=lambda repo: repo.contrib, reverse=True)[:top_n]\n\n print_stars(top_star, top_n)\n print_forks(top_fork, top_n)\n print_prs(top_prs, top_n)\n print_contrib(top_contrib, top_n)", "def getPRsFromAPI():\n PRs = []\n pageNumber = 1\n numberOfPRsReturned = 1\n while numberOfPRsReturned != 0:\n PRsResponse = requests.get(\n \"https://api.github.com/repos/greenelab/covid19-review/pulls?state=all&per_page=50&page=\" +\n str(pageNumber), headers=headers)\n PRs_page = json.loads(PRsResponse.text)\n PRs = PRs + PRs_page\n numberOfPRsReturned = len(PRs_page)\n pageNumber += 1\n return PRs", "def remote_pull(*keys):", "def get_api_urls(user, repo, endpoint, start, limit=50):\n req_urls = []\n queries = {}\n queries['start'] = start\n count = 0\n stop = start/limit\n while count <= stop:\n new_url = make_req_url(user, repo, endpoint, limit, queries)\n req_urls.append(new_url)\n queries['start'] -= limit\n count += 1\n return req_urls", "def get_pr_info(num):\r\n url = \"https://api.github.com/repos/edx/edx-platform/pulls/{num}\".format(num=num)\r\n username, token = get_github_creds()\r\n headers = {\r\n \"Authorization\": \"token {}\".format(token),\r\n 
\"User-Agent\": \"edx-release\",\r\n }\r\n response = requests.get(url, headers=headers)\r\n result = response.json()\r\n if not response.ok:\r\n raise requests.exceptions.RequestException(result[\"message\"])\r\n return result", "def test_get_repositories_by_username_by_repo_slug_pullrequests_by_pull_request_id_statuses(self):\n pass", "def get_pull(self, pull_number):\n url = self.base_url + 'pulls/%s' % pull_number\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()", "def save_pull_requests(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':pullRequests:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n pull_requests = u.pullRequests(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n pull_requests = u.pullRequests(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not pull_requests:\n return False\n\n while True:\n if pull_requests['data']['user']['pullRequests']['edges']:\n index = ''.join(['gh_pull_requests-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubPullRequests',\n document=pull_requests,\n login=user.login,\n path=path)\n has_next_page = pull_requests['data']['user']['pullRequests']['pageInfo']['hasNextPage']\n end_cursor = pull_requests['data']['user']['pullRequests']['pageInfo']['endCursor']\n if has_next_page:\n pull_requests = u.pullRequests(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':pullRequests:endCursor']), end_cursor)\n break\n else:\n break\n\n return True" ]
[ "0.79000103", "0.61164", "0.6008621", "0.5987483", "0.59653175", "0.592727", "0.57375395", "0.56855536", "0.5679855", "0.5627813", "0.5618178", "0.5520393", "0.54909617", "0.5481987", "0.5375036", "0.53679514", "0.5272469", "0.52530247", "0.52414536", "0.5240988", "0.5225295", "0.5207643", "0.5205368", "0.5192983", "0.5185096", "0.5174777", "0.51732564", "0.5167139", "0.5105339", "0.5073475" ]
0.81588095
0
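A hedged illustration, not part of the dataset row above: assuming the get_pullReq helper that appears among this row's negatives, the commits href named in the query chains the two calls roughly as sketched below. The "owner/repo" string and variable names are assumptions for illustration only.

# Sketch only: repo string and names are illustrative, not from the dataset.
pulls = get_pullReq("owner/repo", user, passwd)
for pull in pulls:
    commits_url = pull['_links']['commits']['href']   # the href the query refers to
    commits = get_pullReq_commits(commits_url, user, passwd)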
Builds the instance based on the spec, loading images from image_dir. Return the instance, for example with self.load(name)
def create(self, spec, force_cache=False, image_dir="~/.hyperkit"):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(cls, path):\n assert os.path.exists(path), \"No such file: %r\" % path\n\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n image = Image(None)\n image._path = path\n image._format = Image.image_format(extension)\n\n return image", "def load(self, dirname):\n loaded_filenames = set()\n ini_filename = os.path.join(dirname, \"xpresser.ini\")\n if os.path.exists(ini_filename):\n config = ConfigParser.ConfigParser()\n config.read(ini_filename)\n for section_name in config.sections():\n if section_name.startswith(\"image \"):\n image_name = section_name.split(None, 1)[1]\n try:\n image_filename = config.get(section_name, \"filename\")\n except ConfigParser.NoOptionError:\n raise ImageDirError(\"Image %s missing filename option\"\n % image_name)\n image_filename = os.path.join(dirname, image_filename)\n if not os.path.exists(image_filename):\n raise ImageDirError(\"Image %s file not found: %s\" %\n (image_name, image_filename))\n try:\n image_similarity = config.getfloat(section_name,\n \"similarity\")\n except ConfigParser.NoOptionError:\n image_similarity = None\n except ValueError:\n value = config.get(section_name, \"similarity\")\n raise ImageDirError(\"Image %s has bad similarity: %s\"\n % (image_name, value))\n \n try:\n value = config.get(section_name, \"focus_delta\")\n match = CLICK_POSITION_RE.match(value)\n if not match:\n raise ImageDirError(\"Image %s has invalid click \"\n \"position: %s\" %\n (image_name, value))\n image_focus_delta = (int(match.group(\"x\")),\n int(match.group(\"y\")))\n except ConfigParser.NoOptionError:\n image_focus_delta = None\n image = Image(name=image_name,\n filename=image_filename,\n similarity=image_similarity,\n focus_delta=image_focus_delta)\n self._images[image_name] = image\n loaded_filenames.add(image_filename)\n\n # Load any other images implicitly with the default arguments.\n for basename in os.listdir(dirname):\n filename = os.path.join(dirname, basename)\n if filename not in loaded_filenames:\n ftype, fencoding = mimetypes.guess_type(filename)\n if ftype and ftype.startswith(\"image/\"):\n image_name = os.path.splitext(basename)[0]\n self._images[image_name] = Image(name=image_name,\n filename=filename)", "def from_images(cls, specimen_id, image_files):\n views = [LoadingView(v, img) for v, img in image_files]\n return cls(specimen_id, views)", "def __init__(self, train_path='train/image', label_path='train/label', merge_path='train/merge', aug_merge_path='train/aug_merge', aug_train_path='train/aug_train', aug_label_path='train/aug_label', img_type=\"nii\"):\n\n\t\tself.train_imgs = glob.glob(\"/*.\"+img_type)\n\t\tself.label_imgs = glob.glob(\"/*.\"+img_type)\n\t\tself.train_path = train_path\n\t\tself.label_path = label_path\n\t\tself.merge_path = merge_path\n\t\tself.img_type = img_type\n\t\tself.aug_merge_path = aug_merge_path\n\t\tself.aug_train_path = aug_train_path\n\t\tself.aug_label_path = aug_label_path\n\t\tself.slices = len(self.train_imgs)\n\t\tself.datagen = ImageDataGenerator(\n\t\t\t\t\t\t\t rotation_range=0.2,\n\t\t\t\t\t\t\t width_shift_range=0.05,\n\t\t\t\t\t\t\t height_shift_range=0.05,\n\t\t\t\t\t\t\t shear_range=0.05,\n\t\t\t\t\t\t\t zoom_range=0.05,\n\t\t\t\t\t\t\t horizontal_flip=True,\n\t\t\t\t\t\t\t fill_mode='nearest')", "def __init__(self, train_path=\"./data/train/image\", label_path=\"./data/train/label\",\n\t\t\t\t merge_path=\"./data/train/merge\", aug_merge_path=\"./data/train/aug_merge\", \n\t\t\t\t aug_train_path=\"./data/train/aug_images\", 
\n\t\t\t\t aug_label_path=\"./data/train/aug_masks\", img_type=\"tif\"):\n\n\t\tself.train_imgs = glob.glob(train_path+\"/*.\"+img_type)\n\t\tself.label_imgs = glob.glob(label_path+\"/*.\"+img_type)\n\t\tself.train_path = train_path\n\t\tself.label_path = label_path\n\t\tself.merge_path = merge_path\n\t\tself.img_type = img_type\n\t\tself.aug_merge_path = aug_merge_path\n\t\tself.aug_train_path = aug_train_path\n\t\tself.aug_label_path = aug_label_path\n\n\t\tif not os.path.exists(merge_path):\n\t\t\tos.mkdir(merge_path)\n\t\t\tos.mkdir(aug_merge_path)\n\t\t\tos.mkdir(aug_train_path)\n\t\t\tos.mkdir(aug_label_path)\n\n\t\tself.slices = len(self.train_imgs)\n\t\tself.datagen = ImageDataGenerator(\n\t\t\t\t\t\t\t\t\tpreprocessing_function=self.preprocess,\n\t\t\t\t\t\t\t\t\trotation_range=0.2,\n\t\t\t\t\t\t\t\t\twidth_shift_range=0.1,\n\t\t\t\t\t\t\t\t\theight_shift_range=0.1,\n\t\t\t\t\t\t\t\t\tshear_range=0.05,\n\t\t\t\t\t\t\t\t\tzoom_range=0.05,\n\t\t\t\t\t\t\t\t\thorizontal_flip=True,\n\t\t\t\t\t\t\t\t\tfill_mode='nearest')", "def __init__(self, generate_image_pyramid: bool = True):\n if not os.path.exists(MANIFEST_OUTPUT_DIR):\n os.makedirs(MANIFEST_OUTPUT_DIR)\n self._manifest_factory = ManifestFactory()\n self._manifest_factory.set_base_prezi_uri(MANIFEST_BASE_URL)\n self._manifest_factory.set_base_prezi_dir(MANIFEST_OUTPUT_DIR)\n self._manifest_factory.set_base_image_uri(IMAGE_BASE_URL)\n self._manifest_factory.set_iiif_image_info(2.0, 1) # Version, ComplianceLevel\n\n self._image_reader = ImageReader(IMAGE_SOURCE_DIR)\n\n if generate_image_pyramid:\n self._tile_generator = IIIFStatic(dst=IMAGE_FILE_OUTPUT_DIR, prefix=IMAGE_BASE_URL)\n\n self._generate_images = generate_image_pyramid", "def load(cls, path, name, **kwargs):\n path = Path(path)\n assert path.exists() and path.is_dir(), f\"Load location {path} doesnt exist.\"\n\n pickle_path = path / (name + \".pkl\")\n image_path = path / (name + \"_image.npy\")\n depths_path = path / (name + \"_depths.npy\")\n\n if pickle_path.is_file():\n with open(pickle_path, 'rb') as pickle_file:\n return dill.load(pickle_file)\n\n assert image_path.is_file(), \"_image.npy file must exist if pickle doesnt.\"\n img = np.load(image_path)\n\n if depths_path.is_file():\n kwargs[\"depths\"] = np.load(depths_path)\n else:\n assert (\n \"top\" in kwargs.keys() and \"base\" in kwargs.keys()\n ), \"Depth info needed.\"\n\n return cls(img, **kwargs)", "def __init__(self, images, loader):\n super().__init__()\n self._images = images\n self._loader = loader", "def __init__(self, data_dir):\n self.data_dir = data_dir\n\n # reading in the images present\n self.files = os.listdir(self.data_dir)", "def __init__(self, width, height):\n\n self._width = width\n self._height = height\n \n # The images in the cache!\n self._images = {} # {filename : bitmap}\n\n return", "def __init__(self, path, margin=25, folder=\"output\"):\n self.image = Image.open(path)\n self.width, self.height = self.image.size\n self.path = path\n self.margin = margin\n self.output_path = os.path.join(os.path.dirname(self.path),\n folder,\n os.path.basename(self.path))", "def __init__(self,\n directory,\n train=True,\n imsize=(256, 256),\n num_channels=3,\n scale=True,\n invert_white_images=True):\n\n # Sets all attributes.\n args, _, _, values = inspect.getargvalues(inspect.currentframe())\n values.pop(\"self\")\n for arg, val in values.items():\n setattr(self, arg, val)\n\n self.IMG_MAX = 255.0\n\n data_pattern = os.path.join(directory, \"**/images/*.png\")\n\n self.metadata_ = []\n 
self.masks_ = []\n self.metadata_columns = [\"image_id\", \"orig_shape\"]\n\n self.data_ic_ = ImageCollection(data_pattern)", "def get_loader(config, image_path, crop_size, batch_size,sampler, num_workers=2, mode='train', augmentation_prob=0.4):\r\n\r\n dataset = ImageFolder(config)\r\n data_loader = data.DataLoader(dataset=dataset, batch_size=batch_size, num_workers=num_workers, sampler=sampler)\r\n \r\n return data_loader", "def __init__(self, data_dir, file_prefix, num_images):\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n self.images = []\n self.display_match = False\n self.useBlending = False\n print('found %d images' % len(self.files))", "def get_loader(image_dir, attr_path, selected_attrs, crop_size=178, image_size=128, \n batch_size=16, dataset='CelebA', mode='train', num_workers=0):\n transform = []\n if mode == 'train':\n transform.append(T.RandomHorizontalFlip())\n transform.append(T.CenterCrop(crop_size))\n transform.append(T.Resize(image_size))\n transform.append(T.ToTensor())\n transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))\n transform = T.Compose(transform)\n\n if dataset == 'CelebA':\n dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode)\n elif dataset == 'RaFD':\n dataset = ImageFolder(image_dir, transform)\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=(mode=='train'),\n num_workers=num_workers)\n return data_loader", "def get_image(self, image_dir_root=None):\n image_dir_root = image_dir_root or image_dir\n return Image.open(os.path.join(image_dir_root, self.name))", "def __init__(self, metadata_folder='./'):\n self.metadata = self.load_metadata(metadata_folder)\n self.prefix = 'data/miap/images/'\n return", "def _load_disk(self):\r\n s = self.file_string + ' '\r\n im = Image.open(self.file_string)\r\n\r\n self.ix, self.iy = im.size\r\n s += '(%s)' % im.mode\r\n self.alpha = (im.mode == 'RGBA' or im.mode == 'LA')\r\n\r\n if self.mipmap:\r\n resize_type = Image.BICUBIC\r\n else:\r\n resize_type = Image.NEAREST\r\n\r\n # work out if sizes > MAX_SIZE or coerce to golden values in WIDTHS\r\n if self.iy > self.ix and self.iy > MAX_SIZE: # fairly rare circumstance\r\n im = im.resize((int((MAX_SIZE * self.ix) / self.iy), MAX_SIZE))\r\n self.ix, self.iy = im.size\r\n n = len(WIDTHS)\r\n for i in xrange(n-1, 0, -1):\r\n if self.ix == WIDTHS[i]:\r\n break # no need to resize as already a golden size\r\n if self.ix > WIDTHS[i]:\r\n im = im.resize((WIDTHS[i], int((WIDTHS[i] * self.iy) / self.ix)),\r\n resize_type)\r\n self.ix, self.iy = im.size\r\n break\r\n\r\n if VERBOSE:\r\n print('Loading ...{}'.format(s))\r\n\r\n if self.flip:\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()\r\n if 'fonts/' in self.file_string:\r\n self.im = im", "def __init__(\n self,\n raw_img_dir: Path,\n ground_truth_dir: Path,\n pattern: str = \"\",\n transform=None,\n target_transform=None,\n ):\n pattern += \"*.tif\"\n self.raw_img_names = sorted(raw_img_dir.glob(pattern))\n self.ground_truth_names = sorted(ground_truth_dir.glob(pattern))\n assert len(self.raw_img_names) == len(self.ground_truth_names)\n self.transform = transform\n self.target_transform = target_transform", "def __init__(self, 
detector):\n self.base_dir = os.path.join(os.getcwd(), cfg.local[\"BASE_DB\"])\n self.images_dir = os.path.join(self.base_dir, cfg.local[\"IMG_DIR\"])\n self.X_filename = os.path.join(self.base_dir, cfg.data[\"X_NAME\"])\n self.y_filename = os.path.join(self.base_dir, cfg.data[\"y_NAME\"])\n self.le_filename = os.path.join(self.base_dir, cfg.models[\"LE_NAME\"])\n self.detector = detector\n\n if not os.path.exists(self.base_dir):\n os.mkdir(self.base_dir)\n\n if not os.path.exists(self.images_dir):\n os.mkdir(self.images_dir)\n\n #Load basic information here\n self.__initDataFromImages() #Init before load\n self.__loadPreProcessedData()", "def load_image(self, **kwargs):\n ...", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def __init__(self, data_dir, file_prefix, num_images):\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n print('found %d images' % len(self.files))", "def __init__(\n self,\n root: str = folder,\n train_subset: bool = True,\n suffix: str = '.png',\n min_num_cls: int = 5,\n max_num_cls: int = 20,\n k_shot: int = 20,\n expand_dim: bool = False,\n load_images: bool = True\n ) -> None:\n self.root = os.path.join(root, 'images_background' if train_subset else 'images_evaluation')\n self.suffix = suffix\n self.min_num_cls = min_num_cls\n self.max_num_cls = max_num_cls\n self.k_shot = k_shot\n self.expand_dim = expand_dim\n self.load_images = load_images\n\n # create a nested dictionary to store data\n self.data = dict.fromkeys(list_dir(root=self.root))\n for alphabet in self.data:\n self.data[alphabet] = dict.fromkeys(list_dir(root=os.path.join(self.root, alphabet)))\n\n # loop through each alphabet\n for character in self.data[alphabet]:\n self.data[alphabet][character] = []\n\n # loop through all images in an alphabet character\n for img_name in list_files(root=os.path.join(self.root, alphabet, character), suffix=suffix):\n if self.load_images:\n # load images\n img = _load_image(img_url=os.path.join(self.root, alphabet, character, img_name), expand_dim=self.expand_dim)\n else:\n img = img_name\n\n self.data[alphabet][character].append(img)", "def load_from_file(self, filename):\n\n loader = ImageLoader()\n loader.load(self, filename)", "def __init__(self, benchmark_name, image_set_name, dataset_path, patch_width, patch_height, cache_path=None):\n self.image_set_name = image_set_name\n self.name = benchmark_name + '_' + image_set_name + '_w{}xh{}'.format(patch_width, patch_height)\n self.dataset_path = dataset_path\n if cache_path:\n self._cache_path = cache_path\n else:\n self._cache_path = dataset_path\n\n self.patch_width = patch_width\n self.patch_height = patch_height\n\n # abstract attributes\n # self.image_set_index = []\n self.num_images = 0", "def __init__(self, specs, resources, 
properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def _load(self, pkgpart, part_dict):\n # call parent to do generic aspects of load\n super(Image, self)._load(pkgpart, part_dict)\n # set file extension\n self.__ext = posixpath.splitext(pkgpart.partname)[1]\n # return self-reference to allow generative calling\n return self", "def image_loader(image_name, dev):\n image = Image.open(image_name)\n image = loader(image).float()\n image = Variable(image, requires_grad=True)\n image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\n return image.to(dev) #assumes that you're using GPU", "def fromcomplist(self, *args, **kwargs):\n return _image.image_fromcomplist(self, *args, **kwargs)" ]
[ "0.61524683", "0.6037816", "0.60318625", "0.59158003", "0.5908878", "0.5878213", "0.58532834", "0.582987", "0.5799404", "0.57779765", "0.5770414", "0.5768476", "0.5713612", "0.56738454", "0.5671704", "0.5643247", "0.5602958", "0.55821663", "0.55706567", "0.5565781", "0.55488724", "0.55405897", "0.55387557", "0.552259", "0.55081326", "0.54984725", "0.54686975", "0.54625624", "0.5438643", "0.5429571" ]
0.6255592
0
Test admin successful registration
def test_admin_register(self):
    admin = dict(
        name='Jonnie Pemba',
        username='jonnie',
        password='Andela8',
        role='admin'
    )

    resp = self.client.post(
        '/api/v1/register',
        content_type='application/json',
        data=json.dumps(admin)
    )

    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Jonnie Pemba has been registered')
    self.assertEqual(resp.status_code, 201)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_only_one_admin(self):\n reply = self.admin_register()\n\n admin = dict(\n name='Codjoe Ronnie',\n username='ronnie',\n password='Andela8',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Admin is already registered, please login!')\n self.assertEqual(resp.status_code, 400)", "def test_hospital_admin_registration(self):\n\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Doctor',\n 'last_name': 'JaneDoe'\n }\n\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n user = MyUser.objects.get(email=response.data['user']['email'])\n self.assertTrue(user.check_password(payload['user']['password']))\n self.assertNotIn('password', response.data)\n self.assertIn('token', response.data['user'])\n self.assertTrue(user.is_hospital_admin)", "def test_sucess(self):\n msg = self.user.registration(\"MrShort\",\n \"[email protected]\",\n \"notshort\",\n \"notshort\")\n self.assertEqual(msg, \"Your account is now registered please proceed to login\")", "def test_user_registration(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(1,result,\"User registration successful\")", "def test_successful_registration(self):\n response = self.client.post('/o/register', {\n 'email': '[email protected]',\n 'password': '123new',\n 'terms_acceptance': True,\n })\n self.assertRedirects(\n response,\n settings.ANGULAR_ROOT,\n 302,\n fetch_redirect_response=False,\n )\n self.assertEqual(User.objects.all().count(), 1)", "def test_success_register():\n assert not register(\"abc123\", \"qwerty123456\", \"Bob\", \"John\", \"[email protected]\")\n\n # Check that user data was updated and that the user is logged in\n new_user = data.users.get(\"abc123\")\n assert new_user\n assert new_user.logged_in == True", "def test_register_new_user(self):\n with self.client:\n response = self.client.post(\n url_for('register'),\n data=dict(\n first_name='Admin',\n last_name='Admin',\n email='[email protected]',\n password='admin2016',\n confirm_password='admin2016'\n ),\n follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)", "def test_register(self):\n\t\tresponse = self.client.get('/register')\n\t\tself.assertContains(response, 'Register', 3, 200)", "def test_create_user_page(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n # Assertions\n self.assertEqual(res.status_code, 200)", "def test_01_account_register(self):\n self.register()\n self.assertEquals(\n self.selenium.current_url, self.get_absolute_url())\n print 'test_register_valid_password completed'", "def test_register_page(self):\n\n result = self.client.get('/register')\n self.assertIn('<h1>Register</h1>', result.data)\n\n print \"DONE WITH REGISTER CHECK\"", "def test_register_user_successfully(self):\n\n response = self.client.post(\n self.reg_url,\n self.base.reg_data,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertIn(b\"successfully\", response.content)", "def test_admin_create_user(self):\n resp = self.admin_register()\n reply = 
self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Summer Love has been registered')\n self.assertEqual(resp.status_code, 201)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_valid_registration(self):\n r = dict(\n email='[email protected]',\n username='crow',\n password='I_do_not_caw',\n confirm_password='I_do_not_caw',\n first_name='magpie',\n last_name='corvid'\n )\n resp = self.client.post('/user/register', data=r, follow_redirects=True)\n self.assertEquals(resp.status_code, 200)", "def test_create_user(self):\n #open the django admin page.\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin\")\n )\n\n #fill in login information of admin\n username = self.selenium.find_element_by_id(\"id_username\")\n username.send_keys(\"admin\")\n password = self.selenium.find_element_by_id(\"id_password\")\n password.send_keys(\"admin\")\n\n #locate login button and click it.\n self.selenium.find_element_by_xpath('//input[@value=\"Inloggen\"]').click()\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin/auth/user/add/\")\n )\n\n # Fill the create user form with username and password\n self.selenium.find_element_by_id(\"id_username\").send_keys(\"test\")\n self.selenium.find_element_by_id(\"id_password1\").send_keys(\"test1234\")\n self.selenium.find_element_by_id(\"id_password2\").send_keys(\"test1234\")\n\n # Forms can be submitted directly by calling its method submit\n self.selenium.find_element_by_id(\"user_form\").submit()\n self.assertIn(\"Change user\", self.selenium.title)", "def test_register_page(self):\n\n result = self.client.get(\"/register\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Register New User</span><br>\", result.data)\n self.assertIn(b\"Confirm Password\", result.data)", "def test_registration(self):\n\n print(\" --------------------------- Test 1 - Registration ----------------------------\")\n user_id = uuid.uuid4()\n password = \"my-precious\"\n currency = \"EUR\"\n\n response = register_user(user_id, password, currency)\n data = response.json()['message']\n self.assertEqual(response.json()['code'], 201)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(response.headers['Content-Type'] == 'application/json')\n print(json.dumps(data, indent=4))", "def test_user_registeration(self):\n with self.client:\n response = self.client.post('/users/signup', data=dict(\n username='tigarcia',password='moxies',name=\"Tim\",email=\"[email protected]\"\n ), follow_redirects=True)\n self.assertIn(b'Welcome', response.data)\n self.assertTrue(current_user.username == \"tigarcia\")\n # make sure we hash the password!\n 
self.assertNotEqual(current_user.password, \"moxies\")\n self.assertTrue(current_user.is_authenticated)", "def test_register_user(self):\n response = self.signup_a_user(self.user_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['message'],\n \"User successfully created. Check email for \"\n \"verification link\")", "def test_register(self):\n selenium = self.selenium\n # Opening the link we want to test\n selenium.get(self.live_server_url + '/account/signup/')\n # find the form element\n username = selenium.find_element_by_id('id_username')\n password1 = selenium.find_element_by_id('id_password1')\n password2 = selenium.find_element_by_id('id_password2')\n submit = selenium.find_element_by_xpath('//button[text()=\"Sign up\"]')\n\n # Fill the form with data\n username.send_keys('some username')\n password1.send_keys('123456')\n password2.send_keys('123456')\n\n # submitting the form\n submit.click()\n\n # check the returned result\n assert 'Username' in selenium.page_source", "def test_register(self):\n users = User.objects.filter(username='test')\n self.assertTrue(len(users) == 0)\n\n username = \"test3\"\n data = {'username': username, 'password': \"123test\", 'email': '[email protected]',\n 'newsletter': 'false', 'research': 'true', 'device': self.device}\n\n response = self.requestRegistration(data)\n\n self.assertTrue('client_id' in response.data)\n self.assertTrue(not 'password' in response.data)\n\n users = User.objects.filter(username=username)\n self.assertTrue(len(users) == 1)\n user = users[0]\n profile = user.user_profile\n self.assertTrue(profile.research)\n self.assertFalse(profile.newsletter)\n\n phone = Device.objects.get(user=user)\n\n self.assertTrue(phone.uuid == self.uuid)\n self.assertTrue(phone.cordova == self.device['cordova'])", "def registration(self):\n response = self.app.get(\"/registration\")\n self.assertTrue(response.status_code, 200)\"\"\"\"\"\"", "def test_user_register(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().email, '[email protected]')", "def test_successful_registration(self):\n with self.client:\n response = register_user(\n self, 'Random', 'User', '[email protected]', 'aaaAAA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] ==\n \"Account for '[email protected]' has been created.\")\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)", "def test_register(self):\n app = self.create_app()\n c = app.test_client()\n\n # test response of register page\n c.get('/auth/register')\n self.assert_template_used(\"auth/register.html\")\n\n # test registering user\n rv = register(c, app.config['USERNAME'], app.config['PASSWORD'])\n self.assert_status(rv, 200)\n\n # test registering user with the same name\n register(c, app.config['USERNAME'], app.config['PASSWORD'])\n self.assert_message_flashed(f\"User {app.config['USERNAME']} is already registered.\")", "def test_user_sign_up_success(self):\n res = self.client.post(reverse('sign_up'), data={\n 'username': '[email protected]',\n 'first_name': 'Test',\n 'last_name': 'User',\n 'password1': PASSWORD,\n ''\n })" ]
[ "0.7809236", "0.7805565", "0.7801625", "0.76964223", "0.7696397", "0.7694462", "0.76621336", "0.75906897", "0.7580382", "0.7477269", "0.7457051", "0.7443463", "0.7440877", "0.74344105", "0.74344105", "0.74344105", "0.74344105", "0.74272805", "0.7426961", "0.74158543", "0.7403459", "0.73766685", "0.73628396", "0.73555803", "0.7352089", "0.7331895", "0.7325464", "0.73130685", "0.7307021", "0.7302073" ]
0.82520497
0
Test admin cannot register with an empty name field
def test_admin_register_no_name(self):
    admin = dict(
        name='',
        username='jonnie',
        password='Andela8',
        role='admin'
    )

    resp = self.client.post(
        '/api/v1/register',
        content_type='application/json',
        data=json.dumps(admin)
    )

    reply = json.loads(resp.data.decode())

    self.assertEqual(reply['message'], 'Enter name / username in string format!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_user_name_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the username field please\")", "def test_admin_register_no_username(self):\n admin = dict(\n name='Jonnie Pemba',\n username='',\n password='Andela8',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter name / username in string format!')\n self.assertEqual(resp.status_code, 400)", "def test_empty_second_name_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the second name field please\")", "def test_missing_name(superuser):\n form = RegisterForm(superuser,\n description='OAuth2 Client',\n is_confidential=choice([True, False]),\n redirect_uris='http://localhost/',\n default_scopes='read write')\n\n assert form.validate() is False\n assert _('This field is required.') in form.name.errors", "def test_should_name_field(self):\n self.assertIn(\"name\", self.fields)", "def test_empty_first_name_field(self):\r\n result=self.user.get_user_register(\"\",\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Please fill in the first name field\")", "def test_admin_cannot_create_user_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def test_name_empty_string(self):\r\n self.name = \"\"", "def test_name_required(self):\n self.required_field_fail('name', self.test_data['pants'])", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def test_username_is_writable_for_user_creation(self):\n request = Mock()\n assert 'username' not in self.admin.get_readonly_fields(request)", "def test_blank_names(self):\n rv = self.signup('', '', '[email protected]', 'Bo1995', 'Bo1995')\n self.assertIn(b'This field is required.', rv.data)", "def test_admin_cannot_create_users_with_same_name(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'This name is already registered!')\n self.assertEqual(resp.status_code, 400)", "def 
test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)", "def testAddNoneUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, \"password\"))", "def test_admin_cannot_create_user_with_invalid_name(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love3',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter name in a correct string format, (john doe)!')\n self.assertEqual(resp.status_code, 400)", "def test_no_name(self):\n data = self._get_form_data(name='')\n form = self._get_form(data=data)\n self.assertFalse(self._validate_form(form))\n self.assertTrue('name' in form.errors)", "def test_user_signup_with_invalid_first_name(self):\n pass", "def test_admin_register_no_password(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_setup(self):\n self.assertEqual(self.form._meta.model.USERNAME_FIELD, self.form.name_for_user)\n self.assertEqual(self.form._meta.model.get_email_field_name(), self.form.name_for_email)\n self.assertIn(self.form._meta.model.get_email_field_name(), self.form.fields)\n self.assertNotIn('email_field', self.form.fields)", "def testAddEmptyPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userE\", \"\"))", "def test_user_registeration_without_username(self):\n url = reverse(\"register_user\")\n response = self.client.post(url, { \"username\": \"\", \"email\":\"[email protected]\", \"password\":\"123\"})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"register_user.html\")", "def test_empty_email_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the email field please\")", "def test_empty_data(self, client):\n url = reverse('users:create')\n response = client.post(url)\n assert response.status_code == 200\n assert 'This field is required.' 
in str(response.content)", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def test_add_user(self):\n pass", "def test_reserved_name(self):\n with self.assertRaises(ValidationError):\n field_name_validator('_id')", "def test_blank(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '',\n 'password2': ''\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def test_admin_cannot_update_user_with_empty_fields(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)" ]
[ "0.7362851", "0.73370975", "0.7315861", "0.7309367", "0.72637767", "0.7149645", "0.7137991", "0.7085262", "0.7083449", "0.69957393", "0.6902289", "0.6869284", "0.68501955", "0.68169886", "0.6810906", "0.68084204", "0.67463064", "0.67453456", "0.6722818", "0.66770905", "0.6634527", "0.6625964", "0.65963686", "0.6593949", "0.65923786", "0.6590555", "0.65623564", "0.65521204", "0.65491176", "0.65484625" ]
0.75739056
0
Test admin cannot register with empty password field
def test_admin_register_no_password(self): admin = dict( name='Jonnie Pemba', username='jonnie', password='', role='admin' ) resp = self.client.post( '/api/v1/register', content_type='application/json', data=json.dumps(admin) ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!') self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_password_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\"\")\r\n self.assertEqual(2,result,\"Fill in the password field please\")", "def test_invalid_password(self):\n pass", "def test_user_registeration_without_password(self):\n url = reverse(\"register_user\")\n response = self.client.post(url, { \"username\": \"janedoe\", \"email\":\"[email protected]\", \"password\":\"\"})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"register_user.html\")", "def test_passwordsuccess(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '2$n5[]$nnA5Y}2}}^gba',\n 'password2': '2$n5[]$nnA5Y}2}}^gba'\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertTrue(form.is_valid())", "def testAddEmptyPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userE\", \"\"))", "def test_new_password(self):\n form_data = self.form_data(self.pwd)\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def testAddNonePassword(self):\n self.assertEquals(models.ERR_BAD_PASSWORD, self.users.add(\"userF\", None))", "def test_set_user_password(self):\n pass", "def test_user_empty_password(self):\n data = json.dumps({\n \"username\" : \"lenny\", \"email\" : \"[email protected]\",\n \"password\" : \"\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_register_user_with_empty_password(self, app):\n data = RegisterUser.random()\n setattr(data, \"password\", None)\n res = app.register.register(\n data=data, type_response=RegisterUserResponseInvalid\n )\n assert res.status_code == 400\n assert res.data.message == ResponseText.MESSAGE_REGISTER_USER_INVALID", "def test_blank_password(self):\n rv = self.signup('Bo', 'Theo', '[email protected]', '', 'Bo1995')\n self.assertIn(b'This field is required.', rv.data)", "def test_admin_register_wrong_password(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='Andela',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_blank_password(self):\n response = self.register({\n 'first_name': \"David\",\n 'last_name': \"Smith\",\n 'password': '',\n 'email': \"[email protected]\",\n 'phone_number': \"012-345-6789\"\n })\n self.assertEqual(response.status_code, 400)\n self.assertDictContainsSubset({'message': \"Password/Email cannot be empty\"}, response.json())", "def test_no_password_registration(self):\n self.response = self.client.post(\n \"/api/users/\",\n {\"user\": {\n \"username\": \"kake\",\n \"email\": \"[email protected]\",\n \"password\": \"\",\n }\n },\n format=\"json\"\n )\n 
self.assertEqual(self.response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual('This field may not be blank.',\n self.response.json()['errors']['password'][0])", "def test_empty_password_field(self):\n self.empty_password = {'user': {\n \"username\": \"remmy\",\n \"email\": \"[email protected]\",\n \"password\": \"\"\n }\n }\n\n response = self.client.post(\n self.reg_url,\n self.empty_password,\n format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b\"may not be blank\", response.content)", "def test_registeration_no_password(self):\n response = self.signup_a_user(self.user_lacks_password)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"password\"],\n [\"This field may not be blank.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_empty_invalid_password(self):\n pass_field = Field(\"\")\n\n with self.assertRaises(ValidationError):\n valid_password(None, pass_field)", "def test_password_too_short(self):\n\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'tTTt'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n user_exitst = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exitst)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test_login_with_empty_password(self):\n reply = self.admin_register()\n user = dict(\n username='jonnie',\n password=''\n )\n resp = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n\n reply = json.loads(resp.data.decode())\n\n\n self.assertEqual(reply['message'], 'Wrong password!')\n self.assertEqual(resp.status_code, 400)", "def test_user_empty_conf_password(self):\n data = json.dumps({\n \"username\" : \"lenny\", \"email\" : \"[email protected]\",\n \"password\" : \"secret\", \"confirm_password\" : \"\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"\"))", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'name': 'test Name'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_password_is_to_short(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def test_invalid_password_userregisterform(self):\n form = UserRegisterForm(\n data={\n \"username\": \"BobRobert\",\n \"first_name\": \"Bob\",\n \"last_name\": \"Robert\",\n \"email\": \"[email protected]\",\n \"password1\": \"ko\",\n \"password2\": \"ko\",\n \"robot\": True,\n }\n )\n self.assertFalse(form.is_valid())", "def test_password_too_short(self):\n\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"us\",\n \"is_doctor\": False,\n 
\"is_hospital_admin\": True\n },\n 'first_name': 'Test1',\n 'last_name': 'JustUser2'\n }\n\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_invalid_not_match_password_userregisterform(self):\n form = UserRegisterForm(\n data={\n \"username\": \"BobRobert\",\n \"first_name\": \"Bob\",\n \"last_name\": \"Robert\",\n \"email\": \"[email protected]\",\n \"password1\": \"fglZfYmr%?,\",\n \"password2\": \"ko_fglZfYmr%?,\",\n \"robot\": True,\n }\n )\n self.assertFalse(form.is_valid())", "def test_specialchar(self):\n form_data = {\n 'username': 'testuser',\n 'password1': 'vNzwXpzKJyTshvHsuULn',\n 'password2': 'vNzwXpzKJyTshvHsuULn'\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def test_password_too_short(self):\n\t\tpayload = {\n\t\t'email': '[email protected]',\n\t\t'password': 'pw',\n\t\t'name': 'test'\n\t\t}\n\n\t\tres = self.client.post(CREATE_USER_URL, payload)\n\t\tself.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)\n\t\tuser_exists = get_user_model().objects.filter(\n\t\t\temail = payload['email']\n\t\t\t).exists()\n\n\t\tself.assertFalse(user_exists)" ]
[ "0.84288025", "0.7932215", "0.78030753", "0.7799765", "0.77714837", "0.7749584", "0.7745351", "0.7689125", "0.76422065", "0.76118517", "0.7564709", "0.7561372", "0.7557103", "0.75487673", "0.75414103", "0.7503708", "0.75003606", "0.7489784", "0.74815494", "0.74763495", "0.7467153", "0.7455795", "0.7431228", "0.74273753", "0.74163276", "0.7407021", "0.73983616", "0.7386312", "0.7381865", "0.7376226" ]
0.80885565
1
Test admin cannot register with invalid password field
def test_admin_register_wrong_password(self): admin = dict( name='Jonnie Pemba', username='jonnie', password='Andela', role='admin' ) resp = self.client.post( '/api/v1/register', content_type='application/json', data=json.dumps(admin) ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!') self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalid_password(self):\n pass", "def test_admin_register_no_password(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_empty_password_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\"\")\r\n self.assertEqual(2,result,\"Fill in the password field please\")", "def test_new_password(self):\n form_data = self.form_data(self.pwd)\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_passwordsuccess(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '2$n5[]$nnA5Y}2}}^gba',\n 'password2': '2$n5[]$nnA5Y}2}}^gba'\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertTrue(form.is_valid())", "def test_invalid_password_userregisterform(self):\n form = UserRegisterForm(\n data={\n \"username\": \"BobRobert\",\n \"first_name\": \"Bob\",\n \"last_name\": \"Robert\",\n \"email\": \"[email protected]\",\n \"password1\": \"ko\",\n \"password2\": \"ko\",\n \"robot\": True,\n }\n )\n self.assertFalse(form.is_valid())", "def test_invalid_not_match_password_userregisterform(self):\n form = UserRegisterForm(\n data={\n \"username\": \"BobRobert\",\n \"first_name\": \"Bob\",\n \"last_name\": \"Robert\",\n \"email\": \"[email protected]\",\n \"password1\": \"fglZfYmr%?,\",\n \"password2\": \"ko_fglZfYmr%?,\",\n \"robot\": True,\n }\n )\n self.assertFalse(form.is_valid())", "def test_invalid_password(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1905')\n self.assertIn(b'Invalid password! 
Please try again', rv.data)", "def test_admin_cannot_create_user_with_invalid_password(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andyandy',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_password_too_short(self):\n\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'tTTt'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n user_exitst = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exitst)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test_invalid_password(self):\n u_invalid_password = User(username=\"bad_user\", email=\"[email protected]\", password=\"df\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_invalid_password)", "def test_password_error(self):\n token = str((jwt.encode({\n \"email\": \"[email protected]\"},\n settings.SECRET_KEY)).decode('utf-8')\n )\n self.client.post(self.registration_url, valid_user, format='json')\n response = self.client.patch(\n self.change_password_url+\"?token=\"+token,\n {\"password\": \"bag\"},\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['error'],\n \"password should be atleast 8 characters.\")", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': '123'}\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw'\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n\t\tpayload = {\n\t\t'email': '[email protected]',\n\t\t'password': 'pw',\n\t\t'name': 'test'\n\t\t}\n\n\t\tres = self.client.post(CREATE_USER_URL, payload)\n\t\tself.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)\n\t\tuser_exists = get_user_model().objects.filter(\n\t\t\temail = payload['email']\n\t\t\t).exists()\n\n\t\tself.assertFalse(user_exists)", "def test_invalid_username_valid_password(self):\n response = self.client.post(reverse('users:login'), {'username': 'xyzabe', 'password': self.user['password1']})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 
0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'fu'}\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n self.assertFalse(user_exists)", "def test_password_too_short(self):\n # requirments for creating user\n payload = {\n 'email': '[email protected]',\n 'password': 'pwd',\n 'name': 'Test',\n }\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Ensure that statuscode returns a HTTP400 bad request\n # becos must exist before we can ckeck password\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # chech if user exists true else false\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exists)", "def test_invalid_password(self):\n user = User(email=\"[email protected]\", password=\"testpassword\")\n\n self.assertFalse(user.is_valid_password(\"invalid_password\"))", "def test_set_user_password(self):\n pass", "def test_password_too_short(self):\n\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"us\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test1',\n 'last_name': 'JustUser2'\n }\n\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_invalid_password(self):\n response = self.signup_a_user(self.password_lacks_specialchar)\n self.assertEqual(response.data['errors']['password'],\n [\"please consider a password that has a number, an \"\n \"uppercase letter, lowercase letter and a special\"\n \" character\"]\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_password_too_short(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'pw',\n 'name': 'test Name'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_invalid_password():\r\n auth_register_v1(email='[email protected]',\r\n password='qw3rtyAppl3s@99',\r\n name_first='Harry',\r\n name_last='Potter')\r\n\r\n invalid_password = 'ffffffffF'\r\n with pytest.raises(InputError) as e:\r\n auth_login_v1(email='[email protected]',\r\n password=invalid_password) \r\n assert f'Password {invalid_password} is not correct.' 
in str(e.value)", "def test_registeration_no_password(self):\n response = self.signup_a_user(self.user_lacks_password)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"password\"],\n [\"This field may not be blank.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def test_password_too_short(self):\n payload = {'email': '[email protected]', 'password': 'pw'}\n \n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n \n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n \n self.assertFalse(user_exists)", "def test_password_too_short(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'pw',\r\n 'name': 'Maks'\r\n }\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\r\n\r\n user_exists = get_user_model().objects.filter(\r\n email=payload['email']\r\n ).exists()\r\n self.assertFalse(user_exists)", "def test_registeration_short_password(self):\n response = self.signup_a_user(self.user_short_password)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn(\"token\", response.data)" ]
[ "0.8574067", "0.80264175", "0.8024861", "0.79407185", "0.789616", "0.7894492", "0.78743935", "0.781172", "0.778892", "0.7776758", "0.77584887", "0.7728596", "0.77148277", "0.7702502", "0.7690832", "0.76812327", "0.76725155", "0.7661382", "0.76580256", "0.7653059", "0.7646939", "0.7645038", "0.7638168", "0.7637392", "0.76307124", "0.76282436", "0.75869864", "0.7576883", "0.7574778", "0.7563901" ]
0.81949
1
Test admin cannot register with invalid role field
def test_admin_register_wrong_role(self): admin = dict( name='Jonnie Pemba', username='jonnie', password='Andela8', role='keeper' ) resp = self.client.post( '/api/v1/register', content_type='application/json', data=json.dumps(admin) ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'role should be admin!') self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_role(self):\n pass", "def test_add_role_simple_post(self):\n pass", "def test_admin_cannot_create_user_with_different_roles(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='supervisor'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'role should either be admin or attendant')\n self.assertEqual(resp.status_code, 400)", "def test_add_role_simple(self):\n pass", "def test_ipam_roles_create(self):\n pass", "def test_list_role(self):\n pass", "def clean_role():", "def test_admin_register_no_password(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_admin_cannot_update_user_with_different_roles(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='supervisor'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'role should either be admin or attendant')\n self.assertEqual(resp.status_code, 400)", "def test_delete_role(self):\n pass", "def test_admin_register_wrong_password(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='Andela',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_admin_register_no_name(self):\n admin = dict(\n name='',\n username='jonnie',\n password='Andela8',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter name / username in string format!')\n self.assertEqual(resp.status_code, 400)", "def test_register_only_one_admin(self):\n reply = self.admin_register()\n\n admin = dict(\n name='Codjoe Ronnie',\n username='ronnie',\n password='Andela8',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Admin is already registered, please login!')\n self.assertEqual(resp.status_code, 400)", "def test_save_role_returns_errors_on_invalid(self):\n role = Role()\n res = role_service.save(role)\n self.assertIsInstance(res, Result)\n self.assertFalse(res)", "def test_replace_roles(self):\n pass", "def test_correct_roles(self):\r\n users_with_invalid_role = []\r\n\r\n agents = 
self.selenium.get_agents_data()\r\n for email, expected_role in self.new_agents.items():\r\n for agent in agents:\r\n if agent[\"email\"] == email and agent[\"role\"] != expected_role:\r\n users_with_invalid_role.append({email: f\"should be {expected_role}, but is {agent['role']}\"})\r\n self.assertFalse(users_with_invalid_role, msg=users_with_invalid_role)", "def test_admin_register_no_username(self):\n admin = dict(\n name='Jonnie Pemba',\n username='',\n password='Andela8',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter name / username in string format!')\n self.assertEqual(resp.status_code, 400)", "def test_user_id_role_put(self):\n pass", "def test_check_failed_role(self):\n self.assertEqual(self.checkredis.check_failed_role(\"php\", \"qa\"), \"GREEN\")", "def test_create_returns_errors_on_invalid(self):\n res = role_service.create('ad')\n self.assertIsInstance(res, Result)\n self.assertFalse(res)", "def test_admin_cannot_create_users_with_same_name(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'This name is already registered!')\n self.assertEqual(resp.status_code, 400)", "def test_admin_cannot_create_user_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_add_role_to_project_member(self):\n pass", "def test_admin_cannot_create_user_with_invalid_name(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love3',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter name in a correct string format, (john doe)!')\n self.assertEqual(resp.status_code, 400)", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def test_ipam_roles_update(self):\n pass", "def test_create_event_route_with_incorrect_privileges(self):\n resp = self.request_with_role('/admin/events/create', role='user')\n self.assertEqual(resp.status_code, 401)", "def test_admin_register(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='Andela8',\n role='admin'\n )\n\n resp 
= self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Jonnie Pemba has been registered')\n self.assertEqual(resp.status_code, 201)", "def test_non_existent_course_role(self):\n self._login_as_staff()\n path = self.path(role='A')\n response = self.client.get(path)\n\n assert response.status_code == 400\n\n response = self.client.post(path)\n assert response.status_code == 400", "def test_create_instructor_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)" ]
[ "0.7693056", "0.7495057", "0.74605167", "0.7379491", "0.6992951", "0.6956861", "0.6911706", "0.69024867", "0.68894583", "0.6844062", "0.68185097", "0.6816657", "0.67910755", "0.67658985", "0.6691237", "0.6685197", "0.6672649", "0.6657749", "0.66458726", "0.66364074", "0.663412", "0.6633892", "0.6626907", "0.66058844", "0.66039824", "0.6575462", "0.6566176", "0.65439636", "0.65098137", "0.6507622" ]
0.7838657
0
Test cannot register more than one admin
def test_register_only_one_admin(self): reply = self.admin_register() admin = dict( name='Codjoe Ronnie', username='ronnie', password='Andela8', role='admin' ) resp = self.client.post( '/api/v1/register', content_type='application/json', data=json.dumps(admin) ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Admin is already registered, please login!') self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin(self):\n assert(admin)", "def test_02_second_user_is_not_admin(self):\r\n self.register()\r\n self.signout()\r\n self.register(name=\"tester2\", email=\"[email protected]\",\r\n password=\"tester\")\r\n self.signout()\r\n user = db.session.query(User).get(2)\r\n assert user.admin == 0, \"User ID: 2 should not be admin, but it is\"", "def test_admin_cannot_create_users_with_same_name(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'This name is already registered!')\n self.assertEqual(resp.status_code, 400)", "def test_09_admin_users_as_admin(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data", "def test_admin_cannot_create_user_with_different_roles(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='supervisor'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'role should either be admin or attendant')\n self.assertEqual(resp.status_code, 400)", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_13_admin_user_add_del(self):\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"[email protected]\", password=\"juan\")\r\n self.signout()\r\n # Signin with admin user\r\n self.signin()\r\n # Add user.id=1000 (it does not exist)\r\n res = self.app.get(\"/admin/users/add/1000\", follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 404, res.status_code\r\n assert err['error'] == \"User not found\", err\r\n assert err['status_code'] == 404, err\r\n\r\n\r\n # Add user.id=2 to admin group\r\n res = self.app.get(\"/admin/users/add/2\", follow_redirects=True)\r\n assert \"Current Users with Admin privileges\" in res.data\r\n err_msg = \"User.id=2 should be listed as an admin\"\r\n assert \"Juan Jose\" in res.data, err_msg\r\n # Remove user.id=2 from admin group\r\n res = self.app.get(\"/admin/users/del/2\", follow_redirects=True)\r\n assert \"Current Users with Admin privileges\" not in res.data\r\n err_msg = \"User.id=2 should be listed as an admin\"\r\n assert \"Juan Jose\" not in res.data, err_msg\r\n # Delete a non existant user should return an error\r\n res = self.app.get(\"/admin/users/del/5000\", follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert 
res.status_code == 404, res.status_code\r\n assert err['error'] == \"User.id not found\", err\r\n assert err['status_code'] == 404, err", "def test_add_admin(self):\n self.test_create_user()\n self.test_create_organization()\n url = reverse('MGA:add_admin')\n data = {'admin id': 1, 'org_id': 1}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_11_admin_user_not_listed_in_search(self):\r\n self.register()\r\n data = {'user': 'john'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_first_user_is_admin(self):\n user = User.objects.create(username='username', email='[email protected]')\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_superuser)\n user = User.objects.create(username='username2', email='[email protected]')\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)", "def test_add_admin_to_org(self):\n pass", "def test_admin_register(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='Andela8',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Jonnie Pemba has been registered')\n self.assertEqual(resp.status_code, 201)", "def test_admin_cannot_create_users_with_same_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Paul Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This username is already taken!')\n self.assertEqual(resp.status_code, 400)", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def test_create__admin_valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {\n 'email': '[email protected]',\n 'isAdmin': True, 'isSiteEditor': True}\n with test_app.test_request_context(self.request_path, json=json_data):\n actual_json = self.handler.do_post()\n self.assertEqual('[email protected]', actual_json['email'])\n self.assertTrue(actual_json['is_site_editor'])\n self.assertTrue(actual_json['is_admin'])\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertEqual('[email protected]', new_appuser.email)\n self.assertTrue(new_appuser.is_admin)\n\n # Clean up\n new_appuser.key.delete()", "def test_cannot_remove_all_admins(self):\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n users = admin_holder.find('ul', {'class': 'users'}).findAll(\n 'li', {'class': 'deleter'})\n assert len(users) == 1\n r = self.app.post('/admin/groups/remove_user', params={\n 
'role_id': admin_id,\n 'username': 'admin1'})\n assert r.json[\n 'error'] == 'You must have at least one user with the Admin role.'\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n users = admin_holder.find('ul', {'class': 'users'}).findAll(\n 'li', {'class': 'deleter'})\n assert len(users) == 1", "def test_new_admin_subscriptions(self):\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n with audits('add user test-user to Admin'):\n self.app.post('/admin/groups/add_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n p_nbhd = M.Neighborhood.query.get(name='Projects')\n p = M.Project.query.get(shortname='test', neighborhood_id=p_nbhd._id)\n uid = M.User.by_username('test-user')._id\n for ac in p.app_configs:\n sub = M.Mailbox.subscribed(\n user_id=uid, project_id=p._id, app_config_id=ac._id)\n assert sub, 'New admin not subscribed to app %s' % ac\n\n \"\"\"\n When user is removed from admins group then user must be unsubscribed\n from all the tools in the project\n \"\"\"\n self.app.post('/admin/groups/remove_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n for ac in p.app_configs:\n sub = M.Mailbox.subscribed(\n user_id=uid, project_id=p._id, app_config_id=ac._id)\n assert not sub, 'New admin not unsubscribed to app %s' % ac", "def test_admin_register_no_name(self):\n admin = dict(\n name='',\n username='jonnie',\n password='Andela8',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter name / username in string format!')\n self.assertEqual(resp.status_code, 400)", "def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))", "def test_00_first_user_is_admin(self):\r\n self.register()\r\n user = db.session.query(User).get(1)\r\n assert user.admin == 1, \"User ID:1 should be admin, but it is not\"", "def test_15_admin_user_add_del_authenticated(self):\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"[email protected]\", password=\"juan\")\r\n self.signout()\r\n self.register(fullname=\"Juan Jose2\", name=\"juan2\",\r\n email=\"[email protected]\", password=\"juan2\")\r\n self.signout()\r\n self.signin(email=\"[email protected]\", password=\"juan2\")\r\n # Add user.id=2 to admin group\r\n res = self.app.get(\"/admin/users/add/2\", follow_redirects=True)\r\n assert res.status == \"403 FORBIDDEN\",\\\r\n \"This action should be forbidden, not enought privileges\"\r\n # Remove user.id=2 from admin group\r\n res = self.app.get(\"/admin/users/del/2\", follow_redirects=True)\r\n assert res.status == \"403 FORBIDDEN\",\\\r\n \"This action should be forbidden, not enought privileges\"", "def test_admin_register_no_password(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_admin_login(self):\n self.request.user.is_active = 
True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_register_wrong_role(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='Andela8',\n role='keeper'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'role should be admin!')\n self.assertEqual(resp.status_code, 400)", "def test_number_of_group_admins(self):\n\n group0 = self.test_save(name='group1')\n group1 = self.test_save(name='group2')\n user0 = self.user\n user1 = self.user1\n \n group0.user_set.add(user0)\n group0.user_set.add(user1)\n user0.grant(\"admin\", group0)\n group1.user_set.add(user0)\n group1.user_set.add(user1)\n\n self.assertEqual(number_group_admins(group0), 1)\n self.assertEqual(number_group_admins(group1), 0)\n user1.grant(\"admin\", group1)\n self.assertEqual(number_group_admins(group1), 1)\n user1.grant(\"admin\", group0)\n self.assertEqual(number_group_admins(group0), 2)", "def test_admin_signup_visibility(app, resource):\n with app.admin(nethz='somethingsomething'):\n headers = {'If-Match': 'Wrong'}\n\n # Create fake signup with different nethz\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can see signups\n response = app.client.get('/' + resource,\n headers=headers,\n assert_status=200)\n assert len(response['_items']) == 1\n\n # Items\n url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(url, headers=headers, assert_status=200)\n\n # Patch (if we can see item, we get 412 since etag is wrong)\n app.client.patch(url, headers=headers, data={}, assert_status=412)\n\n # Delete (etag missing again)\n app.client.delete(url, headers=headers, assert_status=412)", "def test_registeration_for_a_super_user(self):\n admin_user = User.objects.create_superuser(\n 'jey',\n '[email protected]',\n 'jemo'\n )\n self.assertEqual(admin_user.is_active, True)\n self.assertEqual(admin_user.is_staff, True)\n self.assertEqual(admin_user.is_superuser, True)" ]
[ "0.7309909", "0.7267381", "0.7182782", "0.71491766", "0.7015131", "0.70101553", "0.700269", "0.6975732", "0.69714284", "0.6962041", "0.6954554", "0.69518054", "0.6945564", "0.6876291", "0.6874265", "0.68027735", "0.6774794", "0.6731248", "0.6718102", "0.669607", "0.668596", "0.66712224", "0.66634107", "0.66634107", "0.66634107", "0.66634107", "0.663665", "0.6608532", "0.65834475", "0.656868" ]
0.801755
0
Test admin can view all user accounts
def test_admin_can_view_all_user_accounts(self): resp = self.admin_create_user() reply = self.admin_create_user2() resp = self.admin_login() token = resp['token'] resp = self.client.get( '/api/v1/users', headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertIn('love', str(reply['users'][1]['username'])) self.assertIn('walker', str(reply['users'][2]['username'])) self.assertEqual(resp.status_code, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_all_user(self):\n response = self.client().get(AuthTestCase.admin)\n # assert the response code\n self.assertEqual(response.status_code, 200)", "def test_admin_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.admin_header)\n self.assertEqual(response.status_code, 200)", "def test_admin_user_list_all_users(self):\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, self.users.data)", "def test_users_listed(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n # Assertions\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_view_displays_all(self):\n set_up_one_user(self, 1, 0)\n login = self.client.login(username='test', password='2HJ1vRV0Z&3iD')\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(str(response.context['user']), 'test')\n self.assertEqual(len(response.context['data']), 1)", "def test_users_listed(self):\n # the url is defined in django admin documentation\n # it generate the url for the list of user page\n # it is good using that instead of the url in case it changes\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_02_account_index(self):\r\n # As Anonymou user\r\n url = \"/account\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_02_account_index(self):\r\n # As Anonymou user\r\n url = \"/account\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def 
test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_user_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n #assert are django checks on http request is 200\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_user_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)", "def test_09_admin_users_as_admin(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data", "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_list_user(self):\n pass", "def test_admin_calendar_user_admin_list(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)", "def test_admin_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '<a href=\"/admin/auth/group/\">Group</a>', html=True)\n self.assertContains(response, '<a href=\"/admin/auth/user/\">User</a>', html=True)", "def test_api_can_get_all_users(self):\n response = self.client().get('/api/v1/user/')\n self.assertTrue(response.status_code, 200)", "def test_super_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_super_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_user_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'testuser', html=True)\n self.assertContains(response, '[email protected]', html=True)", "def test_admin_user(self):\n user = self.template_users['staff_user']\n self.client.login(email=user['email'], password=user['password'])\n\n # Admins can see everything\n response = self.client.get(reverse('api:log-list'))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], self.object_count)\n\n # Deletion should be 
possible\n response = self.client.post(reverse('api:log-erase'), {\n 'before': str(timezone.now()),\n 'max_severity': LogEntry.ERROR,\n })\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['deleted'], self.object_count)\n self.assertEqual(LogEntry.objects.count(), 0)", "def test_user_isnt_admin():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n for page in ['pages', 'teams', 'scoreboard', 'chals', 'statistics', 'config']:\n r = client.get('/admin/{}'.format(page))\n assert r.location.startswith(\"http://localhost/login?next=\")\n assert r.status_code == 302\n destroy_ctfd(app)", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_view_all_users_profiles(self):\n self.authorize_user(self.user_login_details)\n response = self.client.get(self.profiles_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)" ]
[ "0.8039933", "0.78656775", "0.7778051", "0.77131677", "0.7622776", "0.75739825", "0.74982417", "0.74839985", "0.7449284", "0.7436807", "0.7436807", "0.7382937", "0.73566735", "0.7339939", "0.73188037", "0.7282121", "0.7279309", "0.7223879", "0.7220174", "0.71927464", "0.7173427", "0.7173427", "0.7153608", "0.7119316", "0.7110797", "0.7110025", "0.7110025", "0.7110025", "0.7110025", "0.7081914" ]
0.8417845
0
Test store attendants cannot view user accounts
def attendants_cannot_view_user_accounts(self): reply = self.admin_create_user() resp = self.attendant_login() token = resp['token'] resp = self.client.get( '/api/v1/users', headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Unauthorized Access!') self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='[email protected]', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def not_test_without_user(self):\n # TODO", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='[email protected]', password='testpassword')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_user(self):\n return True", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_detailview_read_for_wrong_user(self):\n\n for user in self.users:\n detailview = reverse('account_detail', args=(user.uuid,))\n\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(self.users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(detailview)\n\n self.assertEqual(response.status_code, 403)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n 
self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def test_inactive_account(self):", "def test_attendant_can_only_view_own_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Benja Maisha',\n username='maisha',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n user = dict(\n username='maisha',\n password='Andela8'\n )\n response = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(response.data.decode())\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'You have no access to this sale!')\n self.assertEqual(resp.status_code, 401)", "def test_get_virtual_accounts(self):\n pass", "def test_non_user_login(self):\n self.user.list_of_accounts = [{'username': 'Parseen',\n 'pwd': 'mypassword',\n 'email': '[email protected]'}]\n msg = self.user.login(\"[email protected]\", \"idontevenhaveone\")\n self.assertEqual(msg, \"Account not registered, sign up\")", "def test_access_portal_user(self):\n # Portal user can see the confirmed SO for which they are assigned as a customer\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_portal']).read()\n\n self.order.partner_id = self.company_data['default_user_portal'].partner_id\n self.order.action_confirm()\n # Portal user can't edit the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_portal']).write({'team_id': self.company_data['default_sale_team'].id})\n # Portal user can't create the SO\n with self.assertRaises(AccessError):\n self.env['sale.order'].with_user(self.company_data['default_user_portal']).create({\n 'partner_id': self.partner_a.id,\n })\n # Portal user can't delete the SO which is in 'draft' or 'cancel' state\n self.order.action_cancel()\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_portal']).unlink()", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, 
tenant='froggy')", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_02_account_index(self):\r\n # As Anonymou user\r\n url = \"/account\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_02_account_index(self):\r\n # As Anonymou user\r\n url = \"/account\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n 
self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Community page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_not_permitted(self, default_store):\n course = self.create_course_with_orphans(default_store)\n orphan_url = reverse_course_url('orphan_handler', course.id)\n\n test_user_client, test_user = self.create_non_staff_authed_user_client()\n CourseEnrollment.enroll(test_user, course.id)\n response = test_user_client.get(orphan_url)\n self.assertEqual(response.status_code, 403)\n response = test_user_client.delete(orphan_url)\n self.assertEqual(response.status_code, 403)" ]
[ "0.68920714", "0.6664434", "0.66508675", "0.65989894", "0.65780735", "0.65544313", "0.6523385", "0.6505487", "0.63439023", "0.63439023", "0.63439023", "0.63439023", "0.6324461", "0.6319198", "0.63036656", "0.62793696", "0.6270352", "0.62537885", "0.6243347", "0.62364614", "0.62364614", "0.62364614", "0.62364614", "0.6204414", "0.6204414", "0.6204414", "0.6204414", "0.6196788", "0.61919725", "0.6191929" ]
0.7187359
0
Tests that admin cannot view all users in the Inventory with blacklisted token
def test_cannot_view_all_users_with_blacklisted_token(self): resp = self.admin_create_user() reply = self.admin_create_user2() resp = self.admin_login() token = resp['token'] resp = self.client.delete( '/api/v1/logout', headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'You are successfully logged out!') self.assertEqual(resp.status_code, 200) resp = self.client.get( '/api/v1/users', headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!') self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_listing_from_wall_when_blocked_some_users(self):", "def test_11_admin_user_not_listed_in_search(self):\r\n self.register()\r\n data = {'user': 'john'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_get_users_non_admin(client: FlaskClient) -> None:\n # Non-admin users are not allowed to make the request\n username = create_random_username()\n auth_token = create_auth_token(username)\n response = get_users(client, auth_token.signed)\n assert_error_response(response, HTTPStatus.FORBIDDEN)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_list_users_without_permissions(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('user-list'))\n\n content = {\n 'detail': 'You do not have permission to perform this action.'\n }\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, 
status.HTTP_403_FORBIDDEN)", "def test_no_token_get_all(self):\n response = self.app.get('/api/v3/users')\n self.assertEqual(response.status_code, 401)", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_show_private_lists_invalid(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.user2.id\n \n res = c.get(\"/users/tester1/private-lists\")\n\n self.assertEqual(res.status_code, 302)", "def test_need_login_to_see_usagelist(self):\n response = self.client.get(reverse('api_v1:usage-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_get_all_tokens_anonymous_user(self):\r\n\r\n # Anonymoues users should be unauthorized, no matter which kind of token are requesting\r\n res = self.app.get('/api/token')\r\n err = json.loads(res.data)\r\n\r\n assert res.status_code == 401, err\r\n assert err['status'] == 'failed', err\r\n assert err['status_code'] == 401, err\r\n assert err['exception_cls'] == 
'Unauthorized', err\r\n assert err['target'] == 'token', err", "def test_cannot_create_user_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_video_detail_no_permission(\n mock_user_moira_lists, logged_in_apiclient, user_admin_list_data\n):\n client, _ = logged_in_apiclient\n mock_user_moira_lists.return_value = {\"some_other_list\"}\n url = reverse(\n \"video-detail\", kwargs={\"video_key\": user_admin_list_data.video.hexkey}\n )\n result = client.get(url)\n assert result.status_code == status.HTTP_403_FORBIDDEN", "def test_detailview_read_for_wrong_user(self):\n\n for user in self.users:\n detailview = reverse('account_detail', args=(user.uuid,))\n\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(self.users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(detailview)\n\n self.assertEqual(response.status_code, 403)", "def test_not_authenticated_non_public_course_with_all_blocks(self):\n self.client.logout()\n self.query_params.pop('username')\n self.query_params['all_blocks'] = True\n self.verify_response(403)", "def get_everyone_denied(self):", "def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 
0)" ]
[ "0.73880625", "0.73672503", "0.7269525", "0.7072302", "0.7024909", "0.70035934", "0.6964477", "0.6964477", "0.6938775", "0.6838921", "0.6826864", "0.678693", "0.6745519", "0.6745451", "0.67423964", "0.6729407", "0.6705149", "0.6697523", "0.6688124", "0.66815144", "0.66791004", "0.66354185", "0.65887785", "0.6587253", "0.65782595", "0.65759647", "0.65651363", "0.6564664", "0.65569365", "0.653655" ]
0.77643275
0
Test admin cannot update a store attendant with blacklisted token
def test_cannot_update_user_with_blacklisted_token(self): resp = self.admin_create_user() reply = self.admin_login() token = reply['token'] resp = self.client.delete( '/api/v1/logout', headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'You are successfully logged out!') self.assertEqual(resp.status_code, 200) user = dict( name='Summer Lover', username='lover', password='Andela8', role='attendant' ) resp = self.client.put( '/api/v1/users/2', content_type='application/json', data=json.dumps(user), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!') self.assertEqual(resp.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_token_missing_edit(self):\n with self.client:\n id = self.get_id()\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": \"\"}))\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 401)\n self.assertEqual(data.get('message'), \"Token is missing\")", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n 
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def test_authenticated_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').update,\r\n token)", "def test_with_unpermitted_token(self):\n email_text = self.email_template % self.token.uuid\n assert not save_from_email_reply(email_text)", "def test_update_device_token(self):\n pass", "def test_none_admin_edit(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_update_ban(self):\n pass", "def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_unlisted_addon(self):\n addon = Addon.objects.get(pk=3615)\n self.make_addon_unlisted(addon)\n\n up = self.get(self.good_data)\n assert up.is_valid()", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_anonymous_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').update,\r\n token)", "def test_wrong_admin_put(self):\n\n with self.client:\n token = 
self.get_token()\n id = 4\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal not found\")\n self.assertEqual(response.status_code, 400)", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_11_admin_user_not_listed_in_search(self):\r\n self.register()\r\n data = {'user': 'john'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_update_offline_status(self):\n pass", "def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n 
data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_disabling_tenant_disables_token(self):\n # Authenticate as user to get a token *for a specific tenant*\n r = self.service_request(method='POST', path='/tokens',\n as_json={\n 'passwordCredentials': {\n 'username': self.user_id,\n 'password': 'secrete',\n 'tenantId': self.tenant_id\n }\n })\n self.service_token = r.json['auth']['token']['id']\n \n # Validate and check that token belongs to tenant\n self.admin_request(path='/tokens/%s?belongsTo=%s' % \n (self.service_token, self.tenant_id))\n \n # Disable tenant\n r = self.admin_request(method='PUT',\n path='/tenants/%s' % self.tenant_id,\n as_json={\n 'tenant': {\n 'description': 'description',\n 'enabled': False,\n }\n })\n self.assertEqual(r.json['tenant']['enabled'], False)\n \n # Assert that token belonging to disabled tenant is invalid\n r = self.admin_request(path='/tokens/%s?belongsTo=%s' % \n (self.service_token, self.tenant_id),\n assert_status=403)\n self.assertTrue(r.json['tenantDisabled'], 'Tenant is disabled')", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = 
self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_wrong_token_permission_denied(self, client, token):\n with disable_logs(logging.WARNING):\n assert_hook_status(client, status=403, token=f\"{token}wrong\")" ]
[ "0.6632362", "0.6503029", "0.6456621", "0.64475024", "0.6429417", "0.64270395", "0.63401264", "0.631916", "0.63083947", "0.62978315", "0.62547594", "0.6140264", "0.61387783", "0.6122147", "0.6098927", "0.609861", "0.6074144", "0.6073103", "0.60713977", "0.60544616", "0.60531455", "0.6050761", "0.604885", "0.60479516", "0.6047816", "0.60431856", "0.6042671", "0.6034688", "0.6026288", "0.59924847" ]
0.6702801
0
Test admin cannot update a store attendant with different roles other than 'admin' or 'attendant'
def test_admin_cannot_update_user_with_different_roles(self): resp = self.admin_create_user() reply = self.admin_login() token = reply['token'] user = dict( name='Summer Lover', username='lover', password='Andela8', role='supervisor' ) resp = self.client.put( '/api/v1/users/2', content_type='application/json', data=json.dumps(user), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'role should either be admin or attendant') self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_attendant_can_only_view_own_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Benja Maisha',\n username='maisha',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n user = dict(\n username='maisha',\n password='Andela8'\n )\n response = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(response.data.decode())\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'You have no access to this sale!')\n self.assertEqual(resp.status_code, 401)", "def test_ipam_roles_update(self):\n pass", "def test_ipam_roles_partial_update(self):\n pass", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def test_patch_role_inherited_demote(self):\n self.make_assignment(\n self.category, self.assign_user, self.role_contributor\n )\n self.assertEqual(RoleAssignment.objects.count(), 4)\n\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n patch_data = {'role': PROJECT_ROLE_GUEST}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 4)\n self.update_as.refresh_from_db()\n self.assertEqual(self.update_as.role, self.role_contributor)", "def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))", "def test_admin_cannot_create_user_with_different_roles(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='supervisor'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'role should either be admin or attendant')\n self.assertEqual(resp.status_code, 400)", "def test_add_admin_to_org(self):\n pass", "def test_admin(self):\n assert(admin)", "def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)", "def test_replace_roles(self):\n pass", "def 
test_delete_admin_from_org(self):\n pass", "def test_add_role(self):\n pass", "def test_wrong_role_controle_acl_update(self):\n with factories.single_commit():\n control = factories.ControlFactory()\n person = factories.PersonFactory(name=\"user1\", email=\"[email protected]\")\n control.add_person_with_role_name(person, \"Admin\")\n access_control_list = {\n \"Non-existing role\": [\n {\n \"email\": \"[email protected]\",\n \"name\": \"user2\",\n },\n ]\n }\n\n response = self.api.put(control, control.id, {\n \"access_control_list\": access_control_list,\n })\n\n self.assert400(response)\n self.assertEqual(\n response.json[\"message\"],\n \"Role 'Non-existing role' does not exist\"\n )\n control = all_models.Control.query.get(control.id)\n self.assert_obj_acl(\n control,\n {\"Admin\": [{\"name\": \"user1\", \"email\": \"[email protected]\"}]}\n )", "def test_none_admin_edit(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_attendant_cannot_get_sale_record_they_didnot_make(self):\n reply = self.admin_add_product()\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.admin_create_user2()\n reply = self.attendant2_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"You haven't made any sales!\")\n self.assertEqual(resp.status_code, 404)", "def test_patch_role_inherited_equal(self):\n self.make_assignment(\n self.category, self.assign_user, self.role_contributor\n )\n self.assertEqual(RoleAssignment.objects.count(), 4)\n\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n patch_data = {'role': PROJECT_ROLE_CONTRIBUTOR}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 4)\n expected = {\n 'project': str(self.project.sodar_uuid),\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n 'sodar_uuid': str(self.update_as.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_list_role(self):\n pass", "def test_subroles(self):\n def check_roles(r):\n dev_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[2]\n mem_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[3]\n assert 'All users in Admin group' in dev_holder.text\n assert 'All users in Developer group' in mem_holder.text\n\n r = self.app.get('/admin/groups/')\n\n admin_holder = r.html.find(\n 'table', {'id': 
'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n # test that subroles are intact after user added\n with audits('add user test-user to Admin'):\n r = self.app.post('/admin/groups/add_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n r = self.app.get('/admin/groups/')\n check_roles(r)\n # test that subroles are intact after user deleted\n with audits('remove user test-user from Admin'):\n r = self.app.post('/admin/groups/remove_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n r = self.app.get('/admin/groups/')\n check_roles(r)", "def test_handle_edit_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"brS\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team \"\n \"edit brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def test_delete_role(self):\n pass", "def test_modify_access_bad_role(self):\r\n url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {\r\n 'unique_student_identifier': self.other_staff.email,\r\n 'rolename': 'robot-not-a-roll',\r\n 'action': 'revoke',\r\n })\r\n self.assertEqual(response.status_code, 400)", "def test_review_story_restrict_to_only_admin(self):\n self.client.post('/api/stories', headers={'token': user_token}, data=json.dumps(story1))\n res = self.client.put('/api/stories/1/review', headers={'token': user_token}, data=json.dumps({\n 'status': 'Approved'\n }))\n result = json.loads(res.data.decode())\n self.assertEqual(result['message'], 'Permission denied')\n self.assertEqual(res.status_code, 403)", "def test_update_role_type_name_level(self):\n response = requests.post(\"http://pulse-rest-testing.herokuapp.com/books\",\n data={\"title\": \"Update Item\", \"author\": \"Inna Korsun\"})\n body = response.json()\n id_book_new = body[\"id\"]\n\n res = requests.get(self.role_url + str(self.id_role))\n level_cur = res.json()[\"level\"]\n role = {\"name\": \"Gandalf\", \"type\": \"Maya\",\"level\":level_cur+10, \"book\":id_book_new}\n response = requests.put(self.role_url+ str(self.id_role), data=role)\n print(response.status_code)\n self.assertEqual(response.status_code, 200)\n\n body = response.json()\n\n self.assertEqual(role[\"name\"], body[\"name\"])\n self.assertEqual(role[\"type\"], body[\"type\"])\n\n res = requests.get(self.role_url + str(body[\"id\"]))#check that item present in role's list\n self.assertEqual(res.status_code, 200)\n self.roles_ids.append(body[\"id\"])\n self.id_book#add id role to list which should be deleted in tearDown", "def test_leave_accrual_access_rights(self):\n accrual = self.employee.get_leave_accrual(self.leave_type.id)\n accrual.write({\n 'line_ids': [(0, 0, {\n 'name': 'Test',\n 'amount_cash': 100,\n 'date': datetime.now(),\n })],\n })\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_3.id).check_access_rule, 'read')\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_2.id).check_access_rights, 'write')\n\n accrual.sudo(self.user_1.id).check_access_rule('read')\n self.assertTrue(\n accrual.sudo(self.user_1.id).check_access_rights('read'))\n\n # The manager can not access the leave accruals of the employee 2\n # because he is not the employee's manager\n accrual_2 = self.employee_2.get_leave_accrual(self.leave_type.id)\n\n self.assertRaises(\n Exception,\n 
accrual_2.sudo(self.user_1.id).check_access_rule, 'read')\n\n self.user_1.write({\n 'groups_id': [(4, self.ref('base.group_hr_manager'))]})\n\n for operation in ['read', 'write', 'create', 'unlink']:\n accrual_2.sudo(self.user_1.id).check_access_rule(operation)\n self.assertTrue(\n accrual_2.sudo(self.user_1.id).check_access_rights(operation))", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_correct_roles(self):\r\n users_with_invalid_role = []\r\n\r\n agents = self.selenium.get_agents_data()\r\n for email, expected_role in self.new_agents.items():\r\n for agent in agents:\r\n if agent[\"email\"] == email and agent[\"role\"] != expected_role:\r\n users_with_invalid_role.append({email: f\"should be {expected_role}, but is {agent['role']}\"})\r\n self.assertFalse(users_with_invalid_role, msg=users_with_invalid_role)", "def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)", "def clean_role():", "def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)" ]
[ "0.6734772", "0.67023313", "0.6685747", "0.6653768", "0.65826803", "0.6570354", "0.6543829", "0.6476152", "0.64435196", "0.6425734", "0.6390715", "0.63797116", "0.6317682", "0.6251953", "0.6250159", "0.6206891", "0.62026596", "0.61997855", "0.6192626", "0.6176961", "0.6169824", "0.6168254", "0.6160302", "0.6159476", "0.6154391", "0.61418515", "0.61385745", "0.61287177", "0.61247635", "0.6123235" ]
0.7228282
0
Test admin cannot update a store attendant with invalid username
def test_admin_cannot_update_user_with_invalid_username(self): resp = self.admin_create_user() reply = self.admin_login() token = reply['token'] user = dict( name='Summer Love', username='love summer', password='Andela8', role='attendant' ) resp = self.client.put( '/api/v1/users/2', content_type='application/json', data=json.dumps(user), headers={'Authorization': 'Bearer {}'.format(token)} ) reply = json.loads(resp.data.decode()) self.assertEqual(reply['message'], 'Enter username in a correct string format no spaces, (johndoe)!') self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_cannot_update_user_with_invalid_name(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover3',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Enter name in a correct string format, (john doe)!')\n self.assertEqual(resp.status_code, 400)", "def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)", "def test_update_user(self):\n pass", "def test_invalid_update_request_with_taken_username(self):\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(self.author.get_key()))\n response: Response = self.client.patch(BASE_URL + '/update/', data={\n 'username': self.temporary_author.username\n })\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT, msg=data)\n self.assertEqual(data, {'detail': f\"User '{self.temporary_author.username}' already exists.\"})", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_username_not_unique(self, client, users):\n user = users[0]\n data = factory.build(dict, FACTORY_CLASS=UserFactory, username=users[1].username)\n url = reverse('users:update', args=(user.pk,))\n response = client.post(url, data)\n assert response.status_code == 200\n assert 'A user with that username already exists.' 
in str(response.content)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_admin_cannot_create_users_with_same_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Paul Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This username is already taken!')\n self.assertEqual(resp.status_code, 400)", "def test_user_update_request(self):\n pass", "def test_modify_nonexist_username(self):\n print('(' + self.test_modify_nonexist_username.__name__+')',\n self.test_modify_nonexist_username.__doc__)\n self.assertIsNone(self.connection.modify_user(\n NON_EXIST_PATIENT_USERNAME, PATIENT['public_profile'],\n PATIENT['restricted_profile']))", "def test_username_is_writable_for_user_creation(self):\n request = Mock()\n assert 'username' not in self.admin.get_readonly_fields(request)", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_users_username_delete(self):\n pass", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def test_update_work_type_name_user(self):\n # login as manager\n self.authenticate(self.user)\n\n # alter the work type\n response = 
self.client.patch(self.url_wt1, {\"name\": \"NewName\"})\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def testAddLongUsername(self):\n original_username = \"thiswillbelong\"\n longer_username = original_username*10\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(longer_username, \"password\"))", "def test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")", "def test_replace_user(self):\n pass", "def test_admin_cannot_create_users_with_same_name(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'This name is already registered!')\n self.assertEqual(resp.status_code, 400)", "def test_admin_cannot_update_user_with_empty_fields(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_signup_dupe_username(self):\n\n invalid_u = User.signup(\"[email protected]\", \"allison\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_040_update_user(self):\n\n testflow.step(\"Updating user %s\", TEST_USER2)\n assert USER_CLI.run(\n 'edit',\n TEST_USER2,\n attribute='firstName=userX2',\n )[0]", "def test_admin_cannot_update_user_with_vague_user_id(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/kk',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'User_id should be numbers!')\n self.assertEqual(resp.status_code, 400)", "def test_cannot_change_metername_to_existing_(self):\n meter = Meter.objects.create(meter_name='testmeter', meter_unit='X')\n meter.save()\n 
meter2 = Meter.objects.create(meter_name='testmeter_alt', meter_unit='X')\n meter2.save()\n\n p = Permission.objects.get(name='Can change meter')\n self.user.user_permissions.add(p)\n\n url = reverse('api_v1:meter-detail', kwargs={'pk':2})\n self.client.login(username='testuser', password='q2w3E$R%')\n data = json.dumps({'meter_name': 'testmeter'})\n response = self.client.patch(url,\n data,\n follow=True,\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n self.assertIn('already exists', str(response.content))", "def test_admin_cannot_create_user_with_invalid_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love summer',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter username in a correct string format no spaces, (johndoe)!')\n self.assertEqual(resp.status_code, 400)", "def test_update_the_created_user():\n pytest.test_user.name += \"Updated\"\n response = api_helper.update_user(pytest.test_user)\n assert response.status_code == 200" ]
[ "0.6882498", "0.6805685", "0.67120755", "0.6652845", "0.6626257", "0.66236037", "0.66232955", "0.6510223", "0.6469667", "0.64549625", "0.6385028", "0.63423777", "0.6299679", "0.6264924", "0.6254841", "0.62495565", "0.6238614", "0.6227551", "0.62265486", "0.6219083", "0.6204438", "0.6195477", "0.6193788", "0.61921513", "0.61914253", "0.6189732", "0.6178268", "0.61546797", "0.6148976", "0.6099491" ]
0.6969854
0
Test admin cannot update a store attendant with invalid password
def test_admin_cannot_update_user_with_invalid_password(self):
    resp = self.admin_create_user()
    reply = self.admin_login()
    token = reply['token']
    user = dict(
        name='Summer Love',
        username='love',
        password='Andela',
        role='attendant'
    )
    resp = self.client.put(
        '/api/v1/users/2',
        content_type='application/json',
        data=json.dumps(user),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalid_password(self):\n pass", "def test_invalid_update_user_old_password(self):\n\n data = {\n 'password': 'password-invalido',\n 'new_password': 'pedro123456789',\n 'confirm_password': 'pedro123456789'\n }\n response = self.client.put(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n response.data['detail'],\n \"Senha antiga inválida.\"\n )", "def test_invalid_update_user_new_password(self):\n\n data = {\n 'password': 'pedro123456',\n 'new_password': 'pedro12345678',\n 'confirm_password': 'pedro123456789'\n }\n response = self.client.put(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n response.data['detail'],\n \"As novas senhas não combinam.\"\n )", "def test_incorrect_password(self):\n input = (\"admin\", \"\")\n if is_travis():\n self.login_test(*input, True)\n else:\n self.login_test(*input)", "def test_invalid_password(self):\n self.request.json_body = deepcopy(self.good_dict)\n invalids = ['5horT']\n for val in invalids:\n self.request.json_body['password'] = val\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'password must be at least 8 characters'))", "def test_update(sqlite_db):\n updated_pass = \"TheUpdatedPassword\"\n site = \"www.example.com\"\n response = smm.update_passwd(site, updated_pass)\n assert response\n assert smm.read_passwd(site) == updated_pass\n bad_response = smm.update_passwd(\"NotASite\", updated_pass)\n assert not bad_response", "def test_change_password_invalid_input(self):\n role = Role.query.filter_by(name=\"Event Organizer\").first()\n user = TestModelFactory.create_user(password=\"password\")\n user.role = role\n db.session.add(user)\n db.session.commit()\n\n # log the user in\n with self.client:\n response = self.client.post(\n \"/auth/login\",\n data={\"email\": user.email, \"password\": \"password\"},\n follow_redirects=True,\n )\n\n for data in ViewFunctionTestData.INVALID_CHANGE_PASSWORD_DATA:\n with self.subTest(data=data):\n response = self.client.post(\n \"/settings/change-password\",\n data={\n \"old_password\": data[\"old_password\"],\n \"new_password\": data[\"new_password\"],\n \"confirm_password\": data[\"confirm_password\"],\n },\n follow_redirects=True,\n )\n self.assertTrue(\n data[\"error_message\"] in response.get_data(as_text=True)\n )\n self.assertTrue(user.verify_password(\"password\"))", "def test_admin_register_wrong_password(self):\n admin = dict(\n name='Jonnie Pemba',\n username='jonnie',\n password='Andela',\n role='admin'\n )\n\n resp = self.client.post(\n '/api/v1/register',\n content_type='application/json',\n data=json.dumps(admin)\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_set_user_password(self):\n pass", "def test_invalid_password(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1905')\n self.assertIn(b'Invalid password! 
Please try again', rv.data)", "def test_password_error(self):\n token = str((jwt.encode({\n \"email\": \"[email protected]\"},\n settings.SECRET_KEY)).decode('utf-8')\n )\n self.client.post(self.registration_url, valid_user, format='json')\n response = self.client.patch(\n self.change_password_url+\"?token=\"+token,\n {\"password\": \"bag\"},\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['error'],\n \"password should be atleast 8 characters.\")", "def test_valid_update_user_password(self):\n\n data = {\n 'password': 'pedro123456',\n 'new_password': 'pedro123456789',\n 'confirm_password': 'pedro123456789'\n }\n response = self.client.put(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def testEditPassword(self):\n self._login_user('eschoppik','secret')\n response = self.client.post('/users/1/edit_password?_method=PATCH',\n data=dict(new_password='newpass', confirm_password='newpass',\n old_password='secret'), follow_redirects=True)\n user = User.query.filter_by(username='eschoppik').first()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bcrypt.check_password_hash(user.password, 'newpass'),True)", "def test_set_password_with_wrong_type(self):\n user = UserModel()\n with pytest.raises(ValueError):\n user.password = 12345", "def test_password_change_provided(self):\n token = str((jwt.encode(\n {\"email\": \"[email protected]\"}, \n settings.SECRET_KEY)).decode('utf-8')\n )\n self.client.post(self.registration_url, valid_user, format='json')\n response = self.client.patch(\n self.change_password_url+\"?token=\"+token, {\"pwd\": \"bagenda1234\"},\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['errors']\n [0], \"Password field is required.\")", "def test_new_password(self):\n form_data = self.form_data(self.pwd)\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_launch_entry_on_change_password_server_response(self):\n self.validate_attributes_in_launch_response()", "def test_account_modification_superuser_wrong_pw(flask_server, create_account):\n import requests\n\n config = flask_server\n data = {\n 'superuserpassword': '123',\n 'name': create_account['name'],\n 'new_name': 'foo2',\n 'new_password': 'bar2',\n 'new_code': '456',\n }\n\n req = requests.post('{}/account/modify'.format(API_URL), data=data)\n assert req.content == b'Wrong superuserpassword'\n assert req.status_code == 400", "def test_42_password_link(self):\r\n self.register()\r\n res = self.app.get('/account/johndoe/update')\r\n assert \"Change your Password\" in res.data\r\n user = User.query.get(1)\r\n user.twitter_user_id = 1234\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.app.get('/account/johndoe/update')\r\n assert \"Change your Password\" not in res.data, res.data", "def test_invalid_length_for_new_password():\n user = User(email=\"[email protected]\", user_type=0)\n user_password = \"ILoveHTML\"\n user.SetPassword(user_password)\n\n new_password1 = \"pwd\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password1)\n assert not user.VerifyPassword(new_password1)\n assert user.VerifyPassword(user_password)\n\n new_password2 = \"I love meatball and tuna.\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password2)\n assert not user.VerifyPassword(new_password2)\n assert user.VerifyPassword(user_password)", "def test_password_too_short(self):\n\n 
payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'tTTt'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n user_exitst = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exitst)", "def test_login_update_bad(testapp):\n from webtest.app import AppError\n with pytest.raises(AppError):\n testapp.get('/journal/1/edit-entry')", "def testLoginPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userI\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"userI\", \"passw0rd\"))", "def test_wrong_password(self):\n self.login_as(self.USER)\n\n bad_credentials = self.payload.copy()\n bad_credentials[\"password\"] = \"wrooooong\"\n\n with self.assertNumQueries(3):\n response = self.client.post(self.url, bad_credentials)\n self.assert_validation_failed(response, data={\n \"non_field_errors\": [\"Wrong password.\"]\n })\n self.assertEqual(Membership.objects.count(), self.num_memberships)", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_41_password_change(self):\r\n password = \"mehpassword\"\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': password,\r\n 'new_password': \"p4ssw0rd\",\r\n 'confirm': \"p4ssw0rd\",\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n assert \"Yay, you changed your password succesfully!\" in res.data, res.data\r\n\r\n password = \"mehpassword\"\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': \"wrongpassword\",\r\n 'new_password': \"p4ssw0rd\",\r\n 'confirm': \"p4ssw0rd\",\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n msg = \"Your current password doesn't match the one in our records\"\r\n assert msg in res.data\r\n\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': '',\r\n 'new_password':'',\r\n 'confirm': '',\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n msg = \"Please correct the errors\"\r\n assert msg in res.data", "def test_reset_password(self):\n\n dietitian = Dietitian.query.get(1)\n reset_password(\"newpass\", dietitian)\n\n self.assertEqual(True, dietitian.check_password(\"newpass\"))", "def test_account_view_wrong_pw(flask_server, create_account):\n import requests\n\n data = create_account\n data['password'] += '123'\n\n req = requests.post('{}/account/view'.format(API_URL), data=data)\n assert req.content == b'Wrong password'\n assert req.status_code == 400", "def test_account_password_failure(self):\r\n params = {\r\n 'current_password': 'test',\r\n 'new_password': 'not_testing'\r\n }\r\n\r\n res = self.testapp.post(\r\n \"/api/v1/admin/password?api_key=\" + str(API_KEY),\r\n params=params,\r\n status=403)\r\n\r\n # make sure we can decode the body\r\n user = 
json.loads(res.body)\r\n\r\n self.assertEqual(\r\n user['username'], 'admin',\r\n \"Should have a username of admin {0}\".format(user))\r\n self.assertTrue(\r\n 'error' in user,\r\n \"Should have a error key in there: {0}\".format(user))\r\n self.assertTrue(\r\n 'typo' in user['error'],\r\n \"Should have a error key in there: {0}\".format(user))\r\n self._check_cors_headers(res)" ]
[ "0.7587666", "0.7190806", "0.7065232", "0.70605147", "0.69997144", "0.6972896", "0.69573975", "0.693463", "0.6927393", "0.6889472", "0.6862712", "0.6851641", "0.6851611", "0.68099064", "0.6807714", "0.6806558", "0.6805681", "0.67896575", "0.6773845", "0.67709357", "0.67707455", "0.67613083", "0.6729547", "0.67122185", "0.67088693", "0.67014503", "0.6689202", "0.6686059", "0.668109", "0.6664507" ]
0.7285022
1
Test admin cannot update a store attendant with vague user id
def test_admin_cannot_update_user_with_vague_user_id(self):
    resp = self.admin_create_user()
    reply = self.admin_login()
    token = reply['token']
    user = dict(
        name='Summer Love',
        username='love',
        password='Andela8',
        role='attendant'
    )
    resp = self.client.put(
        '/api/v1/users/kk',
        content_type='application/json',
        data=json.dumps(user),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'User_id should be numbers!')
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_edition_of_other_users_aid(client, contributor):\n\n aid = AidFactory()\n form_url = reverse('aid_edit_view', args=[aid.slug])\n client.force_login(contributor)\n res = client.get(form_url)\n assert res.status_code == 404", "def test_update_user(self):\n pass", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_user_update_request(self):\n pass", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_update_virtual_account_by_id(self):\n pass", "def test_none_admin_edit(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_handle_edit_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"brS\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team \"\n \"edit brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n 
url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_updateview_read_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(updateview)\n\n self.assertEqual(response.status_code, 403)", "def test_beneficiaries_update_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-update')\n response = self.client.get(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_wrong_id(self):\n self.request.matchdict = {'user_id': int(self.request.user.id)+4}\n self.request.json_body = {}\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_patch_user(self):\n new_user = self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n patch_data = {'user': str(new_user.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_updateview_write_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n valid_data = {'email': user.email, 'first_name': user.first_name,\n 'last_name': user.last_name, 'language': user.language}\n invalid_data = valid_data.copy()\n 
invalid_data['email'] = 'invalid_email_address'\n valid_data_response = self.client.post(updateview, valid_data)\n invalid_data_response = self.client.post(updateview, invalid_data)\n\n self.assertEqual(valid_data_response.status_code, 403)\n self.assertEqual(invalid_data_response.status_code, 403)", "def test_attendant_can_only_view_own_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Benja Maisha',\n username='maisha',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n user = dict(\n username='maisha',\n password='Andela8'\n )\n response = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(response.data.decode())\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'You have no access to this sale!')\n self.assertEqual(resp.status_code, 401)", "def test_wrong_admin_put(self):\n\n with self.client:\n token = self.get_token()\n id = 4\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal not found\")\n self.assertEqual(response.status_code, 400)", "def test_editing_supplies_user(self):\n id = self.testsupply.id\n oldstate = self.testsupply.state\n request = self.factory.put(\n '/api/supplies/%s/' % id, {'name': '3d printer', 'state': 'aaa'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyDetailsView.as_view()(request, pk=id)\n # normal user should get forbidden error\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n # data should not change\n self.assertEqual(Supply.objects.get(id=id).state, oldstate)", "def test_attendant_cannot_get_sale_record_they_didnot_make(self):\n reply = self.admin_add_product()\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.admin_create_user2()\n reply = self.attendant2_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n 
self.assertEqual(reply['message'], \"You haven't made any sales!\")\n self.assertEqual(resp.status_code, 404)", "def test_handle_lead_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team \"\n \"lead brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def test_patch_user(self):\n pass", "def test_owner_edit_assessment_invalid(self):\n req, resp = data.get_assessment(self.contract['id'])\n response = self.user_01.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_mailpiece_put_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n mailPiecePK = MailPiece.objects.filter(user=userPK)[0].pk\n url = reverse('MailPiece-detail', kwargs={'pk': mailPiecePK})\n self.data['user'] = userPK\n response = self.client.put(url, self.data, format='json')\n #This is 404 instead of 403 because there is no way to view a mail piece\n #that you arent the user on.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(MailPiece.objects.get(pk=mailPiecePK).user,\n self.data['user'])", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)" ]
[ "0.6781378", "0.65461284", "0.64795804", "0.6436433", "0.6332345", "0.6313285", "0.6267433", "0.622346", "0.62083393", "0.61932874", "0.61855006", "0.61781687", "0.6140063", "0.6139956", "0.6132134", "0.609968", "0.60942966", "0.6083457", "0.6070109", "0.6059979", "0.6030719", "0.60053056", "0.5986859", "0.5986564", "0.5985148", "0.5984966", "0.59844726", "0.5975713", "0.5972517", "0.59659445" ]
0.6831623
0
Test admin cannot update a user that doesn't exist
def test_admin_cannot_update_non_existant_user(self):
    resp = self.admin_create_user()
    reply = self.admin_login()
    token = reply['token']
    user = dict(
        name='Summer Lover',
        username='lover',
        password='Andela8',
        role='attendant'
    )
    resp = self.client.put(
        '/api/v1/users/5',
        content_type='application/json',
        data=json.dumps(user),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], "This user doesn't exist!")
    self.assertEqual(resp.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_user(self):\n pass", "def test_user_update_request(self):\n pass", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_admin_cannot_update_user_with_empty_fields(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)", "def test_api_user_is_not_admin(self):\n\n\t\t# register the user\n\t\treg_user = self.register_user('lilbaby', '[email protected]', 'test#op3456', 'test#op3456')\n\t\tdata = json.loads(reg_user.data.decode())\n\t\tself.assertEqual(reg_user.status_code, 201)\n\t\tself.assertIn('successfully registered', str(data))\n\n\t\t# login user\n\t\tlogin_res = self.client.post(\n\t\t\tf'{URL_AUTH}login',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\tusername='lilbaby',\n\t\t\t\t\tpassword='test#op3456'\n\t\t\t\t)\n\t\t\t),\n\t\t\tcontent_type='application/json'\n\t\t)\n\t\tlogin_data = json.loads(login_res.data.decode())\n\t\ttoken = login_data['auth_token']\n\n\t\tbook = self.client.put(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='text',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\ttitle='updated book'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['error'] == 'forbidden')\n\t\tself.assertTrue(book.status_code == 403)", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_admin_update_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'user updated!')\n self.assertEqual(resp.status_code, 200)", "def test_patch_user(self):\n pass", "def test_admin_cannot_update_user_with_invalid_name(self):\n resp = self.admin_create_user()\n reply = 
self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover3',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Enter name in a correct string format, (john doe)!')\n self.assertEqual(resp.status_code, 400)", "def test_admin_cannot_update_user_with_invalid_username(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love summer',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Enter username in a correct string format no spaces, (johndoe)!')\n self.assertEqual(resp.status_code, 400)", "def test_user_is_really_updated():\n response = api_helper.get_user(user_id=pytest.test_user.id)\n check_user_data_in_response(response.json()[\"data\"][0])", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_admin_cannot_update_user_with_vague_user_id(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/kk',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'User_id should be numbers!')\n self.assertEqual(resp.status_code, 400)", "def test_cannot_update_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_02_second_user_is_not_admin(self):\r\n self.register()\r\n self.signout()\r\n self.register(name=\"tester2\", email=\"[email protected]\",\r\n password=\"tester\")\r\n self.signout()\r\n user = db.session.query(User).get(2)\r\n assert user.admin == 0, \"User ID: 2 should not be admin, but it is\"", "def test_update_user(self):\n\n update_dict = dict(\n username='test_another_username',\n role='test_new_role',\n department='test_new_department'\n )\n\n # Update non-existing user\n updated = self.user_api.update_user(MAGEN_USER['user_uuid'], update_dict)\n 
self.assertTrue(updated.success)\n self.assertEqual(updated.count, 0)\n\n # Insert user in Database\n inserted = self.user_api.insert_user(MAGEN_USER)\n self.assertTrue(inserted.success)\n\n # Update existing user\n updated = self.user_api.update_user(MAGEN_USER['user_uuid'], update_dict)\n self.assertTrue(updated.success)\n self.assertEqual(updated.count, 1)\n # Verify that data was updated\n selected = self.user_api.get_user(MAGEN_USER['user_uuid'])\n self.assertTrue(selected.success)\n self.assertEqual(selected.documents['username'], update_dict['username'])\n self.assertEqual(selected.documents['role'], update_dict['role'])\n self.assertEqual(selected.documents['department'], update_dict['department'])", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_admin_update_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)", "def test_update_the_created_user():\n pytest.test_user.name += \"Updated\"\n response = api_helper.update_user(pytest.test_user)\n assert response.status_code == 200", "def test_admin_cannot_update_user_with_invalid_password(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Password should be longer than 6 characters, have atleast an uppercase and a lowercase!')\n self.assertEqual(resp.status_code, 400)", "def test_handle_refresh_not_admin(self):\n test_user = User(user)\n self.db.retrieve.return_value = test_user\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team refresh\",\n user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_another_user(self):\n user1_response = self.client.post(reverse('user-list'), {\n 'username': 'aseem', 'password': 
'passwrodaosida123'\n })\n update_user_resp = self.client.patch(\n reverse('user-list') + '1/', {\n 'username': 'rakesh', 'password': 'passwrodaosida123'\n })\n\n self.assertEqual(update_user_resp.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_handle_edit_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"brS\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team \"\n \"edit brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def test_updateview_write_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n valid_data = {'email': user.email, 'first_name': user.first_name,\n 'last_name': user.last_name, 'language': user.language}\n invalid_data = valid_data.copy()\n invalid_data['email'] = 'invalid_email_address'\n valid_data_response = self.client.post(updateview, valid_data)\n invalid_data_response = self.client.post(updateview, invalid_data)\n\n self.assertEqual(valid_data_response.status_code, 403)\n self.assertEqual(invalid_data_response.status_code, 403)", "def test_updateview_read_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(updateview)\n\n self.assertEqual(response.status_code, 403)", "def test_user_mod(self):\r\n\r\n self._setstaff_login()\r\n\r\n self.client.login(username=self.user.username, password='foo')\r\n\r\n # Create user tests\r\n\r\n # No uname\r\n response = self.client.post(reverse('sysadmin'),\r\n {'action': 'create_user',\r\n 'student_fullname': 'blah',\r\n 'student_password': 'foozor', })\r\n self.assertIn(_('Must provide username'), response.content.decode('utf-8'))\r\n # no full name\r\n response = self.client.post(reverse('sysadmin'),\r\n {'action': 'create_user',\r\n 'student_uname': '[email protected]',\r\n 'student_password': 'foozor', })\r\n self.assertIn(_('Must provide full name'), response.content.decode('utf-8'))\r\n\r\n # Test create valid user\r\n self.client.post(reverse('sysadmin'),\r\n {'action': 'create_user',\r\n 'student_uname': '[email protected]',\r\n 'student_fullname': 'test cuser',\r\n 'student_password': 'foozor', })\r\n\r\n self.assertIsNotNone(\r\n User.objects.get(username='[email protected]',\r\n email='[email protected]'))\r\n\r\n # login as new user to confirm\r\n self.assertTrue(self.client.login(\r\n username='[email protected]', password='foozor'))\r\n\r\n self.client.logout()\r\n self.client.login(username=self.user.username, password='foo')\r\n\r\n # Delete user tests\r\n\r\n # Try no username\r\n response = self.client.post(reverse('sysadmin'),\r\n {'action': 'del_user', })\r\n self.assertIn(_('Must provide username'), response.content.decode('utf-8'))\r\n\r\n # Try bad usernames\r\n response = self.client.post(reverse('sysadmin'),\r\n {'action': 'del_user',\r\n 'student_uname': '[email protected]',\r\n 'student_fullname': 'enigma jones', })\r\n self.assertIn(_('Cannot find user with email address'), response.content.decode('utf-8'))\r\n\r\n response = 
self.client.post(reverse('sysadmin'),\r\n {'action': 'del_user',\r\n 'student_uname': 'flabbergast',\r\n 'student_fullname': 'enigma jones', })\r\n self.assertIn(_('Cannot find user with username'), response.content.decode('utf-8'))\r\n\r\n self.client.post(reverse('sysadmin'),\r\n {'action': 'del_user',\r\n 'student_uname': '[email protected]',\r\n 'student_fullname': 'test cuser', })\r\n\r\n self.assertEqual(0, len(User.objects.filter(\r\n username='[email protected]',\r\n email='[email protected]')))\r\n\r\n self.assertEqual(1, len(User.objects.all()))", "def test_dont_save_new_user(self):\n self.assertEqual(get_user_model().objects.exists(), 1)", "def test_admin_cannot_update_user_with_different_roles(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='supervisor'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'role should either be admin or attendant')\n self.assertEqual(resp.status_code, 400)", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)" ]
[ "0.81179094", "0.76583564", "0.76581556", "0.76460975", "0.75170904", "0.74860865", "0.74507725", "0.7441613", "0.7438987", "0.74172455", "0.73982227", "0.7366298", "0.73619944", "0.7332433", "0.7329521", "0.7268531", "0.7250227", "0.7234456", "0.7233554", "0.72302", "0.72285295", "0.72134006", "0.7199672", "0.7190936", "0.7189486", "0.71757084", "0.7161102", "0.7156889", "0.7155278", "0.70912373" ]
0.8409781
0
Test admin cannot delete a user that doesn't exist
def test_admin_cannot_delete_non_existant_user(self):
    resp = self.admin_create_user()
    reply = self.admin_login()
    token = reply['token']
    resp = self.client.delete(
        '/api/v1/users/5',
        content_type='application/json',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], "This attendant does not exist!")
    self.assertEqual(resp.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_user(self):\n pass", "def test_delete_user(self):\n pass", "def test_delete_fail(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'id': people[2].id}\n self.delete('user', 403, params=p)", "def test_delete_users_non_admin(client: FlaskClient) -> None:\n username = create_random_username()\n # Non-admin users are not allowed to make the request\n auth_token = create_auth_token(username)\n response = delete_users(client, auth_token.signed)\n assert_error_response(response, HTTPStatus.FORBIDDEN)", "def test_users_username_delete(self):\n pass", "def test_remove_user(self):\n pass", "def test_jenkins_user_delete(self):\n ju = JenkinsUser.objects.get(username=\"user_1\")\n self.assertRaises(django.db.models.deletion.ProtectedError, ju.delete)", "def test_user_id_delete(self):\n pass", "def testDeleteUserIsDenied(self):\n [(objectID, username)] = UserAPI().create(\n [(u'user', u'secret', u'User', u'[email protected]')])\n self.store.commit()\n with login(u'user', objectID, self.transact) as session:\n deferred = self.facade.deleteUser(session, u'doomed')\n error = yield self.assertFailure(deferred, TPathPermissionDenied)\n self.assertEqual(u'doomed', error.path)", "def test_delete_user_by_id_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n admin_username = create_random_username()\n populate_database_with_users(db_session, username)\n auth_token = create_auth_token(admin_username, admin=True)\n response = delete_user(client, username, auth_token.signed)\n assert response.status_code == HTTPStatus.NO_CONTENT\n assert response.content_length is None\n assert GifSyncUser.get_by_username(username) is None", "def test_delete_user_by_id_non_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n populate_database_with_users(db_session, username)\n auth_token = create_auth_token(username)\n response = delete_user(client, username, auth_token.signed)\n assert response.status_code == HTTPStatus.NO_CONTENT\n assert response.content_length is None\n assert GifSyncUser.get_by_username(username) is None", "def test_delete_users_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n populate_database_with_users(db_session)\n # Admin users are allowed to make the request\n auth_token = create_auth_token(username, admin=True)\n response = delete_users(client, auth_token.signed)\n assert response.status_code == HTTPStatus.NO_CONTENT\n assert response.content_length is None\n # No users exist in the database\n all_users = GifSyncUser.get_all()\n assert len(all_users) == 0", "def test_delete_user_field(self):\n pass", "def test_admin_can_delete_a_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/users/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"User deleted!\")\n self.assertEqual(resp.status_code, 200)", "def testDeleteIsAllowed(self):\n UserAPI().create([(u'user', u'secret', u'User', u'[email protected]')])\n namespaces = SecureNamespaceAPI(self.system.users['fluiddb'])\n namespaces.delete([u'user/private'])\n self.users.delete([u'user'])\n self.assertIdentical(None, getUser(u'user'))", "def test_delete_user_does_not_exists(self, client, users):\n user = users[0]\n url = reverse('users:delete', args=(user.pk 
+ 100,))\n response = client.post(url)\n assert response.status_code == 404", "def test_del_user(self, api):\n self.builder.add_user(api.get_user())\n resp = api.del_user(api.get_user())\n assert resp.status_code == 204\n with pytest.raises(ObjectDeletedError):\n assert self.builder.check_user(api.get_user()) is False", "def test_delete_no_username(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n # This should fail either an username or user object should be given\n self.assertRaises(ValueError, api.user.delete)\n self.assertRaises(ValueError, api.user.delete,\n username='[email protected]', user=mock.Mock())\n\n api.user.create(email='[email protected]', password='secret')\n api.user.delete(username='[email protected]')\n\n user = api.user.create(email='[email protected]', password='secret')\n api.user.delete(user=user)", "def test_delete_user(self):\n\n with self.client:\n result = self.client.post('/users/cool-guy-johnny-B/delete',\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertNotIn(b'cool-guy-johnny-B', result.data)", "def delete_user():", "def test_delete_non_owner(self):\n another_user = CustomUser.objects.create(id=134, email='[email protected]', is_active=True)\n another_user.set_password('qwerty12345')\n another_user.save()\n\n self.client.login(email='[email protected]', password='qwerty12345')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def test_delete(self, client, users):\n user = users[0]\n url = reverse('users:delete', args=(user.pk,))\n response = client.get(url)\n assert response.status_code == 405\n response = client.post(url)\n assert response.status_code == 302\n assert response.url == reverse('users:list')\n assert not get_user_model().objects.filter(pk=user.pk).exists()", "def delete_user():\n #TODO user delete\n pass", "def test_jenkins_user_delete(self):\n ju = JenkinsUser.objects.get(username=\"shib_id\")\n ju.delete()\n self.assertRaises(\n JenkinsUserProfile.DoesNotExist,\n JenkinsUserProfile.objects.get,\n shib_uid=\"shib_id\")", "def test_delete_user_404(self):\n resp = self.app.delete('/users/thisuserdoesntexist')\n assert resp.status_code == 404", "def test_logically_delete_user(self):\n ju = JenkinsUser.objects.get(username=\"shib_id\")\n self.assertTrue(ju.is_active)\n self.assertTrue(ju.is_staff)\n self.assertTrue(ju.is_superuser)\n self.assertTrue(ju.registrationprofile.activated)\n RegistrationProfile.objects.get(user=ju)\n self.assertEqual(ju.groups.count(), 2)\n\n logically_delete_user(ju)\n\n self.assertFalse(ju.is_active)\n self.assertFalse(ju.is_staff)\n self.assertFalse(ju.is_superuser)\n self.assertRaises(\n RegistrationProfile.DoesNotExist,\n RegistrationProfile.objects.get,\n user=ju)\n self.assertEqual(ju.groups.count(), 0)", "def test_delete_user(self):\n # First create a user to delete\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now delete it\n resp = self.app.delete('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n\n # Finally check to make sure it's not in the db\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 404", "def test_handle_delete_not_admin(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n test_user = User(\"userid\")\n self.db.retrieve.return_value = test_user\n 
self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def test_admin_cannot_delete_user_with_vague_user_id(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/users/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The user id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def test_delete_device_user(self):\n pass" ]
[ "0.8464141", "0.8464141", "0.8194369", "0.80601615", "0.80286217", "0.79667133", "0.7949473", "0.79252636", "0.791296", "0.7894087", "0.78859013", "0.7866681", "0.78086597", "0.7806129", "0.7803511", "0.7781318", "0.7777231", "0.77508134", "0.7748713", "0.774801", "0.7744327", "0.7673631", "0.76578355", "0.7655369", "0.7640626", "0.76389104", "0.7624503", "0.76234055", "0.7587152", "0.75827265" ]
0.8555734
0
Test user cannot logout with a blacklisted token
def test_cannot_logout_with_blacklisted_token(self):
    reply = self.admin_register()
    user = dict(
        username='jonnie',
        password='Andela8'
    )
    resp = self.client.post(
        '/api/v1/login',
        content_type='application/json',
        data=json.dumps(user)
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Login sucessful!')
    self.assertTrue(reply['token'])
    self.assertEqual(resp.status_code, 200)
    token = reply['token']

    resp = self.client.delete(
        '/api/v1/logout',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'You are successfully logged out!')
    self.assertEqual(resp.status_code, 200)

    resp = self.client.delete(
        '/api/v1/logout',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'You are already logged out!')
    self.assertEqual(resp.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_logout_without_token(self):\n self.create_user()\n\n url = reverse_lazy('authenticate:logout')\n response = self.client.get(url)\n\n detail = str(response.data['detail'])\n status_code = int(response.data['status_code'])\n\n self.assertEqual(len(response.data), 2)\n self.assertEqual(detail, 'Authentication credentials were not provided.')\n self.assertEqual(status_code, 401)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_cannot_delete_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/users/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_logout_un_logged_in_user_false(self):\n resp = self.client().post('/api/v1/auth/logout/1')\n self.assertEqual(resp.status_code, 400)\n resp = resp.get_json()\n self.assertEqual(resp['error'],\n 'That user is not logged in')", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_logout_revoked(self):\n response = self.client.post('/api/v2/auth/logout',\n headers=self.attendant_headers)\n response = self.client.post('/api/v2/auth/logout',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token has been revoked', str(response.data))", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "def blacklist_token(token, user):\r\n user = User.query.filter_by(username=user).first()\r\n user.login_status = False\r\n token = Token.query.filter_by(token=token).first()\r\n token.blacklist = True\r\n db.session.commit()\r\n return {'Message': 'You have successfully logged out', \"Status\": \"Success\"}, 201", "def auth_logout(token):\n if verify_token(token):\n return { \"is_success\": True }\n else:\n raise AccessError(description=\"Logout failed. 
Token is invalid\")", "def test_valid_logout(self):\n with self.client:\n # user registration\n user_response = register_user(self)\n response_data = json.loads(user_response.data.decode())\n self.assertTrue(response_data[\"Authorization\"])\n self.assertEqual(user_response.status_code, 201)\n\n # registered user login\n login_response = login_user(self)\n data = json.loads(login_response.data.decode())\n self.assertTrue(data[\"Authorization\"])\n self.assertEqual(login_response.status_code, 200)\n\n # valid token logout\n response = self.client.post(\n \"/auth/destroy_token\",\n headers=dict(\n Authorization=\"Token \"\n + json.loads(login_response.data.decode())[\"Authorization\"]\n ),\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data[\"status\"] == \"success\")\n self.assertEqual(response.status_code, 200)", "def test_logout_user_without_token(client, url):\n response = client.delete(\"/auth/logout/\")\n payload = response.get_json()\n assert response.status_code == HTTPStatus.UNAUTHORIZED\n assert payload[\"msg\"] == \"Missing Authorization Header\"", "def test_user_logout(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.get_token())\n response = self.client.delete(reverse('accounts:user-logout'))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_anonymous_logout(self):\n resp = self.client.post(reverse('logout'))\n assert resp.status_code == 200, resp.content.decode('utf-8')\n assert not self.is_authenticated(self.user)\n assert not self.is_authenticated(self.other_user)", "async def attempt_logout(token, expect=204):\n\n response = await database_sync_to_async(UserLogoutView.as_view())(factory.post('/logout', HTTP_AUTHORIZATION='Token ' + token))\n assert response.status_code == expect", "def test_other_logout(self):\n User.objects.create_user('user', '', 'user')\n client = Client()\n client.login(username='user', password='user')\n\n r = client.post('/accounts/logout/')\n self.assertCASLogoutNotInMessages(r)", "def test_logout_no_jwt(self, test_client):\n response = test_client.post('/api/auth/logout')\n res = json.loads(response.data)\n\n assert response.status_code == 401\n assert res['msg'] == \"Missing Authorization Header\"", "def test_logout_user(self):\n # register a user\n self.register_user()\n # login the user\n result = self.log_in()\n access_token = json.loads(result.data.decode())['access_token']\n # logout the same user\n response = self.client().post(AuthTestCase.logout, headers=dict(Authorization =\"Bearer \" + access_token), data={\"token\": access_token})\n # after successfully logging out\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"You have successfully logged out\", str(response.data))", "def test_user_can_logout(self):\n response = self.client.post(\n CONSTS.USER_LOGOUT_URL,\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotEqual(User.objects.get().last_login, datetime.datetime.now())", "def test_profile_api_anon(self):\n self.client.logout()\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 403)", "def test_logout(self):\n with self.client:\n self.client.post(\n '/users/login',\n data=dict(username=\"eschoppik\", password=\"secret\"),\n follow_redirects=True\n )\n response = self.client.get('/users/logout', follow_redirects=True)\n self.assertIn(b'You are now logged out', response.data)\n self.assertFalse(current_user.is_authenticated)", "def test_anonymous_user_delete(self):\r\n with 
self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').delete,\r\n token)", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_valid_logout(self):\n with self.client:\n # user registration\n user_response = register_user(self)\n register_data = json.loads(user_response.data.decode())\n self.assertTrue(register_data['Authorization'])\n self.assertEqual(user_response.status_code, 201)\n\n # registered user login\n login_response = login_user(self)\n login_data = json.loads(login_response.data.decode())\n self.assertTrue(login_data['Authorization'])\n self.assertEqual(login_response.status_code, 200)\n\n # valid token logout\n access_token = login_data['Authorization']['access_token']\n response = self.client.post(\n '/api/auth/logout',\n headers=dict(\n Authorization='Bearer ' + access_token\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertEqual(response.status_code, 200)\n\n # unregister user\n login_response = login_user(self)\n login_data = json.loads(login_response.data.decode())\n self.assertTrue(login_data['Authorization'])\n self.assertEqual(login_response.status_code, 200)\n\n access_token = login_data['Authorization']['access_token']\n response = self.client.delete(\n '/api/admin/unregister',\n headers=dict(\n Authorization='Bearer ' + access_token\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertEqual(response.status_code, 200)", "def test_logout(self):\r\n self.logout()", "def test_do_logout(self):\r\n with app.test_request_context():\r\n u1 = User.query.filter_by(username='testuser').one()\r\n\r\n do_login(u1)\r\n self.assertIn(CURR_USER_KEY, session)\r\n do_logout()\r\n self.assertNotIn(CURR_USER_KEY, session)", "def test_logout_route_requires_login(self):\n response = self.client.get('/users/logout', follow_redirects=True)\n self.assertIn(b'Please log in to access this page', response.data)", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def test_wrong_token_permission_denied(self, client, token):\n with disable_logs(logging.WARNING):\n assert_hook_status(client, status=403, token=f\"{token}wrong\")", "def test_unauthorized_user(self):\n response_decoded_json = requests.post(URL_AUTH['url_login'], \n data=json.dumps(AUTH_PAYLOADS['payload_unauth']),\n headers=HEADER['header'])\n mes = response_decoded_json.json()\n assert 400 == response_decoded_json.status_code, \"You have BAD REQUEST\"\n 
assert \"User not found\" == mes, \"There is unexpected ability to login as unknown user\"", "def test_get_all_tokens_anonymous_user(self):\r\n\r\n # Anonymoues users should be unauthorized, no matter which kind of token are requesting\r\n res = self.app.get('/api/token')\r\n err = json.loads(res.data)\r\n\r\n assert res.status_code == 401, err\r\n assert err['status'] == 'failed', err\r\n assert err['status_code'] == 401, err\r\n assert err['exception_cls'] == 'Unauthorized', err\r\n assert err['target'] == 'token', err" ]
[ "0.76390165", "0.7464493", "0.7407765", "0.7369426", "0.72350377", "0.7150207", "0.7141968", "0.71375614", "0.71186984", "0.71078527", "0.71049744", "0.70918167", "0.7071774", "0.703527", "0.69626963", "0.6960639", "0.691597", "0.69064254", "0.6890695", "0.6852276", "0.68518996", "0.68445915", "0.6836993", "0.6818334", "0.6809075", "0.68039477", "0.6721773", "0.6712291", "0.67111313", "0.6704843" ]
0.84015685
0
Creates a pipeline that reads tweets from Cloud Datastore from the last N days. The pipeline finds the top most-used words, the top most-tweeted URLs, and ranks word co-occurrences by an 'interestingness' metric (similar to tf-idf).
def process_datastore_tweets(project, dataset, pipeline_options):
  ts = str(datetime.datetime.utcnow())
  p = beam.Pipeline(options=pipeline_options)
  # Create a query to read entities from datastore.
  query = make_query('Tweet')

  # Read entities from Cloud Datastore into a PCollection.
  lines = (p
           | 'read from datastore' >> ReadFromDatastore(project, query, None))

  global_count = AsSingleton(
      lines
      | 'global count' >> beam.combiners.Count.Globally())

  # Count the occurrences of each word.
  percents = (lines
              | 'split' >> (beam.ParDo(WordExtractingDoFn())
                            .with_output_types(unicode))
              | 'pair_with_one' >> beam.Map(lambda x: (x, 1))
              | 'group' >> beam.GroupByKey()
              | 'count' >> beam.Map(lambda (word, ones): (word, sum(ones)))
              | 'in tweets percent' >> beam.Map(
                  lambda (word, wsum), gc: (word, float(wsum) / gc),
                  global_count))
  top_percents = (percents
                  | 'top 500' >> combiners.Top.Of(500, lambda x, y: x[1] < y[1])
                  )
  # Count the occurrences of each expanded url in the tweets
  url_counts = (lines
                | 'geturls' >> (beam.ParDo(URLExtractingDoFn())
                                .with_output_types(unicode))
                | 'urls_pair_with_one' >> beam.Map(lambda x: (x, 1))
                | 'urls_group' >> beam.GroupByKey()
                | 'urls_count' >> beam.Map(lambda (word, ones): (word, sum(ones)))
                | 'urls top 300' >> combiners.Top.Of(300, lambda x, y: x[1] < y[1])
                )

  # Define some inline helper functions.

  def join_cinfo(cooccur, percents):
    """Calculate a co-occurence ranking."""
    import math

    word1 = cooccur[0][0]
    word2 = cooccur[0][1]
    try:
      word1_percent = percents[word1]
      weight1 = 1 / word1_percent
      word2_percent = percents[word2]
      weight2 = 1 / word2_percent
      return (cooccur[0], cooccur[1],
              cooccur[1] * math.log(min(weight1, weight2)))
    except:
      return 0

  def generate_cooccur_schema():
    """BigQuery schema for the word co-occurrence table."""
    json_str = json.dumps({'fields': [
        {'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
        {'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},
        {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
    return parse_table_schema_from_json(json_str)

  def generate_url_schema():
    """BigQuery schema for the urls count table."""
    json_str = json.dumps({'fields': [
        {'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
        {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
    return parse_table_schema_from_json(json_str)

  def generate_wc_schema():
    """BigQuery schema for the word count table."""
    json_str = json.dumps({'fields': [
        {'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},
        {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
    return parse_table_schema_from_json(json_str)

  # Now build the rest of the pipeline.
  # Calculate the word co-occurence scores.
  cooccur_rankings = (lines
                      | 'getcooccur' >> (beam.ParDo(CoOccurExtractingDoFn()))
                      | 'co_pair_with_one' >> beam.Map(lambda x: (x, 1))
                      | 'co_group' >> beam.GroupByKey()
                      | 'co_count' >> beam.Map(lambda (wordts, ones): (wordts, sum(ones)))
                      | 'weights' >> beam.Map(join_cinfo, AsDict(percents))
                      | 'co top 300' >> combiners.Top.Of(300, lambda x, y: x[2] < y[2])
                      )

  # Format the counts into a PCollection of strings.
  wc_records = top_percents | 'format' >> beam.FlatMap(
      lambda x: [{'word': xx[0], 'percent': xx[1], 'ts': ts} for xx in x])

  url_records = url_counts | 'urls_format' >> beam.FlatMap(
      lambda x: [{'url': xx[0], 'count': xx[1], 'ts': ts} for xx in x])

  co_records = cooccur_rankings | 'co_format' >> beam.FlatMap(
      lambda x: [{'w1': xx[0][0], 'w2': xx[0][1],
                  'count': xx[1], 'log_weight': xx[2], 'ts': ts} for xx in x])

  # Write the results to three BigQuery tables.
  wc_records | 'wc_write_bq' >> beam.io.Write(
      beam.io.BigQuerySink(
          '%s:%s.word_counts' % (project, dataset),
          schema=generate_wc_schema(),
          create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
          write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
  url_records | 'urls_write_bq' >> beam.io.Write(
      beam.io.BigQuerySink(
          '%s:%s.urls' % (project, dataset),
          schema=generate_url_schema(),
          create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
          write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
  co_records | 'co_write_bq' >> beam.io.Write(
      beam.io.BigQuerySink(
          '%s:%s.word_cooccur' % (project, dataset),
          schema=generate_cooccur_schema(),
          create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
          write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))

  # Actually run the pipeline.
  return p.run()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTopNTweets(retrievedTweets, numberOfTweets):\n if sortBy=='newest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'], reverse=True)\n elif sortBy=='oldest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'],reverse=False)\n elif sortBy=='favorite_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['favorite_count'],reverse=True)\n elif sortBy=='retweet_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['retweet_count'],reverse=True)\n else:\n retrievedTweets = random.sample(retrievedTweets, numberOfTweets)\n return retrievedTweets[:numberOfTweets]", "def get_tweets(n=1):\n tweets = list(collection.find())[-n:]\n return tweets", "def build_feature_trajectories(tweets, firstEpochTime, lastEpochTime, bucketSize):\n\n # The tweets are represented as a list of dictionaries\n # T is the defined period\n\n # delta\n T = (lastEpochTime - firstEpochTime) // bucketSize\n\n # local Term-Frequency for each word feature\n # map of word feature to list, where the list is having T elements\n TFt = {}\n\n # global term frequency, total number of documents containing each feature\n TF = {}\n\n #feature-documentlists\n Mf = {}\n\n # number of documents for day t\n Nt = [0] * (T + 1)\n\n # total number of documents\n N = len(tweets)\n\n # iterate over the tweets\n tweetID = 0\n for tweet in tweets:\n tweetID+=1\n\n # convert the timestamp\n t = (int(tweet['createdAtAsLong']) - firstEpochTime) // bucketSize\n\n # increase the number of documents for day t\n Nt[t] += 1\n\n for word in tweet['text']:\n if word == \"\":\n continue\n else:\n # if the word does not exist\n if word not in TFt:\n TFt[word] = [0] * (T + 1)\n TF[word] = 0\n Mf[word] = []\n\n # increase the frequency of the current word for day t\n TFt[word][t] += 1\n TF[word] += 1\n Mf[word].append(tweetID)\n\n featTraj = {}\n\n for key in TFt:\n featTraj[key] = [0] * (T + 1)\n for idx, val in enumerate(TFt[key]):\n try:\n featTraj[key][idx] = (float(val) / Nt[idx]) * math.log(float(N) / TF[key])\n except:\n print (\"NO DOCUMENTS ON DAY \", idx)\n return featTraj, Mf", "def display_sentiment(ticker: str, n_tweets: int, n_days_past: int, export: str = \"\"):\n # Date format string required by twitter\n dtformat = \"%Y-%m-%dT%H:%M:%SZ\"\n\n # Algorithm to extract\n dt_recent = datetime.now() - timedelta(seconds=20)\n dt_old = dt_recent - timedelta(days=n_days_past)\n print(\n f\"From {dt_recent.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n df_tweets = pd.DataFrame(\n columns=[\n \"created_at\",\n \"text\",\n \"sentiment\",\n \"positive\",\n \"negative\",\n \"neutral\",\n ]\n )\n while True:\n # Iterate until we haven't passed the old number of days\n if dt_recent < dt_old:\n break\n # Update past datetime\n dt_past = dt_recent - timedelta(minutes=60)\n\n temp = twitter_model.load_analyze_tweets(\n ticker,\n n_tweets,\n start_time=dt_past.strftime(dtformat),\n end_time=dt_recent.strftime(dtformat),\n )\n\n if temp.empty:\n return\n\n df_tweets = pd.concat([df_tweets, temp])\n\n if dt_past.day < dt_recent.day:\n print(\n f\"From {dt_past.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n # Update recent datetime\n dt_recent = dt_past\n\n # Sort tweets per date\n df_tweets.sort_index(ascending=False, inplace=True)\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n df_tweets[\"prob_sen\"] = 1\n\n # df_tweets.to_csv(r'notebooks/tweets.csv', index=False)\n df_tweets.reset_index(inplace=True)\n 
df_tweets[\"Month\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(\n lambda x: x.month\n )\n df_tweets[\"Day\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(lambda x: x.day)\n df_tweets[\"date\"] = pd.to_datetime(df_tweets[\"created_at\"])\n df_tweets = df_tweets.sort_values(by=\"date\")\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n _, ax = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=cfg_plot.PLOT_DPI)\n ax[0].plot(\n pd.to_datetime(df_tweets[\"created_at\"]),\n df_tweets[\"cumulative_compound\"].values,\n lw=3,\n c=\"cyan\",\n )\n ax[0].set_ylabel(\"Cumulative VADER Sentiment\")\n xlocations = []\n xlabels = []\n for _, day_df in df_tweets.groupby(by=\"Day\"):\n day_df[\"time\"] = pd.to_datetime(day_df[\"created_at\"])\n day_df = day_df.sort_values(by=\"time\")\n ax[0].plot(day_df[\"time\"], day_df[\"sentiment\"].cumsum(), c=\"tab:blue\")\n xlocations.append(day_df.time.values[0])\n xlabels.append(day_df[\"time\"].apply(lambda x: x.strftime(\"%m-%d\")).values[0])\n\n ax[1].bar(df_tweets[\"date\"], df_tweets[\"positive\"], color=\"green\", width=0.02)\n ax[1].bar(df_tweets[\"date\"], -1 * df_tweets[\"negative\"], color=\"red\", width=0.02)\n ax[0].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[0].minorticks_on()\n ax[0].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[0].set_xticks(xlocations)\n ax[0].set_xticklabels(xlabels)\n\n ax[1].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[1].minorticks_on()\n ax[1].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[1].set_ylabel(\"VADER Polarity Scores\")\n ax[1].set_xticks(xlocations)\n ax[1].set_xticklabels(xlabels)\n plt.suptitle(\n f\"Twitter's {ticker} total compound sentiment over time is {np.sum(df_tweets['sentiment'])}\"\n )\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n print(\"\")\n export_data(\n export, os.path.dirname(os.path.abspath(__file__)), \"sentiment\", df_tweets\n )", "def crawl(self):\n retrievedTweets = []\n\n count = 1\n \n today = datetime.datetime.now()\n today = today.replace(hour=23, minute=59, second=59, microsecond=999999)\n gap = 1\n yesterday = today - datetime.timedelta(gap) \n nextDay = yesterday + datetime.timedelta(gap)\n \n while True:\n try:\n lst = tweepy.Cursor(self.api.search, lang='en', q=self.keyword, count=50, until=nextDay.date(), result_type='popular').items(50)\n for tweet in lst:\n self.data = [tweet.created_at, tweet.id, tweet.text,\n tweet.user._json['screen_name'], tweet.user._json['name'], \n tweet.favorite_count, tweet.retweet_count, tweet.user.location]\n self.data = tuple(self.data)\n retrievedTweets.append(self.data)\n break\n except tweepy.TweepError as e:\n print(e.reason)\n continue\n except StopIteration: \n break\n\n return retrievedTweets", "def top_n_words(top_n: int, text: str, language='english') -> frame:\n stop_words = stopwords.words(language)\n text_blob = TextBlob(text)\n text_blob_items = text_blob.word_counts.items()\n text_blob_items = [item for item in text_blob_items if item[0] not in stop_words]\n text_blob_items_sorted = sorted(text_blob_items, key=itemgetter(1), reverse=True)\n top_n_items = text_blob_items_sorted[1:top_n + 1]\n data_frame = pd.DataFrame(top_n_items, columns=['Word', 'Count'])\n return data_frame", "def ccnews_pipeline():\n\n # set a random seed for reproducability\n rng = random.Random(FLAGS.random_seed)\n\n # BooksCorpus is organized into directories of genre 
and files of books\n # adventure-all.txt seems to contain all the adventure books in 1 file\n # romance-all.txt is the same. None of the other directories have this,\n # so we will skip it to not double count those books\n file_name_set = set()\n input_files_by_genre = collections.defaultdict(list)\n for path, _, fnames in tf.gfile.Walk(FLAGS.input_file):\n genre = path.split(\"/\")[-1]\n for fname in fnames:\n if fname == \"adventure-all.txt\" or fname == \"romance-all.txt\":\n continue\n if fname in file_name_set:\n continue\n file_name_set.add(fname)\n input_files_by_genre[genre].append(path + \"/\" + fname)\n\n # Sort genres and iterate in order for reproducability\n train_files, test_files = [], []\n for genre, file_list in sorted(input_files_by_genre.items()):\n rng.shuffle(file_list)\n genre_size = len(file_list)\n test_size = int(FLAGS.test_size * genre_size)\n test_files.extend(file_list[:test_size])\n train_files.extend(file_list[test_size:])\n assert len(file_list[:test_size]) + \\\n len(file_list[test_size:]) == len(file_list)\n\n # make sure there is no test train overlap\n for filename in train_files:\n assert filename not in test_files\n\n rng.shuffle(train_files)\n rng.shuffle(test_files)\n\n def pipeline(root):\n \"\"\"Beam pipeline for converting CCNews files to TF Examples.\"\"\"\n _ = (\n root | \"Create test files\" >> beam.Create(test_files)\n | \"Read test files\" >> beam.FlatMap(read_file)\n | \"test Shuffle\" >> beam.Reshuffle()\n | \"Preproc test docs\" >> beam.FlatMap(preproc_doc)\n | \"record test Shuffle\" >> beam.Reshuffle()\n | \"Write to test tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".cc_cpc.test.tfrecord\", num_shards=50))\n _ = (\n root | \"Create train files\" >> beam.Create(train_files)\n | \"Read train files\" >> beam.FlatMap(read_file)\n | \"train Shuffle\" >> beam.Reshuffle()\n | \"Preproc train docs\" >> beam.FlatMap(preproc_doc)\n | \"record train Shuffle\" >> beam.Reshuffle()\n | \"Write to train tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".cc_cpc.train.tfrecord\", num_shards=450))\n return\n\n return pipeline", "def get_random_tweets(n):\r\n sample = list(mongo_coll_tweets.aggregate([{'$sample': {'size': n}}]))\r\n\r\n return sample", "def get_data(max_users = 30):\n\n #cache here\n\n\n mongo_db = pymongo.Connection('grande.rutgers.edu', 27017)['citybeat_production']\n tweets_collection = mongo_db['tweets']\n\n\n test_tweets = []\n seed_users = []\n\n\n\n try:\n with open('./cache_tweets.pkl'):\n tweets, test_tweets = pickle.load(open('./cache_tweets.pkl'))\n except:\n print 'in'\n # not here. 
fetch\n tweets = []\n for n, tweet in enumerate(tweets_collection.find({\"created_time\": {\"$gte\":\"1380643200\", \"$lt\":\"1380902400\"}})):\n tweet['text'] = re.sub(r\"(?:\\@|https?\\://)\\S+\", \"\", tweet['text'])\n tweet['text'] = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', tweet['text'], flags=re.MULTILINE)\n tweets.append(tweet)\n print n\n\n #print 'len of tweets ', len(tweets), 'len of test = ', len(test_tweets)\n test_tweets = tweets[-100:-1]\n #pickle.dump((tweets, test_tweets), open('./cache_tweets.pkl','w'))\n\n tweets = [tweet for tweet in tweets if len(tweet['text'].split(' ')) >= 10]\n\n\n\n\n\n\n return tweets, test_tweets", "def get_top_tweets():\n Tweet.top_tweets = [(k, v) for k, v in sorted(Tweet.hashtag_counter.items(), key=lambda item: item[1], reverse=True)]\n top_10_tweets = {}\n top_10_tweets['top_tweets'] = []\n for tweet in Tweet.top_tweets[:10]:\n top_10_tweets['top_tweets'].append({'hashtag': \"#\"+tweet[0], 'count': tweet[1]})\n return top_10_tweets", "def get_top_keywords(self,in_filename, K, ignore_hashtags, ignore_usernames, tu):\n words = {}\n # Traverse the tweet file and count the words.\n with open(in_filename) as fp:\n for temp in fp:\n tweetobj = json.loads(temp)\n if \"text\" in tweetobj:\n text = tweetobj[\"text\"]\n text = re.sub(\"\\\\s+\", \" \", text.lower())\n\n # Get the token by using TupleUtil class object tu\n tokens = tu.tokenize_text(text, ignore_hashtags, ignore_usernames)\n keys = tokens.keys()\n\n # Increment the count if the word already exists\n for key in keys:\n if key in words:\n words[key] = words[key] + tokens[key]\n else:\n words[key] = tokens[key]\n\n # Get all words and create tag class object from it.\n keys = set(words.keys())\n tags = []\n for key in keys:\n tag = Tags()\n tag.key = key\n tag.value = words[key]\n tags.append(tag)\n\n # Sort the tags\n tags.sort(reverse=True)\n cloudwords = []\n numwords = K\n\n # Reduce K if the number of words are less than K\n if len(tags) < numwords:\n numwords = len(tags)\n\n # Take K words and create the list of dictionary object for D3js Library\n for i in range(numwords):\n wordfreq = {}\n tag = tags[i]\n wordfreq[\"text\"] = tag.key\n wordfreq[\"size\"] = tag.value/80\n cloudwords.append(wordfreq)\n\n return cloudwords", "def construct_df_topics(self, n_words=20):\n\n self.check_model()\n topic_keywords = []\n keywords = array(self.vectorizer.get_feature_names())\n\n for topic_weights in self.model.components_:\n top_keyword_locs = (-topic_weights).argsort()[:n_words]\n topic_keywords.append(keywords.take(top_keyword_locs))\n\n self.df_topic_keywords = pd.DataFrame(topic_keywords)\n self.df_topic_keywords.columns = ['Word ' + str(i) for i in range(self.df_topic_keywords.shape[1])]\n self.df_topic_keywords.index = ['Topic ' + str(i) for i in range(self.df_topic_keywords.shape[0])]", "def get_live_tweets_from_twitter_stream(auth, terms, num_tweets):\n listener = TwitterListener()\n listener._max_tweets = num_tweets\n twitter_stream = Stream(auth, listener)\n twitter_stream.filter(track=terms, languages=['en'])\n listener.store_live_tweets()", "def process_tweets(s3_working_bucket: str, date: tuple) -> DataFrame:\n\n logging.debug(\"Start reading tweets csv.\")\n df_tweets = stdm.read_csv(spark, s3_working_bucket, date, \"twitter\", \"tweets\")\n\n logging.debug(\"Calling extract_tweet_source function.\")\n df_tweets = stp.extract_tweet_source(df_tweets)\n\n logging.debug(\"Calling col_to_datetime function with df_tweets data.\")\n df_tweets = stp.col_to_datetime(df_tweets, 
\"tweet_created_at\")\n\n logging.debug(\"Calling merge_texts function.\")\n df_tweets = stp.merge_texts(df_tweets)\n\n logging.debug(\"Calling get_tickers function.\")\n df_tweets = stp.get_tickers(df_tweets)\n\n # In case json files are loaded not in order.\n logging.debug(\"Calling order_by_col function with df_tweets data.\")\n df_tweets = stp.order_by_col(df_tweets, \"tweet_created_at\")\n\n logging.debug(\"Calling drop_outofrange function with df_tweets data.\")\n df_tweets = stp.drop_outofrange(df_tweets, \"tweet_created_at\", date)\n\n return df_tweets", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def getTopNWords(self, n=5):\n word_id = []\n for i in range(self.topic_word_matrix.shape[0]):\n word_id.append(self.topic_word_matrix[i].argsort()[:n])\n top_word_df = pd.DataFrame(index=['topic{}'.format(x) for x in range(self.K)],\n columns=['word{}'.format(x) for x in range(n)])\n for i in range(len(word_id)):\n for j in range(n):\n top_word_df.loc['topic{}'.format(i), 'word{}'.format(j)] = self.id2word[word_id[i][j]]\n return top_word_df", "def top_n(userid):\n agg = [s[\"search_id\"] for s in db_searches.find()]\n table = pd.DataFrame()\n table[\"searches\"] = Counter(agg).keys()\n table[\"count\"] = Counter(agg).values()\n table = table.sort_values(\"count\", ascending=False)\n table = table[:10]\n search_ids = table[\"searches\"].values\n counts = table[\"count\"].values\n n = 0\n top_n = []\n while n < len(search_ids):\n top_n.append([str(db_search_terms.find_one({\"_id\": search_ids[n]}).get(\"value\")), str(counts[n])])\n n += 1\n jsonob = jsonify(top_n=top_n)\n return jsonob", "def collect_tweets(redis_client, twitter_client, search_term):\n search = Search(redis_client, twitter_client, search_term)\n search.get_term_state()\n search.parse_term_state()\n search.set_query_string()\n search.set_execution_time()\n search.execute_query()\n search.incr_query_counters()\n search.set_newest_id()\n search.set_oldest_id()\n search.set_scenario()\n search.set_term_state()\n search.store_results()\n search.set_score()\n search.log_state()", "def update_tweets_feed(n):\n \n # Retrieve the tweets\n first_tweet = get_value(df_1t, n)\n second_tweet = get_value(df_2t, n) \n third_tweet = get_value(df_3t, n)\n fourth_tweet = get_value(df_4t, n)\n fifth_tweet = get_value(df_5t, n)\n sixth_tweet = get_value(df_6t, n)\n seventh_tweet = get_value(df_7t, n)\n eighth_tweet = get_value(df_8t, n)\n nineth_tweet = get_value(df_9t, n)\n tenth_tweet = get_value(df_10t, n) \n \n # Compute the sentiment of each tweet\n sa_first_tweet = sentiment_analyzer_scores(first_tweet)\n sa_second_tweet = sentiment_analyzer_scores(second_tweet)\n sa_third_tweet = sentiment_analyzer_scores(third_tweet)\n sa_fourth_tweet = sentiment_analyzer_scores(fourth_tweet)\n sa_fifth_tweet = sentiment_analyzer_scores(fifth_tweet)\n 
sa_sixth_tweet = sentiment_analyzer_scores(sixth_tweet)\n sa_seventh_tweet = sentiment_analyzer_scores(seventh_tweet)\n sa_eighth_tweet = sentiment_analyzer_scores(eighth_tweet)\n sa_nineth_tweet = sentiment_analyzer_scores(nineth_tweet)\n sa_tenth_tweet = sentiment_analyzer_scores(tenth_tweet)\n \n # Return the tweet contents and a pie graph of the sentiment.\n \n return html.Div([\n html.Div([\n\n# First Tweet\n html.Div([\n html.Div([\n html.Pre(str(first_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '2px 2px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px',\n }\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_first_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\", }\n ),\n ], \n className = 'row' \n ),\n \n# Second Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(second_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_second_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Third Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(third_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_third_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Fourth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(fourth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_fourth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n\n # Fifth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(fifth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_fifth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n\n # Sixth Tweet\n html.Div([\n html.Div([\n html.Pre(str(sixth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_sixth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Seventh Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(seventh_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 
'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n \n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_seventh_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Eighth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(eighth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n \n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_eighth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Nineth\n \n html.Div([\n html.Div([\n html.Pre(str(nineth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_nineth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Tenth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(tenth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_tenth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n ], style = {'overflowY': 'scroll', 'overflowX': 'hidden',\n 'maxHeight': '105ex', 'backgroundColor' : '#eaeaea'}\n ),\n \n ])", "def analyze_tweets(tweets, model, w2v_model):\n # TODO DO EVERYTHING HERE\n #tweets = [(\"StarWars\", tc.query_tweets(\"StarWars\"))]\n \n #tweets = tc.query_tweets('starwars')\n df = pd.DataFrame(columns=['pos', 'neu', 'neg'])\n if not os.path.isdir('results'):\n os.mkdir('results')\n for topic, topic_tweets in tweets:\n tokenized_tweets = tp.process_raw_tweets(topic_tweets)\n df.loc[topic], dummy = classify_tweets(tokenized_tweets, model, w2v_model)\n vis.word_cloud_from_frequencies(tp.count_tokens(tokenized_tweets), f\"results/{topic}_cloud.png\", width=800, height=400,)\n \n vis.bar_plot_from_dataframe(df, 'results/results.png')\n print(\"\\n\")\n print(df)", "def main(keyword):\n fs_db = initFirestore()\n # keyword = \"whatshappeninginmyanmar\"\n all_posts = []\n\n twitterCrawler = TwitterCrawler(fs_db,keyword) \n cleaned_tweets = twitterCrawler.crawl() \n\n redditCrawler = RedditCrawler(fs_db, keyword) \n cleaned_submissions = redditCrawler.crawl() \n\n twitterCrawler.exportToDB(cleaned_tweets)\n redditCrawler.exportToDB(cleaned_submissions)\n\n crawler = Crawler()\n\n crawler.generateSentimentAnalysis(fs_db, cleaned_submissions, cleaned_tweets)\n wordcloud_img = crawler.generateWordCloud()\n\n # Send wordcloud to DB\n doc_ref = fs_db.collection(u'wordcloud').document('first')\n doc_ref.set({\n u'image': wordcloud_img\n })", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = 
api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def get_tweets_from_search(api, search_string, parameters=\" -filter:retweets\", since=\"2021-08-09\", lang=\"en\", max_tweets=1000):\n\n tweet_list = []\n count = 0\n search = search_string\n params = parameters\n\n for tweet in tweepy.Cursor(api.search, q=search + params,\n count=100,\n tweet_mode=\"extended\",\n lang=lang,\n since=since,\n # until=\"2015-02-01\",\n ).items():\n tweet_list.append(tweet._json[\"full_text\"])\n count += 1\n if count == max_tweets:\n break\n print(count)\n return pd.DataFrame({\"text\": tweet_list})", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = 
[tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def evaluate_coverage_tweets(event, n_words, session, ids):\n print('-------- {} ------------'.format(event))\n summaries_path = Path(LOCAL_DATA_DIR_2, 'data', event, 'summaries', 'system')\n summaries = [x for x in summaries_path.iterdir() if x.is_file()]\n words, distribution, pairs = calculate_distribution_event(event, session, ids, steam=True)\n print(words[:n_words])\n for summary in summaries:\n with open(summary, 'r') as summary_file:\n print(summary_file.name)\n text_summary = summary_file.read()\n popular_summary = calculate_most_popular(text_summary, n_words, steam=True)\n popular_words = [x[0] for x in popular_summary]\n print(popular_words)\n print(\n float(len(set(words[:n_words]) & set(popular_words))) / len(set(words[:n_words]) | set(popular_words)))", "def test_mapreduce_wordcount():\n\n state = mapreduce_wordcount.run(\n url=\"https://raw.githubusercontent.com/topher-lo/prefect-with-k8/main/src/prefect_kube_demo/data/dream.txt\",\n executor=DaskExecutor(),\n )\n task_ref = mapreduce_wordcount.get_tasks(\"reducer\")[0]\n result = state.result[task_ref].result\n # Get top 3 tokens\n result_top_tokens = sorted(result, key=lambda x: x[1])[-3:]\n expected_top_tokens = [(\"will\", 17), (\"freedom\", 13), (\"from\", 12)]\n assert state.is_successful()\n assert result_top_tokens == expected_top_tokens", "def get_sentiment_trends(order):\r\n\r\n # Get date seven days ago\r\n seven_days_ago = datetime.now() - timedelta(days=7)\r\n\r\n # Get raw PyMongo collection\r\n collection = Tweet._get_collection()\r\n\r\n # Perform aggregate query\r\n result = collection.aggregate([\r\n {\r\n \"$match\":\r\n {\r\n \"tweet_time\": {\"$gt\": seven_days_ago}\r\n }\r\n },\r\n {\r\n \"$group\":\r\n {\r\n \"_id\": \"$keyword_search_term\",\r\n \"average\":\r\n {\r\n \"$avg\": \"$sentiment_score\"\r\n }\r\n }\r\n },\r\n {\r\n \"$sort\":\r\n {\r\n \"average\": order\r\n }\r\n },\r\n {\r\n \"$limit\": 10\r\n }\r\n ])\r\n\r\n return result" ]
[ "0.55078226", "0.54873246", "0.53973114", "0.5381288", "0.53680915", "0.534437", "0.5264347", "0.52361506", "0.52216566", "0.5216795", "0.52121794", "0.52082795", "0.5202691", "0.51886666", "0.5179647", "0.5176253", "0.5159707", "0.51566535", "0.51549745", "0.51432204", "0.5142679", "0.51315886", "0.5116016", "0.50891703", "0.50860834", "0.50779045", "0.5077248", "0.50444674", "0.5030199", "0.50207543" ]
0.66543806
0
BigQuery schema for the word co-occurrence table.
def generate_cooccur_schema():
  json_str = json.dumps({'fields': [
      {'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},
      {'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},
      {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
      {'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},
      {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
  return parse_table_schema_from_json(json_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_wc_schema():\n json_str = json.dumps({'fields': [\n {'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def co_table():\n\n connection = MongoClient(\n \"mongodb://anshuman264:[email protected]:27017,\"\n \"cluster0-shard-00-01-ouybv.mongodb.net:27017,\"\n \"cluster0-shard-00-02-ouybv.mongodb.net:27017/co_table?ssl=true&replicaSet=Cluster0-shard-0&authSource\"\n \"=admin\")\n\n collection = connection[DBS_NAME][COLLECTION_NAME]\n co_occurrences = collection.find_one()\n co_occurrences = todict(co_occurrences)\n co_list = list(co_occurrences.items())\n co_list = sorted(co_list, key=itemgetter(1), reverse=True)\n print(co_list)\n labels = ['Word', 'Co-Occurrence']\n\n df = pd.DataFrame.from_records(co_list, columns=labels)\n connection.close()\n\n return render_template(\"analysis.html\", tables=[df.to_html()], titles=['na', 'of Research Articles'])", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def tabler(subcorpus_names, list_of_dicts, num_rows):\n import pandas as pd\n cols = []\n for subcorp, data in zip(subcorpus_names, list_of_dicts):\n col = pd.Series([w for w, v in data.most_common(num_rows)], name = subcorp)\n cols.append(col)\n word_table = pd.concat(cols, axis = 1)\n return word_table", "def createTableBOW(self,DBcursor):\n sql=\"create table if not exists BOW (bow_id INTEGER PRIMARY KEY, word TEXT, total_count INTEGER, netloc_count INTEGER, path_count INTEGER, params_count INTEGER, query_count INTEGER, fragment_count INTEGER);\"\n DBcursor.execute(sql)", "def _create_tf_table(self, words) -> dict:\r\n\r\n freqTable = dict()\r\n tfTable = dict()\r\n\r\n totalWords = len(words)\r\n for word in words:\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n \r\n uniqueWords = set(words)\r\n for word in uniqueWords:\r\n tfTable[word] = freqTable[word] / float(totalWords)\r\n\r\n return tfTable", "def createTableBOW(self,DBcursor):\n sql=\"create table if not exists BOW (bow_id INTEGER PRIMARY KEY, word TEXT, word_count INTEGER);\"\n DBcursor.execute(sql)", "def schema() -> None:\n pass", "def fill_words_table(self, statistics, path, filemoving, conn, logg, parser):\n logg.writing_log(conn, 'Starting filling words table')\n c = conn.cursor()\n val1 = statistics.book_name(path, filemoving, parser).replace(' ', '_')\n sql1 = \"CREATE TABLE \" + val1 + \" (word text, count integer, count_uppercase integer)\"\n c.execute(sql1)\n val2 = statistics.frequency(path, filemoving, parser)\n sql2 = \"INSERT INTO \" + val1 + \" VALUES(?,?,?)\"\n for key, value in val2.items():\n if not key.istitle():\n c.execute(sql2, (key, value, (0 if val2.get(key.capitalize()) == None else val2.get(key.capitalize()))))\n logg.writing_log(conn, 'Words table is filled')\n conn.commit()", "def _createWordsTable(self):\n\t\tcommand = \"\"\"CREATE TABLE words (ID INTEGER PRIMARY KEY,\n\t\t\tword TEXT,\n\t\t\ttranslation TEXT,\n\t\t\tlast_refresh INTEGER,\n\t\t\tlevel INTEGER,\n\t\t\tcourse INTEGER\n\t\t\t);\n\"\"\"\n\n\t\tself._run_command(command)", "def create_lookup_tables(text):\n word_count = Counter(text)\n #sorted_word = sorted(word_count, key=word_count.get, reverse=True) # key=word_count.get 按照key原始顺序排序,reverse=True 降序\n int_to_vocab = { idx:word for 
idx,word in enumerate(word_count)}\n vocab_to_int = { word:idx for idx,word in enumerate(word_count)}\n return vocab_to_int, int_to_vocab", "def get_co_occurrences(self, word1, word2):\n raise NotImplementedError(\"Word2Vec model does not support co-occurrence counting\")", "def create_whoosh_schema(self) -> whoosh.fields.Schema:\n schema_classname = \"WhooshSchema\"\n schema_classname = str(schema_classname)\n attrs = OrderedDict()\n for field in self.fields:\n if field.type_is_ngram:\n whoosh_field = whoosh.fields.NGRAM(\n stored=field.type_is_store,\n minsize=field.ngram_minsize,\n maxsize=field.ngram_maxsize,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_phrase:\n whoosh_field = whoosh.fields.TEXT(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_keyword:\n whoosh_field = whoosh.fields.KEYWORD(\n stored=field.type_is_store,\n lowercase=field.keyword_lowercase,\n commas=field.keyword_commas,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_numeric:\n whoosh_field = whoosh.fields.NUMERIC(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_store:\n whoosh_field = whoosh.fields.STORED()\n else: # pragma: no cover\n raise NotImplementedError\n attrs[field.name] = whoosh_field\n SchemaClass = type(schema_classname, (whoosh.fields.SchemaClass,), attrs)\n schema = SchemaClass()\n return schema", "def build_cooccurrence(\n cleansed_cvs: Union[str, Path],\n output_path: Union[str, Path],\n text_colname: str = \"text\",\n language_colname: str = \"main_language\",\n lang: Optional[str] = None,\n input: str = \"content\",\n encoding: str = \"utf-8\",\n decode_error: str = \"strict\",\n strip_accents: Optional[str] = None,\n lowercase: bool = True,\n preprocessor: Optional[callable] = None,\n tokenizer: Optional[callable] = None,\n stop_words: Optional[Union[str, List[str]]] = None,\n token_pattern: str = r\"(?u)\\b\\w\\w+\\b\",\n ngram_range: Tuple[int] = (1, 1),\n context_size: Optional[int] = None,\n analyzer: str = \"word\",\n max_df: float = 1.0,\n min_df: int = 1,\n max_features: int = None,\n vocabulary: List[str] = None,\n binary: bool = False,\n dtype=np.int64,\n):\n output_path = Path(output_path)\n\n df = read_json(cleansed_cvs)\n if lang is not None:\n df = df.query(f\"{language_colname} == '{lang}'\")\n\n cooc_matrix_tfx = CoOccurrenceMatrixTransformer(\n encoding=encoding,\n decode_error=decode_error,\n strip_accents=strip_accents,\n lowercase=lowercase,\n preprocessor=preprocessor,\n tokenizer=tokenizer,\n stop_words=stop_words,\n token_pattern=token_pattern,\n ngram_range=ngram_range,\n context_size=context_size,\n analyzer=analyzer,\n max_df=max_df,\n min_df=min_df,\n max_features=max_features,\n vocabulary=vocabulary,\n binary=binary,\n dtype=dtype,\n )\n\n cooc_matrix = cooc_matrix_tfx.fit_transform(df[text_colname])\n\n np.savez_compressed(\n output_path,\n data=cooc_matrix.data,\n indices=cooc_matrix.indices,\n indptr=cooc_matrix.indptr,\n shape=cooc_matrix.shape,\n )\n\n with open(output_path.with_name(\"counter.pkl\"), \"wb\") as ostream:\n pkl.dump(cooc_matrix_tfx.counter, ostream)\n\n if output_path.exists():\n return 0\n return 1", "def ReadSchema(schema, bigquery_messages):\n\n return bigquery_messages.TableSchema(\n fields=[\n _TableFieldSchemaForEntry(entry, bigquery_messages)\n for entry in schema.split(',')])", "def calc_tf(doc):\r\n tf = {}\r\n for term in doc:\r\n if term 
not in tf:\r\n tf[term] = doc.count(term)\r\n return tf", "def gen_words(self, doc):\n pattern = re.compile(u'[\\\\s\\\\d,.<>/?:;\\'\\\"[\\\\]{}()\\\\|~!@#$%^&*\\\\-_=+a-zA-Z,。《》、?:;“”‘’{}【】()…¥!—┄-]+')\n doc = re.sub(pattern, ' ', doc)\n suffix_indexes = index_of_sorted_suffix(doc, self.max_word_len)\n word_cands = {}\n # compute frequency and neighbors\n for suf in suffix_indexes:\n word = doc[suf[0]:suf[1]]\n if word not in word_cands:\n word_cands[word] = WordInfo(word)\n word_cands[word].update(doc[suf[0] - 1:suf[0]], doc[suf[1]:suf[1] + 1])\n # compute probability and entropy\n length = len(doc)\n for k in word_cands:\n word_cands[k].compute(length)\n word_cands[k].compute_pp(self.pos_prop)\n # compute aggregation of words whose length > 1\n values = sorted(word_cands.values(), key=lambda x: len(x.text))\n for v in values:\n if len(v.text) == 1:\n continue\n v.compute_cohesion(word_cands)\n\n return sorted(values, key=lambda v: v.freq, reverse=True)", "def _update_cardinality(self, c):\n if c.type in STRUCT:\n Log.error(\"not supported\")\n try:\n if c.table == \"meta.columns\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.columns, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.columns),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n return\n if c.table == \"meta.tables\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.tables, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.tables),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"name\": c.name}}\n })\n return\n\n es_index = c.table.split(\".\")[0]\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data={\n \"aggs\": {c.name: _counting_query(c)},\n \"size\": 0\n })\n r = result.aggregations.values()[0]\n count = result.hits.total\n cardinality = coalesce(r.value, r._nested.value, 0 if r.doc_count==0 else None)\n if cardinality == None:\n Log.error(\"logic error\")\n\n query = Data(size=0)\n if cardinality > 1000 or (count >= 30 and cardinality == count) or (count >= 1000 and cardinality / count > 0.99):\n Log.note(\"{{table}}.{{field}} has {{num}} parts\", table=c.table, field=c.es_column, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif c.type in _elasticsearch.ES_NUMERIC_TYPES and cardinality > 30:\n Log.note(\"{{field}} has {{num}} parts\", field=c.name, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif len(c.nested_path) != 1:\n query.aggs[literal_field(c.name)] = {\n \"nested\": {\"path\": c.nested_path[0]},\n \"aggs\": {\"_nested\": {\"terms\": {\"field\": c.es_column, \"size\": 0}}}\n }\n else:\n query.aggs[literal_field(c.name)] = 
{\"terms\": {\"field\": c.es_column, \"size\": 0}}\n\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data=query)\n\n aggs = result.aggregations.values()[0]\n if aggs._nested:\n parts = jx.sort(aggs._nested.buckets.key)\n else:\n parts = jx.sort(aggs.buckets.key)\n\n Log.note(\"{{field}} has {{parts}}\", field=c.name, parts=parts)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"partitions\": parts,\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n except Exception, e:\n if \"IndexMissingException\" in e and c.table.startswith(TEST_TABLE_PREFIX):\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": 0,\n \"cardinality\": 0,\n \"last_updated\": Date.now()\n },\n \"clear\":[\n \"partitions\"\n ],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n else:\n self.meta.columns.update({\n \"set\": {\n \"last_updated\": Date.now()\n },\n \"clear\": [\n \"count\",\n \"cardinality\",\n \"partitions\",\n ],\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n Log.warning(\"Could not get {{col.table}}.{{col.es_column}} info\", col=c, cause=e)", "def compute_tfs(descriptions):\n\n # Initialize a dictionary that maps words to their IDF values.\n tf_dict = {}\n\n # Loop over game descriptions\n for game_name in descriptions:\n\n # Loop over words in each document\n for word in descriptions[game_name]:\n\n # continue if the word was already processed in\n # previous documents\n if word in tf_dict:\n continue\n\n # Count number of documents that contain the word\n word_count = 0\n for game_name in descriptions:\n if word in descriptions[game_name]:\n word_count += 1\n\n # add tf_score to tf_dict\n tf_dict[word] = word_count\n\n return tf_dict", "def get_schema(self) -> dict:", "def pre_build_idf_table(self):\r\n doc_per_word_table = dict() # in how many documents does a word occur\r\n \r\n for doc in self.documents:\r\n # converting list to set will delete any duplicate words\r\n doc = self.preprocess_document(doc)\r\n doc_words = set(self.word_tokenize_preprocessed(doc))\r\n\r\n for word in doc_words:\r\n if word in doc_per_word_table:\r\n doc_per_word_table[word] += 1\r\n else:\r\n doc_per_word_table[word] = 1\r\n\r\n total_documents = len(self.documents)\r\n idf_table = dict()\r\n\r\n for word in doc_per_word_table:\r\n idf_table[word] = math.log2(total_documents / float(doc_per_word_table[word]))\r\n\r\n return idf_table", "def gen_bag_of_words_df(self):\n\t\tdef word_vector(doc_text):\n\t\t\tfreqs = pd.Series(collections.Counter(doc_text.split()))\n\t\t\treturn freqs.loc[set(freqs.index.values)|set(self.stems)]\n\t\tself.bagofwords = self.dataframe.text.apply(word_vector).replace({np.nan:0})", "def _create_lookup_tables(self, text):\n word_counts = Counter(text)\n sorted_words = sorted(word_counts, key=word_counts.get, reverse=True)\n vocab_to_int = {word: ii for ii, word in enumerate(sorted_words)}\n int_to_vocab = {ii: word for ii, word in enumerate(sorted_words)}\n return (vocab_to_int, int_to_vocab)", "def schema(self):\n pass", "def schema(self):\n parts = ['GLOBAL', self.index_type, 'INDEX']\n parts.append(\"('%s', %s,\" % (self.name, self.hash_key.name))\n if self.range_key:\n parts.append(\"%s,\" % self.range_key.name)\n if self.includes:\n parts.append(\"[%s],\" % ', '.join((\"'%s'\" % i for i in\n self.includes)))\n\n 
parts.append(\"THROUGHPUT (%d, %d))\" % (self.read_throughput,\n self.write_throughput))\n return ' '.join(parts)", "def schema(self):\n attrs = self.attrs.copy()\n parts = ['CREATE', 'TABLE', self.name, '(%s,' % self.hash_key.schema]\n del attrs[self.hash_key.name]\n if self.range_key:\n parts.append(self.range_key.schema + ',')\n del attrs[self.range_key.name]\n if attrs:\n attr_def = ', '.join([attr.schema for attr in six.itervalues(attrs)])\n parts.append(attr_def + ',')\n\n parts.append(\"THROUGHPUT (%d, %d))\" % (self.read_throughput,\n self.write_throughput))\n parts.extend([g.schema for g in six.itervalues(self.global_indexes)])\n return ' '.join(parts) + ';'", "def schema(self):", "def compute_TF(doc_info):\n tf_scores = []\n\n for idx, doc in enumerate(doc_info):\n tf_score_table = {}\n for word in doc['freq_dict'].keys():\n count = doc['freq_dict'][word]\n tf_score_table[word] = count/doc_info[idx]['doc_length']\n tf_scores.append(tf_score_table)\n\n return tf_scores", "def create_schemas():\n\n # TEXT: the field is indexed, analyzed. By default it is not stored.\n # phrase=False does not allow to search for phrases.\n # sortable=True allows to sort the indexed values\n # ID: the file is indexed, without being analyzed.\n # STORED: the file is saved but not indexed.\n\n pub_schema = Schema(\n pubtype=TEXT(stored=True),\n key=STORED,\n author=TEXT(stored=True),\n title=TEXT(stored=True),\n pages=STORED,\n year=TEXT(stored=True),\n journal=STORED,\n volume=STORED,\n number=STORED,\n url=STORED,\n ee=STORED,\n crossref=ID(stored=True),\n )\n\n ven_schema = Schema(\n pubtype=STORED,\n key=ID(stored=True),\n author=STORED,\n title=TEXT(stored=True),\n journal=STORED,\n publisher=TEXT(stored=True),\n url=STORED,\n ee=STORED,\n year=STORED,\n isbn=STORED,\n )\n\n return pub_schema, ven_schema", "def compute_co_occurrence_matrix(corpus, window_size=8): \n words, num_words = distinct_words(corpus)\n M = None\n word2Ind = {}\n \n # YOUR CODE HERE\n \n indexes = [x for x in range(0,len(words))]\n word2Ind = dict(zip(words,indexes))\n \n M = np.zeros((num_words,num_words))\n \n for text in corpus:\n len_txt = len(text)\n \n for j in range(0,len_txt):\n mid_index = word2Ind[text[j]]\n \n for k in range(j-window_size,j+window_size+1):\n \n if k>=0 and k<len_txt and k!=j:\n last_index = word2Ind[text[k]]\n M[mid_index,last_index] += 1.0\n \n \n #raise NotImplementedError()\n\n return M, word2Ind" ]
[ "0.6825363", "0.5536852", "0.542956", "0.5351534", "0.5351479", "0.5240092", "0.52194554", "0.52166784", "0.5211417", "0.5208293", "0.5185048", "0.51837534", "0.5175961", "0.51413655", "0.511861", "0.5096498", "0.50657487", "0.50420344", "0.5041939", "0.5027259", "0.5024432", "0.5009024", "0.49796608", "0.49710596", "0.4946543", "0.49456236", "0.4938104", "0.49200335", "0.49073184", "0.4905459" ]
0.6641016
1
BigQuery schema for the urls count table.
def generate_url_schema():
    json_str = json.dumps({'fields': [
        {'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},
        {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
    return parse_table_schema_from_json(json_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_tables():\n with db.connect() as conn:\n conn.execute(\n \"CREATE TABLE IF NOT EXISTS url_list \"\n \"(url_id VARCHAR(20) NOT NULL UNIQUE, url_data VARCHAR(2083) NOT NULL);\"\n )", "def get_table_count(table_name, query, headers, base_url, maxpagesize):\n logging.info(\"Running get_table_count() . . . \")\n\n #task_instance = context['task_instance']\n #headers = task_instance.xcom_pull('build_auth_headers', key='auth_headers')\n\n r_count = requests.get('{0}/ws/schema/table/{1}/count?{2}'.format(base_url, table_name, query), headers=headers)\n r_status = r_count.status_code\n if r_status != 200:\n logging.info('Response NOT successful. I got code {} '.format(r_status))\n raise ValueError('Response NOT successful. I got code {} '.format(r_status))\n else:\n logging.info('Response successful! I got code {} '.format(r_status))\n\n count_json = r_count.json()\n row_count = count_json['count']\n\n pages = int(math.ceil(row_count / maxpagesize))\n\n return row_count, pages", "def create_schema(self):\n schema = '''CREATE TABLE jping (\n ip_address text not null,\n interface text not null,\n hostname text not null,\n ping_results integer not null,\n UNIQUE(ip_address, hostname)\n )\n '''\n self.query(schema)", "def _update_cardinality(self, c):\n if c.type in STRUCT:\n Log.error(\"not supported\")\n try:\n if c.table == \"meta.columns\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.columns, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.columns),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n return\n if c.table == \"meta.tables\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.tables, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.tables),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"name\": c.name}}\n })\n return\n\n es_index = c.table.split(\".\")[0]\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data={\n \"aggs\": {c.name: _counting_query(c)},\n \"size\": 0\n })\n r = result.aggregations.values()[0]\n count = result.hits.total\n cardinality = coalesce(r.value, r._nested.value, 0 if r.doc_count==0 else None)\n if cardinality == None:\n Log.error(\"logic error\")\n\n query = Data(size=0)\n if cardinality > 1000 or (count >= 30 and cardinality == count) or (count >= 1000 and cardinality / count > 0.99):\n Log.note(\"{{table}}.{{field}} has {{num}} parts\", table=c.table, field=c.es_column, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif c.type in _elasticsearch.ES_NUMERIC_TYPES and cardinality > 30:\n Log.note(\"{{field}} has {{num}} parts\", field=c.name, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": 
c.es_column}}\n })\n return\n elif len(c.nested_path) != 1:\n query.aggs[literal_field(c.name)] = {\n \"nested\": {\"path\": c.nested_path[0]},\n \"aggs\": {\"_nested\": {\"terms\": {\"field\": c.es_column, \"size\": 0}}}\n }\n else:\n query.aggs[literal_field(c.name)] = {\"terms\": {\"field\": c.es_column, \"size\": 0}}\n\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data=query)\n\n aggs = result.aggregations.values()[0]\n if aggs._nested:\n parts = jx.sort(aggs._nested.buckets.key)\n else:\n parts = jx.sort(aggs.buckets.key)\n\n Log.note(\"{{field}} has {{parts}}\", field=c.name, parts=parts)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"partitions\": parts,\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n except Exception, e:\n if \"IndexMissingException\" in e and c.table.startswith(TEST_TABLE_PREFIX):\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": 0,\n \"cardinality\": 0,\n \"last_updated\": Date.now()\n },\n \"clear\":[\n \"partitions\"\n ],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n else:\n self.meta.columns.update({\n \"set\": {\n \"last_updated\": Date.now()\n },\n \"clear\": [\n \"count\",\n \"cardinality\",\n \"partitions\",\n ],\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n Log.warning(\"Could not get {{col.table}}.{{col.es_column}} info\", col=c, cause=e)", "def schema(self):\n pass", "def describe(self, url: str) -> dict:\n return {\n \"@context\": {\n \"schema\": \"http://schema.org/\",\n \"void\": \"http://rdfs.org/ns/void#\",\n 'sage': 'http://sage.univ-nantes.fr/sage-voc#'\n },\n \"@id\": self._uri,\n \"@type\": \"http://schema.org/Dataset\",\n \"schema:url\": url,\n \"schema:name\": self._name,\n \"schema:description\": self._description,\n \"void:triples\": self.nb_triples,\n \"void:distinctSubjects\": self._connector.nb_subjects if self._connector.nb_subjects is not None else \"unknown\",\n \"void:properties\": self._connector.nb_predicates if self._connector.nb_predicates is not None else \"unknown\",\n \"void:distinctObjects\": self._connector.nb_objects if self._connector.nb_objects is not None else \"unknown\",\n \"sage:timeQuota\": self._quantum,\n \"sage:maxResults\": self.max_results if self.max_results is not inf else 'inf'\n }", "def urltable(self):\n return self._urltable", "def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()", "def generate_wc_schema():\n json_str = json.dumps({'fields': [\n {'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def generate_cooccur_schema():\n json_str = json.dumps({'fields': [\n {'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def _load_bigquery_schemas(self):\n logger.info(\"Reading BigQuery schema files...\")\n for table_name in self.tables 
+ self.type_tables:\n logger.info(f\"Reading schema file for table '{table_name}'...\")\n schema_json = resource_stream('sotorrent_pipeline',\n f'bigquery_schemas/{table_name}.json').read().decode()\n self.bigquery_schemas[table_name] = json.loads(schema_json)\n self.bigquery_schemas_with_fields[table_name] = json.loads('{\"fields\":' + schema_json + '}')\n logger.info(f\"Read {len(self.bigquery_schemas)} schema file(s).\")", "def get_table_schema(dataset_id, table_id):\n logging.info('getting table schema')\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n bg_tableref = bigquery.table.TableReference(dataset_ref, table_id)\n bg_table = bigquery_client.get_table(bg_tableref)\n return bg_table.schema", "def ReadSchema(schema, bigquery_messages):\n\n return bigquery_messages.TableSchema(\n fields=[\n _TableFieldSchemaForEntry(entry, bigquery_messages)\n for entry in schema.split(',')])", "def fast_count(db, Model): # noqa\n return db.session.execute(\n 'SELECT n_live_tup FROM pg_stat_all_tables WHERE relname = :tablename',\n {'tablename': Model.__tablename__}\n ).scalar()", "def schema(self):", "def schema() -> None:\n pass", "def count_urls(self):\n return self.request(\"count:Message_Url\", [ None ])", "def table(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable':\n return pulumi.get(self, \"table\")", "def num_links(self, column='posts_separated', new_col_name='num_of_links'):\n self.df[new_col_name] = [sum([url.count('http') for url in x]) for x in self.df[column]]", "def __len__(self, schema):\r\n raise NotImplementedError", "def schema(self):\n attrs = self.attrs.copy()\n parts = ['CREATE', 'TABLE', self.name, '(%s,' % self.hash_key.schema]\n del attrs[self.hash_key.name]\n if self.range_key:\n parts.append(self.range_key.schema + ',')\n del attrs[self.range_key.name]\n if attrs:\n attr_def = ', '.join([attr.schema for attr in six.itervalues(attrs)])\n parts.append(attr_def + ',')\n\n parts.append(\"THROUGHPUT (%d, %d))\" % (self.read_throughput,\n self.write_throughput))\n parts.extend([g.schema for g in six.itervalues(self.global_indexes)])\n return ' '.join(parts) + ';'", "def create_index_tables(self):\n # List of urls that have been indexed\n self.con.execute('create table urllist(url)')\n # List of words\n self.con.execute('create table wordlist(word)')\n # What doc the word is and where it is in the doc\n self.con.execute('create table wordlocation(urlid, wordid, location)')\n # Indicates a link from one url to another\n self.con.execute('create table link(fromid integer, toid integer)')\n # which words are actually in a link\n self.con.execute('create table linkwords(wordid, linkid)')\n self.con.execute('create index wordidx on wordlist(word)')\n self.con.execute('create index urlidx on urllist(url)')\n self.con.execute('create index wordurlidx on wordlocation(wordid)')\n self.con.execute('create index urltoidx on link(toid)')\n self.con.execute('create index urlfromidx on link(fromid)')\n self.dbcommit()", "def build_song_schema():\n schema = StructType(\n [\n StructField('artist_id', StringType(), True),\n StructField('artist_latitude', DecimalType(), True),\n StructField('artist_longitude', DecimalType(), True),\n StructField('artist_location', StringType(), True),\n StructField('artist_name', StringType(), True),\n StructField('duration', DecimalType(), True),\n StructField('num_songs', IntegerType(), True),\n StructField('song_id', StringType(), True),\n StructField('title', StringType(), True),\n 
StructField('year', IntegerType(), True)\n ]\n )\n return schema", "def count(self, query):", "def count(self, table_name=None):\n return self._get_storage().count(table_name=table_name)", "def test_table_sizes(self):\n labels_tables = self.labels.find_one({ 'dataset': 'SF1' })['tables']\n\n for label_data in labels_tables.values():\n self.assertEqual(label_data['size'], len(label_data['labels']))", "def get_schema(self) -> dict:", "def read_database_urls(self):\n\n col_kvk = self.kvk_url_keys[KVK_KEY]\n col_name = self.kvk_url_keys[NAME_KEY]\n col_url = self.kvk_url_keys[URL_KEY]\n\n self.url_df = self.read_csv_input_file(self.url_input_file_name,\n usecols=[col_kvk, col_name, col_url],\n names=[KVK_KEY, NAME_KEY, URL_KEY],\n remove_spurious_urls=True,\n unique_key=URL_KEY)\n\n self.logger.info(\"Removing duplicated table entries\")\n self.remove_duplicated_url_entries()", "def recordCount(self, schema, table):\r\n r = self.fetchSqlRecords(\r\n \"SELECT count(*) FROM {}\".format(self.encodeTableName(schema, table)))\r\n return r[0][0]", "def getSchema(self):\n\n schema = [\n \"title\",\n \"body\",\n \"created_at\",\n \"id\",\n \"summary\",\n \"abstract\",\n \"keywords\",\n ]\n\n return schema" ]
[ "0.56397116", "0.55176735", "0.54308933", "0.53076", "0.50500786", "0.5012381", "0.5005372", "0.49362692", "0.4921254", "0.49172506", "0.49127892", "0.49102753", "0.49071926", "0.48962775", "0.48466182", "0.48446733", "0.48303398", "0.48188177", "0.48101094", "0.48087117", "0.48009402", "0.47861502", "0.47826767", "0.47672236", "0.47653726", "0.47556397", "0.4755447", "0.47455248", "0.47363994", "0.47116077" ]
0.6956805
0
BigQuery schema for the word count table.
def generate_wc_schema():
    json_str = json.dumps({'fields': [
        {'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},
        {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})
    return parse_table_schema_from_json(json_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_cooccur_schema():\n json_str = json.dumps({'fields': [\n {'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def ReadSchema(schema, bigquery_messages):\n\n return bigquery_messages.TableSchema(\n fields=[\n _TableFieldSchemaForEntry(entry, bigquery_messages)\n for entry in schema.split(',')])", "def create_whoosh_schema(self) -> whoosh.fields.Schema:\n schema_classname = \"WhooshSchema\"\n schema_classname = str(schema_classname)\n attrs = OrderedDict()\n for field in self.fields:\n if field.type_is_ngram:\n whoosh_field = whoosh.fields.NGRAM(\n stored=field.type_is_store,\n minsize=field.ngram_minsize,\n maxsize=field.ngram_maxsize,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_phrase:\n whoosh_field = whoosh.fields.TEXT(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_keyword:\n whoosh_field = whoosh.fields.KEYWORD(\n stored=field.type_is_store,\n lowercase=field.keyword_lowercase,\n commas=field.keyword_commas,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_numeric:\n whoosh_field = whoosh.fields.NUMERIC(\n stored=field.type_is_store,\n field_boost=field.weight,\n sortable=field.is_sortable,\n )\n elif field.type_is_store:\n whoosh_field = whoosh.fields.STORED()\n else: # pragma: no cover\n raise NotImplementedError\n attrs[field.name] = whoosh_field\n SchemaClass = type(schema_classname, (whoosh.fields.SchemaClass,), attrs)\n schema = SchemaClass()\n return schema", "def build_song_schema():\n schema = StructType(\n [\n StructField('artist_id', StringType(), True),\n StructField('artist_latitude', DecimalType(), True),\n StructField('artist_longitude', DecimalType(), True),\n StructField('artist_location', StringType(), True),\n StructField('artist_name', StringType(), True),\n StructField('duration', DecimalType(), True),\n StructField('num_songs', IntegerType(), True),\n StructField('song_id', StringType(), True),\n StructField('title', StringType(), True),\n StructField('year', IntegerType(), True)\n ]\n )\n return schema", "def build_song_schema():\n schema = T.StructType(\n [\n T.StructField('artist_id', T.StringType(), True),\n T.StructField('artist_latitude', T.DecimalType(), True),\n T.StructField('artist_longitude', T.DecimalType(), True),\n T.StructField('artist_location', T.StringType(), True),\n T.StructField('artist_name', T.StringType(), True),\n T.StructField('duration', T.DecimalType(), True),\n T.StructField('num_songs', T.IntegerType(), True),\n T.StructField('song_id', T.StringType(), True),\n T.StructField('title', T.StringType(), True),\n T.StructField('year', T.IntegerType(), True)\n ]\n )\n return schema", "def schema() -> None:\n pass", "def get_schema(self) -> dict:", "def getSchema(self):\n\n schema = [\n \"title\",\n \"body\",\n \"created_at\",\n \"id\",\n \"summary\",\n \"abstract\",\n \"keywords\",\n ]\n\n return schema", "def get_table_schema(dataset_id, table_id):\n logging.info('getting table schema')\n bigquery_client = 
bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n bg_tableref = bigquery.table.TableReference(dataset_ref, table_id)\n bg_table = bigquery_client.get_table(bg_tableref)\n return bg_table.schema", "def _update_cardinality(self, c):\n if c.type in STRUCT:\n Log.error(\"not supported\")\n try:\n if c.table == \"meta.columns\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.columns, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.columns),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n return\n if c.table == \"meta.tables\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.tables, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.tables),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"name\": c.name}}\n })\n return\n\n es_index = c.table.split(\".\")[0]\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data={\n \"aggs\": {c.name: _counting_query(c)},\n \"size\": 0\n })\n r = result.aggregations.values()[0]\n count = result.hits.total\n cardinality = coalesce(r.value, r._nested.value, 0 if r.doc_count==0 else None)\n if cardinality == None:\n Log.error(\"logic error\")\n\n query = Data(size=0)\n if cardinality > 1000 or (count >= 30 and cardinality == count) or (count >= 1000 and cardinality / count > 0.99):\n Log.note(\"{{table}}.{{field}} has {{num}} parts\", table=c.table, field=c.es_column, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif c.type in _elasticsearch.ES_NUMERIC_TYPES and cardinality > 30:\n Log.note(\"{{field}} has {{num}} parts\", field=c.name, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif len(c.nested_path) != 1:\n query.aggs[literal_field(c.name)] = {\n \"nested\": {\"path\": c.nested_path[0]},\n \"aggs\": {\"_nested\": {\"terms\": {\"field\": c.es_column, \"size\": 0}}}\n }\n else:\n query.aggs[literal_field(c.name)] = {\"terms\": {\"field\": c.es_column, \"size\": 0}}\n\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data=query)\n\n aggs = result.aggregations.values()[0]\n if aggs._nested:\n parts = jx.sort(aggs._nested.buckets.key)\n else:\n parts = jx.sort(aggs.buckets.key)\n\n Log.note(\"{{field}} has {{parts}}\", field=c.name, parts=parts)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"partitions\": parts,\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n except Exception, e:\n if \"IndexMissingException\" in e and c.table.startswith(TEST_TABLE_PREFIX):\n with self.meta.columns.locker:\n 
self.meta.columns.update({\n \"set\": {\n \"count\": 0,\n \"cardinality\": 0,\n \"last_updated\": Date.now()\n },\n \"clear\":[\n \"partitions\"\n ],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n else:\n self.meta.columns.update({\n \"set\": {\n \"last_updated\": Date.now()\n },\n \"clear\": [\n \"count\",\n \"cardinality\",\n \"partitions\",\n ],\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n Log.warning(\"Could not get {{col.table}}.{{col.es_column}} info\", col=c, cause=e)", "def fill_words_table(self, statistics, path, filemoving, conn, logg, parser):\n logg.writing_log(conn, 'Starting filling words table')\n c = conn.cursor()\n val1 = statistics.book_name(path, filemoving, parser).replace(' ', '_')\n sql1 = \"CREATE TABLE \" + val1 + \" (word text, count integer, count_uppercase integer)\"\n c.execute(sql1)\n val2 = statistics.frequency(path, filemoving, parser)\n sql2 = \"INSERT INTO \" + val1 + \" VALUES(?,?,?)\"\n for key, value in val2.items():\n if not key.istitle():\n c.execute(sql2, (key, value, (0 if val2.get(key.capitalize()) == None else val2.get(key.capitalize()))))\n logg.writing_log(conn, 'Words table is filled')\n conn.commit()", "def create_schema(self):\n schema = '''CREATE TABLE jping (\n ip_address text not null,\n interface text not null,\n hostname text not null,\n ping_results integer not null,\n UNIQUE(ip_address, hostname)\n )\n '''\n self.query(schema)", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "async def count_all_words(self) -> int:\n pipeline = [\n {'$group': {'_id': 0, 'count': {'$sum': 1}}},\n ]\n results_words = await self._db_client.aggregate(self._db_name, self._db_collection_name, pipeline)\n if results_words:\n return results_words[0]['count']\n else:\n return 0", "def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )", "def createTableBOW(self,DBcursor):\n sql=\"create table if not exists BOW (bow_id INTEGER PRIMARY KEY, word TEXT, total_count INTEGER, netloc_count INTEGER, path_count INTEGER, params_count INTEGER, query_count INTEGER, fragment_count INTEGER);\"\n DBcursor.execute(sql)", "def schema(self):\n attrs = self.attrs.copy()\n parts = ['CREATE', 'TABLE', self.name, '(%s,' % self.hash_key.schema]\n del attrs[self.hash_key.name]\n if self.range_key:\n parts.append(self.range_key.schema + ',')\n del attrs[self.range_key.name]\n if attrs:\n attr_def = ', '.join([attr.schema for attr in six.itervalues(attrs)])\n parts.append(attr_def + ',')\n\n parts.append(\"THROUGHPUT (%d, %d))\" % (self.read_throughput,\n self.write_throughput))\n parts.extend([g.schema for g in six.itervalues(self.global_indexes)])\n return ' '.join(parts) + ';'", "def word_count_graph(input_stream_name: str, text_column: str = 'text', count_column: str = 'count') -> Graph:\n return Graph.graph_from_iter(name=input_stream_name) \\\n .map(operations.FilterPunctuation(text_column)) \\\n .map(operations.LowerCase(text_column)) \\\n .map(operations.Split(text_column)) \\\n .sort([text_column]) \\\n .reduce(operations.Count(count_column), [text_column]) \\\n .sort([count_column, text_column])", "def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 
'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }", "def schema():\n return vol.Schema({\"venus\": cv.boolean, \"mars\": cv.boolean, \"jupiter\": cv.boolean})", "def schema(self):\n pass", "def _create_tf_table(self, words) -> dict:\r\n\r\n freqTable = dict()\r\n tfTable = dict()\r\n\r\n totalWords = len(words)\r\n for word in words:\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n \r\n uniqueWords = set(words)\r\n for word in uniqueWords:\r\n tfTable[word] = freqTable[word] / float(totalWords)\r\n\r\n return tfTable", "def table(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable':\n return pulumi.get(self, \"table\")", "def get_number_of_words(self):\n filename = f'{self.path}/{self.filename}'\n # word_counter = {}\n # w_cnt = 0\n # x = 0\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n # for word in word_list:\n # w_cnt += 1\n # if word not in word_counter:\n # word_counter[word] = 1\n # else:\n # word_counter[word] = word_counter[word] + 1\n\n # for word in word_list:\n # x += 1\n # print(word, word.isalpha(), x)\n\n w_cnt = sum([a[0].isalpha() for a in word_list])\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_words', w_cnt)\n print(datetime.now(), '-', 'number_of_words for', self.filename, 'calculated =', w_cnt)\n return None", "def _createWordsTable(self):\n\t\tcommand = \"\"\"CREATE TABLE words (ID INTEGER PRIMARY KEY,\n\t\t\tword TEXT,\n\t\t\ttranslation TEXT,\n\t\t\tlast_refresh INTEGER,\n\t\t\tlevel INTEGER,\n\t\t\tcourse INTEGER\n\t\t\t);\n\"\"\"\n\n\t\tself._run_command(command)", "def generate_url_schema():\n json_str = json.dumps({'fields': [\n {'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def column_to_bq_schema(self):\n kwargs = {}\n if len(self.fields) > 0:\n fields = [field.column_to_bq_schema() for field in self.fields]\n kwargs = {\"fields\": fields}\n\n return google.cloud.bigquery.SchemaField(self.name, self.dtype,\n self.mode, **kwargs)", "def parse_table_schema(conn):\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"PRAGMA table_info({})\".format(\"week5\"))\r\n print(cur.fetchall())", "def get_schema() -> dict:\n raise NotImplementedError()" ]
[ "0.6019789", "0.5929392", "0.5895341", "0.56691307", "0.56456923", "0.56099075", "0.5580256", "0.5541957", "0.55183524", "0.546304", "0.5438903", "0.5436714", "0.54277676", "0.5387588", "0.5350829", "0.53483534", "0.53423476", "0.53415966", "0.5339946", "0.53039205", "0.52996534", "0.5289723", "0.52749264", "0.52530223", "0.5245949", "0.524241", "0.5240842", "0.52361685", "0.5228935", "0.5226282" ]
0.71177036
0
Gets the error_details of this WorkRequest.
def error_details(self): return self._error_details
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error_details(self) -> Sequence['outputs.JobErrorDetailsResponse']:\n return pulumi.get(self, \"error_details\")", "def error_details(self) -> Sequence['outputs.JobErrorDetailsResponse']:\n return pulumi.get(self, \"error_details\")", "def error_details(self) -> Sequence['outputs.JobErrorDetailsResponse']:\n return pulumi.get(self, \"error_details\")", "def error_details(self) -> Sequence['outputs.JobErrorDetailsResponse']:\n return pulumi.get(self, \"error_details\")", "def error_details(self) -> pulumi.Output[Sequence['outputs.ErrorDetailResponse']]:\n return pulumi.get(self, \"error_details\")", "def getError(self):\n \n return self.resp[\"error\"]", "def get_error(self):\n return self.exc_info", "def error_details(self) -> Optional['outputs.UserFacingErrorResponse']:\n return pulumi.get(self, \"error_details\")", "def error(self):\n return self['error']", "def error_body(self):\n return self._status.error_body", "def query_error(self):\n return self.details[KEY_QUERY_ERROR]", "def error_message(self):\n\n return self._error_message", "def get_error(self):\n return self.e", "def error(self):\n errors = self._info.get('error', {}).get('errors')\n if not errors:\n return None\n return ' '.join(err.get('message', 'unknown') for err in errors)", "def error_reason(self):\n return self._error_reason", "def getErrorMessage(self):\n return self._errorMessage", "def error(self):\n error = self._wrapped.error\n if error:\n return error\n\n return self.json['response'].get('error')", "def get_error_message(self):\n return self.error_message.get_error_message()", "def error(self):\n return self._error", "def error(self):\n return self._error", "def error(self):\n return self._error", "def error_message(self) -> str:\n return self._error_message", "def error(self):\n return self.get('error')", "def protection_error_details(self) -> 'outputs.UserFacingErrorResponse':\n return pulumi.get(self, \"protection_error_details\")", "def get_error_message(self):\n\n return self.err_message", "def error_message(self) -> str:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> str:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> str:\n return pulumi.get(self, \"error_message\")", "def getErrorMessage(self):\n return self._message", "def error_string(self):\n return self._error_string" ]
[ "0.7687312", "0.7687312", "0.7687312", "0.7687312", "0.75932133", "0.71988595", "0.7126288", "0.70466316", "0.67343795", "0.672883", "0.6612877", "0.659201", "0.65748554", "0.6505508", "0.6468211", "0.6435177", "0.64291966", "0.63854223", "0.6382505", "0.6382505", "0.6382505", "0.636671", "0.6361289", "0.6284388", "0.62784874", "0.6247723", "0.6247723", "0.6247723", "0.62347287", "0.6216379" ]
0.8006274
0
Sets the error_details of this WorkRequest.
def error_details(self, error_details): self._error_details = error_details
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error_detail(self, error_detail):\n\n self._error_detail = error_detail", "def set_error(self, error):\n self._set_sub_text('error', text=str(error))\n return self", "def set_error_details(code, desc):\n MDC.put('errorCode', code)\n MDC.put('errorDescription', desc)", "def error_reason(self, error_reason):\n\n self._error_reason = error_reason", "def error_details(self):\n return self._error_details", "def error(self, error):\n\n self._error = error", "def error(self, error):\n\n self._error = error", "def error(self, error):\n\n self._error = error", "def error_message(self, error_message):\n\n self._error_message = error_message", "def errors_summary(self, errors_summary):\n\n self._errors_summary = errors_summary", "def setError(self,err):\n self.error = err", "def error(self, text, info=None):\n self.details[\"message\"] = text\n if info:\n self.details[\"details\"] = info", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def error(self, exception=None):\n self._error = exception", "def set_error(self, name, value):\n self.errors[name] = value", "def error_details(self) -> pulumi.Output[Sequence['outputs.ErrorDetailResponse']]:\n return pulumi.get(self, \"error_details\")", "def test_add_error_details(self):\n self.protocol.addError(\n self.test, details=self.sample_tb_details)\n self.assertThat([\n compat._b((\"error: %s [ multipart\\n\"\n \"Content-Type: text/plain\\n\"\n \"something\\n\"\n \"F\\r\\nserialised\\nform0\\r\\n\"\n \"Content-Type: \"\n \"text/x-traceback;charset=utf8,language=python\\n\"\n \"traceback\\n\" + _remote_exception_str_chunked +\n \"]\\n\") % self.test.id()),\n compat._b((\"error: %s [ multipart\\n\"\n \"Content-Type: text/plain\\n\"\n \"something\\n\"\n \"F\\r\\nserialised\\nform0\\r\\n\"\n \"Content-Type: \"\n \"text/x-traceback;language=python,charset=utf8\\n\"\n \"traceback\\n\" + _remote_exception_str_chunked +\n \"]\\n\") % self.test.id()),\n ],\n matchers.Contains(self.io.getvalue())),", "def error_details(self) -> Sequence['outputs.JobErrorDetailsResponse']:\n return pulumi.get(self, \"error_details\")", "def error_details(self) -> Sequence['outputs.JobErrorDetailsResponse']:\n return pulumi.get(self, \"error_details\")", "def error_details(self) -> Sequence['outputs.JobErrorDetailsResponse']:\n return pulumi.get(self, \"error_details\")", "def error_details(self) -> Sequence['outputs.JobErrorDetailsResponse']:\n return pulumi.get(self, \"error_details\")", "def error_code(self, error_code):\n\n self._error_code = error_code", "def error_code(self, error_code):\n\n self._error_code = error_code", "def error_code(self, error_code):\n\n self._error_code = error_code", "def error_recovery_settings(self, error_recovery_settings):\n\n self._error_recovery_settings = error_recovery_settings", "def setErrorMessage(self, errorMessage):\n self._errorMessage = errorMessage", "def set_invalid_notes(self, error):\n self._invalid_notes = error", "def error_entity(self, error_entity):\n \n self._error_entity = error_entity", "def set_error(self, code: Optional[int] = None, text: Optional[str] = None) -> None:\n if code is not None:\n self.error_code = code\n if text is not None:\n self.error_text = text", "def with_error(self, prev_error):\n self.prev_error = prev_error\n return self" ]
[ "0.7288278", "0.62475634", "0.6221895", "0.6116249", "0.6079993", "0.6001644", "0.6001644", "0.6001644", "0.5962383", "0.5880566", "0.5878469", "0.58783144", "0.57903564", "0.5766922", "0.5754919", "0.574586", "0.57366425", "0.5700703", "0.5700703", "0.5700703", "0.5700703", "0.56759346", "0.56759346", "0.56759346", "0.5665206", "0.56150216", "0.5607043", "0.56063557", "0.5580639", "0.5558574" ]
0.81081605
0
Gets the load_balancer_id of this WorkRequest. The `OCID`__ of the load balancer with which the work request is associated.
def load_balancer_id(self): return self._load_balancer_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"load_balancer_id\")", "def balancer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"balancer_id\")", "def nodebalancer_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"nodebalancer_id\")", "def nodebalancer_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"nodebalancer_id\")", "def nodebalancer_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"nodebalancer_id\")", "def _root_loadbalancer_id(self, obj_type, obj_dict):\n\n try:\n # For Mitaka\n if obj_type == lb_const.LOADBALANCER:\n lb = obj_dict['id']\n elif obj_type == lb_const.LISTENER:\n lb = obj_dict[lb_const.LOADBALANCER]['id']\n elif obj_type == lb_const.L7POLICY:\n lb = obj_dict[lb_const.LISTENER][lb_const.LOADBALANCER]['id']\n elif obj_type == lb_const.L7RULE:\n lb = obj_dict['policy'][lb_const.LISTENER][\n lb_const.LOADBALANCER]['id']\n elif obj_type == lb_const.POOL:\n lb = obj_dict[lb_const.LOADBALANCER]['id']\n elif obj_type == lb_const.SNI:\n lb = obj_dict[lb_const.LISTENER][lb_const.LOADBALANCER]['id']\n else:\n # Pool Member or Health Monitor\n lb = obj_dict[lb_const.POOL][lb_const.LOADBALANCER]['id']\n # For Liberty\n # if obj_type == lb_const.LOADBALANCER:\n # lb = obj_dict['id']\n # elif obj_type == lb_const.LISTENER:\n # lb = obj_dict[lb_const.LOADBALANCER]['id']\n # elif obj_type == lb_const.POOL:\n # lb = obj_dict[lb_const.LISTENER][lb_const.LOADBALANCER]['id']\n # elif obj_type == lb_const.SNI:\n # lb = obj_dict[lb_const.LISTENER][lb_const.LOADBALANCER]['id']\n # else:\n # # Pool Member or Health Monitor\n # lb = obj_dict[lb_const.POOL][lb_const.LISTENER][\n # lb_const.LOADBALANCER]['id']\n except Exception:\n raise exceptions.IncompleteData(\n 'Root loadbalancer id was not found')\n else:\n return lb", "def load_balancer_name(self) -> str:\n return pulumi.get(self, \"load_balancer_name\")", "def get_load_balancer_ip(cluster_config):\n cluster = load_cluster_config_json(cluster_config)\n\n lb_ip = cluster[\"load_balancers\"][0][\"ip\"]\n return lb_ip", "def get_balancer_arn(self):\n return self.get_balancer_info()['LoadBalancerArn']", "def load_balancer_id(self, load_balancer_id):\n self._load_balancer_id = load_balancer_id", "def load_balancer_profile(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")", "def get(self, load_balancer_id):\n response.status = 201\n return None", "def load_balancer_type(self) -> Optional[pulumi.Input['CloudRunConfigLoadBalancerType']]:\n return pulumi.get(self, \"load_balancer_type\")", "def id(self):\n return self.job_proto.id", "def load_balancing(self) -> pulumi.Input['FrontdoorOriginGroupLoadBalancingArgs']:\n return pulumi.get(self, \"load_balancing\")", "def client_request_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"client_request_id\")", "def __loadHyperSearchJobID(cls, permWorkDir, 
outputLabel):\n filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,\n outputLabel=outputLabel)\n\n jobID = None\n with open(filePath, \"r\") as jobIdPickleFile:\n jobInfo = pickle.load(jobIdPickleFile)\n jobID = jobInfo[\"hyperSearchJobID\"]\n\n return jobID", "def load_balancer_profile(self) -> Optional[pulumi.Input['LoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")", "def _get_lb(self, lb_or_id):\n if isinstance(lb_or_id, CloudLoadBalancer):\n ret = lb_or_id\n else:\n ret = self.get(lb_or_id)\n return ret", "def slb_id(self) -> str:\n return pulumi.get(self, \"slb_id\")", "def load_balancing(self) -> Optional[pulumi.Input['FrontdoorOriginGroupLoadBalancingArgs']]:\n return pulumi.get(self, \"load_balancing\")", "def subnet_id(self) -> str:\n return pulumi.get(self, \"subnet_id\")", "def job_id(self) -> JobId:\r\n return self._job_id", "def load_balancing(self) -> pulumi.Output['outputs.FrontdoorOriginGroupLoadBalancing']:\n return pulumi.get(self, \"load_balancing\")" ]
[ "0.7890315", "0.7890315", "0.7890315", "0.78116715", "0.7780647", "0.7739434", "0.7739434", "0.656374", "0.6504513", "0.6472288", "0.63884926", "0.63022023", "0.6237915", "0.60421634", "0.58649844", "0.55915666", "0.55746996", "0.5552787", "0.5515452", "0.5499319", "0.54482126", "0.5440887", "0.54087025", "0.5404194", "0.540285", "0.5370252", "0.5355735", "0.53505373", "0.5340783", "0.53277314" ]
0.79292524
0
Sets the load_balancer_id of this WorkRequest. The `OCID`__ of the load balancer with which the work request is associated.
def load_balancer_id(self, load_balancer_id): self._load_balancer_id = load_balancer_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put(self, request, loadbalancer_id):\n kwargs = {'loadbalancer_id': loadbalancer_id}\n update_loadbalancer(request, **kwargs)", "def load_balancer_id(self):\n return self._load_balancer_id", "def load_balancer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"load_balancer_id\")", "def update_loadbalancer(request, **kwargs):\n data = request.DATA\n loadbalancer_id = kwargs.get('loadbalancer_id')\n\n conn = get_sdk_connection(request)\n loadbalancer = conn.load_balancer.update_load_balancer(\n loadbalancer_id,\n name=data['loadbalancer'].get('name'),\n description=data['loadbalancer'].get('description'),\n admin_state_up=data['loadbalancer'].get('admin_state_up'))\n\n return _get_sdk_object_dict(loadbalancer)", "def load_balancer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def assign_to_load_balancer_rule(self, load_balancer_rule_id, \n virtualmachineids): \n params = {'command':'assignToLoadBalancerRule',\n 'id':load_balancer_rule_id,\n 'virtualmachineids':virtualmachineids} \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['assigntoloadbalancerruleresponse']['jobid']\n self.logger.debug('Start job - assignToLoadBalancerRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def __init__(__self__, *,\n load_balancer_id: pulumi.Input[str],\n security_group_id: pulumi.Input[str],\n dry_run: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"load_balancer_id\", load_balancer_id)\n pulumi.set(__self__, \"security_group_id\", security_group_id)\n if dry_run is not None:\n pulumi.set(__self__, \"dry_run\", dry_run)", "def get(self, load_balancer_id):\n response.status = 201\n return None", "def delete(self, request, loadbalancer_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_load_balancer(loadbalancer_id,\n ignore_missing=True,\n cascade=True)", "def setRequestId(self, reqid) :\n self.request_id = reqid", "def nodebalancer_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"nodebalancer_id\")", "def _root_loadbalancer_id(self, obj_type, obj_dict):\n\n try:\n # For Mitaka\n if obj_type == lb_const.LOADBALANCER:\n lb = obj_dict['id']\n elif obj_type == lb_const.LISTENER:\n lb = obj_dict[lb_const.LOADBALANCER]['id']\n elif obj_type == lb_const.L7POLICY:\n lb = obj_dict[lb_const.LISTENER][lb_const.LOADBALANCER]['id']\n elif obj_type == lb_const.L7RULE:\n lb = obj_dict['policy'][lb_const.LISTENER][\n lb_const.LOADBALANCER]['id']\n elif obj_type == lb_const.POOL:\n lb = obj_dict[lb_const.LOADBALANCER]['id']\n elif obj_type == lb_const.SNI:\n lb = obj_dict[lb_const.LISTENER][lb_const.LOADBALANCER]['id']\n else:\n # Pool Member or Health Monitor\n lb = obj_dict[lb_const.POOL][lb_const.LOADBALANCER]['id']\n # For Liberty\n # if obj_type == lb_const.LOADBALANCER:\n # lb = obj_dict['id']\n # elif obj_type == lb_const.LISTENER:\n # lb = obj_dict[lb_const.LOADBALANCER]['id']\n # elif 
obj_type == lb_const.POOL:\n # lb = obj_dict[lb_const.LISTENER][lb_const.LOADBALANCER]['id']\n # elif obj_type == lb_const.SNI:\n # lb = obj_dict[lb_const.LISTENER][lb_const.LOADBALANCER]['id']\n # else:\n # # Pool Member or Health Monitor\n # lb = obj_dict[lb_const.POOL][lb_const.LISTENER][\n # lb_const.LOADBALANCER]['id']\n except Exception:\n raise exceptions.IncompleteData(\n 'Root loadbalancer id was not found')\n else:\n return lb", "def balancer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"balancer_id\")", "def nodebalancer_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"nodebalancer_id\")", "def __init__(__self__, *,\n dry_run: Optional[pulumi.Input[bool]] = None,\n load_balancer_id: Optional[pulumi.Input[str]] = None,\n security_group_id: Optional[pulumi.Input[str]] = None):\n if dry_run is not None:\n pulumi.set(__self__, \"dry_run\", dry_run)\n if load_balancer_id is not None:\n pulumi.set(__self__, \"load_balancer_id\", load_balancer_id)\n if security_group_id is not None:\n pulumi.set(__self__, \"security_group_id\", security_group_id)", "def affiliate_ledger_oid(self, affiliate_ledger_oid):\n\n self._affiliate_ledger_oid = affiliate_ledger_oid", "def request_id(self, request_id):\n\n self._request_id = request_id", "def request_id(self, request_id):\n\n self._request_id = request_id", "def request_id(self, request_id):\n\n self._request_id = request_id", "def delete(self, loadbalancer_id):\n response.status = 201", "def subnet_id(self, subnet_id):\n self._subnet_id = subnet_id", "def loan_id(self, loan_id):\n\n self._loan_id = loan_id", "def job_id(self, job_id: JobId):\r\n self._job_id = job_id", "def delete_load_balancer_rule(self, load_balancer_rule_id): \n params = {'command':'deleteLoadBalancerRule',\n 'id':load_balancer_rule_id} \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deleteloadbalancerruleresponse']['jobid']\n self.logger.debug('Start job - deleteLoadBalancerRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def setOperationId(self, opid) :\n self.operation_id = opid" ]
[ "0.66619974", "0.65204686", "0.6427702", "0.6352528", "0.6352528", "0.6352528", "0.6221372", "0.6221372", "0.6017137", "0.59453", "0.5768332", "0.57498705", "0.57174456", "0.5444608", "0.5366137", "0.5321263", "0.5312888", "0.5203628", "0.51420414", "0.51369774", "0.5123431", "0.51221055", "0.51221055", "0.51221055", "0.50258887", "0.5012895", "0.498434", "0.49409828", "0.49258152", "0.49078906" ]
0.8018049
0
Sets the message of this WorkRequest. A collection of data, related to the load balancer provisioning process, that helps with debugging in the event of failure.
def message(self, message): self._message = message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def message(self, value: str):\n self._properties[\"message\"] = value", "def _set_message(self, value):\n self.__message = value", "def message(self, message: str):\n\n self._message = message", "def message(self, message):\n\n self._message = message", "def message(self, message):\n\n self._message = message", "def message(self, message):\n\n self._message = message", "def message(self, message):\n\n self._message = message", "def message(self, message):\n\n self._message = message", "def message(self, message):\n\n self._message = message", "def message(self, message):\n\n self._message = message", "def message(self, message):\n\n self._message = message", "def message(self, message: \"str\"):\n self._attrs[\"message\"] = message", "def setMessage(self, message):\n self._message = message", "def message(self, msg):\n self._message = msg", "def message(self, message: Union[Message, bytes]) -> None:\n self._message = message", "def progress_message(self, progress_message):\n\n self._progress_message = progress_message", "def message(self, message: \"str\"):\n if message is None:\n raise ValueError(\"Invalid value for `message`, must not be `None`\")\n self._attrs[\"message\"] = message", "def __setstate__(self, message):\n self._message = message", "def setMessage(self, message: str):\r\n\r\n if not self.isClosed:\r\n if self.__message != '':\r\n self.__message = ''\r\n else:\r\n raise HDDOPermissionException('Tried to set non-existing message in a closed HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to set message in a closed HealthDominoDataObject.')", "def set_message(self, message):\n if len(message) > globals.MAX_MESSEGE_LENGTH:\n mess = message[0:globals.MAX_MESSEGE_LENGTH-3]+\"...\"\n else:\n mess = message\n self._message.set_message(mess)", "def message(self, message):\n if python_utils.is_string(self._message):\n raise TypeError('self.message must be assigned to exactly once')\n if not python_utils.is_string(message):\n raise TypeError('self.message must be a string')\n if not message:\n raise ValueError('self.message must be a non-empty string')\n model_name, quoted_model_id = self._message\n self._message = '%s in %s(id=%s): %s' % (\n self.__class__.__name__, model_name, quoted_model_id, message)", "def message(self, msg: AgentMessage):\n self._message = msg", "def message(self, message):\n if message is None:\n raise ValueError(\"Invalid value for `message`, must not be `None`\") # noqa: E501\n\n self._message = message", "def setMessage(self, *args):\n return _libsbml.Constraint_setMessage(self, *args)", "def message(self, message):\n if message is not None and len(message) < 1:\n raise ValueError(\"Invalid value for `message`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._message = message", "def with_additional_message(self: _Diagnostic, message: str) -> _Diagnostic:\n if self.additional_message is None:\n self.additional_message = message\n else:\n self.additional_message = f\"{self.additional_message}\\n{message}\"\n return self", "def message(self):\n if not python_utils.is_string(self._message):\n raise NotImplementedError(\n 'self.message must be assigned a value in __init__')\n return self._message", "def _create_job_message(self):\n #TODO: Final check of source file, add xml settings, allow for user\n # to set priority, verify all job data is correct format\n\n if not hasattr(self.required_files, '_get_message'):\n self.add_file_collection()\n\n if self.pool and hasattr(self.pool, 'id'):\n pool_options = 
{'poolId': self.pool.id}\n\n elif self.pool:\n pool_options = {'poolId': str(self.pool)}\n\n else:\n size = max(int(self.instances), 1)\n pool_options = {'autoPoolSpecification': self._auto_pool(size)}\n\n job_message = {\n 'Name': str(self.name),\n 'Type': self._api.jobtype(),\n 'RequiredFiles': self.required_files._get_message(\"submit\"),\n 'Parameters': list(self._filter_params()),\n 'JobFile': str(self.source),\n 'Settings': str(self.settings),\n 'Priority': 'Medium'\n }\n job_message.update(pool_options)\n\n self._log.debug(\"Job message: {0}\".format(job_message))\n return job_message", "def msg(self, msg):\n\n self._msg = msg", "def set_status_message(self, message):\n\n # Nagios considers a pipe (|) a split from STATUS MESSAGE and perf\n # data. If we replace it with a space, that should safely render the\n # message safe without risking making it unreadable.\n\n try:\n assert message is not None\n self.__exit_message = message.replace('|', ' ')\n except (AttributeError, AssertionError):\n self.unknown_error(\"Status message must be a standard string!\")" ]
[ "0.6355234", "0.63029337", "0.62257034", "0.6203849", "0.6203849", "0.6203849", "0.6203849", "0.6203849", "0.6203849", "0.6203849", "0.6203849", "0.61765045", "0.61677366", "0.60702616", "0.60020745", "0.6000318", "0.5923937", "0.58947843", "0.57720613", "0.57564867", "0.57302725", "0.5727567", "0.5696028", "0.56193584", "0.5580382", "0.5564869", "0.55407506", "0.54945046", "0.5472821", "0.54279983" ]
0.6311244
1
Gets the time_accepted of this WorkRequest. The date and time the work request was created, in the format defined by RFC3339.
def time_accepted(self): return self._time_accepted
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pending_time(self):\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n timediff = now - self.time_requested\n return timediff", "def submit_time(self) -> datetime:\n return self._submit_time", "def accepted_time(self, accepted_time):\n\n self._accepted_time = accepted_time", "def created_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"created_time\")", "def time_accepted(self, time_accepted):\n self._time_accepted = time_accepted", "def create_time(self):\n return self._create_time", "def create_time(self):\n return self._create_time", "def create_time(self):\n return self._create_time", "def time_created(self):\n return self._time_created", "def time_created(self):\n return self._time_created", "def time_created(self):\n return self._time_created", "def time_created(self):\n return self._time_created", "def created_time(self) -> datetime.datetime:\n return self.__created_time", "def created_time(self) -> datetime.datetime:\n return self.__created_time", "def accept_transfer(self):\n self.is_accepted = True\n self.date_time_accepted = models.DateTimeField(auto_now=True)", "def getSubmitTime():", "def time_created(self) -> str:\n return pulumi.get(self, \"time_created\")", "def time_created(self) -> str:\n return pulumi.get(self, \"time_created\")", "def time_created(self) -> str:\n return pulumi.get(self, \"time_created\")", "def validation_time(self):\n return self._validation_time", "def create_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"create_time\")", "def get_time(self):\n return self.block.create_time", "def arrival_time(self):\r\n return self.__arrival_time", "def arrival_time(self):\r\n return self.__arrival_time", "def create_time(self) -> Optional[str]:\n return pulumi.get(self, \"create_time\")", "def round_trip_time(self):\r\n return self.completion_time - self.launch_time", "def completion_time(self) -> datetime:\n return self._completion_time", "def getPostJobSubmitTime(self):\n ent = self.getPostJob()\n if ent is None:\n return None\n return ent.submitTime" ]
[ "0.66144276", "0.646957", "0.63799495", "0.6186461", "0.61386454", "0.6039994", "0.6039994", "0.6039994", "0.5985838", "0.5985838", "0.5985838", "0.5985838", "0.5956583", "0.5956583", "0.58897996", "0.5854773", "0.5841652", "0.5841652", "0.5841652", "0.5810735", "0.58032846", "0.58032846", "0.58032846", "0.57827145", "0.57443476", "0.57443476", "0.56840265", "0.5632147", "0.56045425", "0.55928475" ]
0.78294075
0
Sets the time_accepted of this WorkRequest. The date and time the work request was created, in the format defined by RFC3339.
def time_accepted(self, time_accepted): self._time_accepted = time_accepted
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def accepted_time(self, accepted_time):\n\n self._accepted_time = accepted_time", "def time_accepted(self):\n return self._time_accepted", "def accept_transfer(self):\n self.is_accepted = True\n self.date_time_accepted = models.DateTimeField(auto_now=True)", "def rejected_time(self, rejected_time):\n\n self._rejected_time = rejected_time", "def submit_time(self, submit_time: datetime):\n\n self._submit_time = submit_time", "def time_registrations_approved(self, time_registrations_approved):\n\n self._time_registrations_approved = time_registrations_approved", "def time_created(self, time_created):\n self._time_created = time_created", "def time_created(self, time_created):\n self._time_created = time_created", "def time_created(self, time_created):\n self._time_created = time_created", "def time_created(self, time_created):\n self._time_created = time_created", "def submit_time(self) -> datetime:\n return self._submit_time", "def active_time(self, active_time):\n if self.local_vars_configuration.client_side_validation and active_time is None: # noqa: E501\n raise ValueError(\"Invalid value for `active_time`, must not be `None`\") # noqa: E501\n\n self._active_time = active_time", "def make_time_request(self, time_request=None, **kwargs):\n pass", "def setSubmitTime(t):", "def set_accepted(self):\n self.logger.info(\"status: ACCEPTED\")\n self._callback('on_accepted')\n return self.update_response(self.encoder.encode_accepted())", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def time_registrations_pending(self, time_registrations_pending):\n\n self._time_registrations_pending = time_registrations_pending", "def pending_time(self):\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n timediff = now - self.time_requested\n return timediff", "def cancelled_time(self, cancelled_time):\n\n self._cancelled_time = cancelled_time", "def creation_time(self, creation_time):\n\n self._creation_time = creation_time", "def creation_time(self, creation_time):\n\n self._creation_time = creation_time" ]
[ "0.84088874", "0.71794397", "0.6400894", "0.61765873", "0.6028219", "0.58656025", "0.55198085", "0.55198085", "0.55198085", "0.55198085", "0.5302736", "0.52900857", "0.526864", "0.52670693", "0.5256942", "0.5244525", "0.5244525", "0.5244525", "0.5244525", "0.5244525", "0.5244525", "0.5244525", "0.5244525", "0.5244525", "0.5244525", "0.51902986", "0.5185791", "0.5184636", "0.51831704", "0.51831704" ]
0.8216104
1
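The two records above pair a getter and a setter for the same `time_accepted` attribute of `WorkRequest`. A minimal sketch of how such a property pair typically sits in an OCI-style model class is shown below; the class skeleton and the `__init__` are assumptions for illustration, not part of the records.

```python
class WorkRequest:
    """Illustrative model class; only the time_accepted property pair is sketched."""

    def __init__(self, time_accepted=None):
        # Backing attribute that both the getter and the setter use.
        self._time_accepted = time_accepted

    @property
    def time_accepted(self):
        """The date and time the work request was created, in RFC3339 format."""
        return self._time_accepted

    @time_accepted.setter
    def time_accepted(self, time_accepted):
        self._time_accepted = time_accepted
```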
Gets the time_finished of this WorkRequest. The date and time the work request was completed, in the format defined by RFC3339.
def time_finished(self): return self._time_finished
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_datetime_finish(self):\n return self.get_t_sect()['datetime_finish']", "def date_finished(self):\n return self._date_finished", "def completion_time(self) -> datetime:\n return self._completion_time", "def getEndTime(self):\n assert self.isFinished(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__jobInfo.endTime", "def finalize_time(self) -> str:\n return pulumi.get(self, \"finalize_time\")", "def submit_time(self) -> datetime:\n return self._submit_time", "def completion_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"completion_time\")", "def completion_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"completion_time\")", "def last_completion_time(self) -> float:\n return self._last_completion_time - self._start_time", "def end_time(self):\n return self._end_time", "def end_time(self):\n return self._end_time", "def end_time(self):\n return self._end_time", "def end_time(self):\n return self._end_time", "def end_time(self):\n return self._end_time", "def end_time(self):\n return self._end_time", "def completed(self):\n if not self.completion_ts:\n return None\n return datetime.utcfromtimestamp(self.completion_ts)", "def get_last_finish_time(self, file_path) -> datetime | None:\n stat = self._file_stats.get(file_path)\n return stat.last_finish_time if stat else None", "def end_time(self) -> str:\n return self._end_time", "def getEndTime(self):\n assert self.isFinished(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__rawInfo.endTime", "def get_time(self):\n # if the job is being processed or the CC had a crash return None\n if self.status <= 0:\n return None\n\n if self.status in (STATUS_FINISHED, 21):\n return self.resultTime\n\n return None", "def get_end_time(self):\n return str(self._end_time)", "def end_time(self):\n return self.time_parser.end_time", "def completed_on(self):\n return self.get_time(\"completed_on\")", "def completed_on(self):\n return self.get_time(\"completed_on\")", "def duration(self):\n started = self.started_at\n finished = self.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None # can't compute yet", "def getPostJobSubmitTime(self):\n ent = self.getPostJob()\n if ent is None:\n return None\n return ent.submitTime", "def end_time(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"end_time\")", "def get_time(self):\n return self._total_time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time" ]
[ "0.7161936", "0.71145535", "0.66829485", "0.6392356", "0.63675296", "0.6357172", "0.62655723", "0.62655723", "0.6258675", "0.62255806", "0.62255806", "0.62255806", "0.62255806", "0.62255806", "0.62255806", "0.6190014", "0.61434066", "0.6117809", "0.6112199", "0.60793656", "0.6076553", "0.6071059", "0.60559285", "0.60559285", "0.6036193", "0.60214615", "0.60198927", "0.6013585", "0.5996084", "0.5996084" ]
0.7401861
0
Sets the time_finished of this WorkRequest. The date and time the work request was completed, in the format defined by RFC3339.
def time_finished(self, time_finished): self._time_finished = time_finished
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finish_time(self, finish_time):\n\n self._finish_time = finish_time", "def finish_time(self, finish_time):\n\n self._finish_time = finish_time", "def finish_time(self, finish_time):\n\n self._finish_time = finish_time", "def date_finished(self, date_finished):\n self._date_finished = date_finished", "def datefinished(self, datefinished):\n\n self._datefinished = datefinished", "def date_finished(self):\n return self._date_finished", "def completion_time(self, completion_time: datetime):\n\n self._completion_time = completion_time", "def time_finished(self):\n return self._time_finished", "def set_finish(self, t: float = 0.0) -> None:\n if not t:\n t = time()\n self.tfinish = t\n self.log.debug(\"%s %s\", self.prefix, {\"end-time\": self.tfinish})", "def report_finish(self):\n # TODO: remove changing the pended time descriptive, as the signal does this already!\n self.status = Status.FINISHED\n self.pended_time_descriptive = self.pending_time_descriptive\n self.save()", "def finish(self, finish_time=None):\n pass", "def submit_time(self, submit_time: datetime):\n\n self._submit_time = submit_time", "def task_finished(self, current_time):\r\n if self.tasks_finished == 0:\r\n self.first_task_completion = current_time\r\n self.tasks_finished += 1\r\n self.stats_manager.task_finished(self.user_id, current_time)\r\n if self.tasks_finished == self.num_tasks:\r\n self.completion_time = current_time\r\n self.stats_manager.job_finished(self)", "def end_time(self, end_time):\n\n self._end_time = end_time", "def end_time(self, end_time):\n\n self._end_time = end_time", "def end_time(self, end_time):\n\n self._end_time = end_time", "def end_time(self, end_time):\n self._end_time = end_time", "def end_time(self, end_time):\n self._end_time = end_time", "def get_datetime_finish(self):\n return self.get_t_sect()['datetime_finish']", "def completion_time(self) -> datetime:\n return self._completion_time", "def completed_at(self, completed_at):\n\n self._completed_at = completed_at", "def finalize_time(self) -> str:\n return pulumi.get(self, \"finalize_time\")", "def set_job_finished(self, job_id):\n try:\n self._session.query(JobEntity).\\\n filter(JobEntity.id == job_id).\\\n update({'finished': datetime.datetime.now()})\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return True", "def submit_time(self) -> datetime:\n return self._submit_time", "def completed(self, completed):\n\n self._completed = completed", "def completed(self, completed):\n\n self._completed = completed", "def completed(self, completed):\n\n self._completed = completed", "def addFinishTimeVar(self, order):\n\t\tvar = str(order.id) + \"-finish\"\n\t\tlastMachine = self.plant.machines[-1]\n\t\tself.problem.addVariable(var, range(order.deadline - self.endMargin,\n\t\t\torder.deadline + self.endMargin))\n\t\tself.problem.addConstraint(lambda x, y, yt: x == y + yt,\n\t\t\t[var, self.createEnterTimeVarName(order, lastMachine),\n\t\t\tself.createTimeAtMachineVarName(order, lastMachine)])", "def end_date_time(self, end_date_time):\n\n self._end_date_time = end_date_time", "def end_time(self) -> str:\n return self._end_time" ]
[ "0.7065115", "0.7065115", "0.7065115", "0.685432", "0.66010696", "0.6252097", "0.6162726", "0.6158714", "0.6031602", "0.5875143", "0.58484834", "0.57532066", "0.54509366", "0.5416478", "0.5416478", "0.5416478", "0.5403264", "0.5403264", "0.5389085", "0.531435", "0.5231423", "0.5185966", "0.5166902", "0.51615226", "0.51441795", "0.51441795", "0.51441795", "0.5142747", "0.50878644", "0.5045359" ]
0.7655443
0
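All four time_* records above describe values "in the format defined by RFC3339". A small sketch of handling such a timestamp in Python follows; the literal timestamp is an invented example, not taken from the records.

```python
from datetime import datetime, timezone

# An RFC3339-style timestamp as these WorkRequest fields would carry it (invented example).
raw = "2023-05-04T12:30:45+00:00"

# datetime.fromisoformat accepts this offset notation on Python 3.7+;
# a trailing "Z" would need to be replaced with "+00:00" before Python 3.11.
time_finished = datetime.fromisoformat(raw)

# The parsed value is timezone-aware, so it can be compared with other aware datetimes.
elapsed = datetime.now(timezone.utc) - time_finished
```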
Extracts features for the cue classifier from the sentence dictionaries. Returns (modified) sentence dictionaries, a list of feature dictionaries, and if called in training mode, a list of labels.
def extract_features_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon, mode='training'): instances = [] for sent in sentence_dicts: # print(sent) for key, value in sent.items(): features = {} if isinstance(key, int): if not_known_cue_word(value[3].lower(), cue_lexicon, affixal_cue_lexicon): sent[key]['not-pred-cue'] = True continue features['token'] = value[3].lower() features['lemma'] = value[4].lower() features['pos'] = value[5] if key == 0: features['bw-bigram1'] = 'null' else: features['bw-bigram1'] = "%s_*" %sent[key-1][4].lower() if not (key+1) in sent: features['fw-bigram1'] = 'null' else: features['fw-bigram1'] = "*_%s" %sent[key+1][4].lower() affix = get_affix_cue(value[3].lower(), affixal_cue_lexicon) if affix != None: base = value[3].lower().replace(affix, "") features['char-5gram1'], features['char-5gram2'] = get_character_ngrams(base, affix, 5) features['char-4gram1'], features['char-4gram2'] = get_character_ngrams(base, affix, 4) features['char-3gram1'], features['char-3gram2'] = get_character_ngrams(base, affix, 3) features['char-2gram1'], features['char-2gram2'] = get_character_ngrams(base, affix, 2) features['char-1gram1'], features['char-1gram2'] = get_character_ngrams(base, affix, 1) features['affix'] = affix else: features['char-5gram1'], features['char-5gram2'] = 'null','null' features['char-4gram1'], features['char-4gram2'] = 'null','null' features['char-3gram1'], features['char-3gram2'] = 'null','null' features['char-2gram1'], features['char-2gram2'] = 'null','null' features['char-1gram1'], features['char-1gram2'] = 'null','null' features['affix'] = 'null' instances.append(features) if mode == 'training': labels = extract_labels_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon) return sentence_dicts, instances, labels return sentence_dicts, instances
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_features_scope(sentence_dicts, mode='training'):\n instances = []\n sentence_splits = []\n for sent in sentence_dicts:\n if not sent['neg']:\n continue\n print(sent)\n graph = make_dir_graph_for_sentence(sent)\n bidir_graph = make_bidir_graph_for_sentence(sent)\n for cue_i, (cue, cue_position, cue_type) in enumerate(sent['cues']):\n seq_length = -1\n for key, value in sent.items():\n features = {}\n if isinstance(key, int):\n features['token'] = value[3]\n features['lemma'] = value[4]\n features['pos'] = value[5]\n features['dir-dep-dist'] = get_shortest_path(graph, sent, cue_position, key)\n features['dep-graph-path'] = get_dep_graph_path(bidir_graph, sent, cue_position, key)\n\n dist = key - cue_position\n nor_index = find_nor_index(sent)\n if cue == \"neither\" and nor_index > -1 and abs(key-nor_index) < abs(dist):\n dist = key - nor_index\n #token is to the left of cue\n if dist < 0:\n if abs(dist) <= 9:\n features['left-cue-dist'] = 'A'\n else:\n features['left-cue-dist'] = 'B'\n features['right-cue-dist'] = 'null'\n #token is to the right of cue\n elif dist > 0:\n if dist <= 15:\n features['right-cue-dist'] = 'A'\n else:\n features['right-cue-dist'] = 'B'\n features['left-cue-dist'] = 'null'\n else:\n features['left-cue-dist'] = '0'\n features['right-cue-dist'] = '0'\n features['cue-type'] = cue_type\n features['cue-pos'] = sent[cue_position][5]\n\n if key == 0:\n features['bw-bigram1'] = 'null'\n features['bw-bigram2'] = 'null'\n else:\n features['bw-bigram1'] = \"%s_*\" %sent[key-1][4]\n features['bw-bigram2'] = \"%s_*\" %sent[key-1][5]\n if not (key+1) in sent:\n features['fw-bigram1'] = 'null'\n features['fw-bigram2'] = 'null'\n else:\n features['fw-bigram1'] = \"*_%s\" %sent[key+1][4]\n features['fw-bigram2'] = \"*_%s\" %sent[key+1][5]\n instances.append(features)\n if key > seq_length:\n seq_length = key\n sentence_splits.append(seq_length)\n if mode == 'training':\n labels = extract_labels_scope(sentence_dicts, mode)\n return sentence_dicts, instances, labels, sentence_splits\n return sentence_dicts, instances, sentence_splits", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = 
self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def _sentence_to_features(self,sentence):\n\n configured_features = self.configFeatures\n sentence_features = []\n\n for word_idx in range(len(sentence)):\n # word before(-1), current word(0), next word(+1)\n feature_span = len(configured_features)\n half_span = feature_span // 2\n feature_range = range(-half_span, half_span + 1)\n prefixes = [str(i) for i in feature_range]\n word_features = {}\n for f_i in feature_range:\n if word_idx + f_i >= len(sentence):\n word_features[\"EOS\"] = True\n # End Of Sentence\n elif word_idx + f_i < 0:\n word_features[\"BOS\"] = True\n # Beginning Of Sentence\n else:\n word = sentence[word_idx + f_i]\n f_i_from_zero = f_i + half_span\n prefix = prefixes[f_i_from_zero]\n features = configured_features[f_i_from_zero]\n for feature in features:\n if feature == \"pattern\":\n # add all regexes as a feature\n regex_patterns = self.function_dict[feature](word)\n # pytype: disable=attribute-error\n for p_name, matched in regex_patterns.items():\n feature_name = prefix + \":\" + feature + \":\" + p_name\n word_features[feature_name] = matched\n # pytype: enable=attribute-error\n else:\n # append each feature to a feature vector\n value = self.function_dict[feature](word)\n word_features[prefix + \":\" + feature] = value\n sentence_features.append(word_features)\n return sentence_features", "def features(self, sentence, tags, index):\n return{\n 'word': sentence[ index ],\n 'prevWord': '' if index == 0 else sentence[ index - 1 ],\n 'nextWord': '' if index == len( sentence ) -1 else sentence[ index + 1 ],\n 'isFirst': index == 0,\n 'isLast': index == len( sentence ) - 1,\n 'isCapitalized': sentence[index][0].upper() == sentence[ index ][ 0],\n 'isAllCaps': sentence[ index ].upper() == sentence[ index ],\n 'isAllLowers': sentence[ index ].lower() == sentence[ index ],\n 'prefix-1': sentence[ index ][ 0 ],\n 'prefix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][:2],\n 'prefix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][:3],\n 'prefix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][:4],\n 'suffix-1': sentence[ index ][ -1 ],\n 'suffix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][-2:],\n 'suffix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][-3:],\n 'suffix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][-4:],\n 'tag-1': '' if index == 0 else tags[ index - 1 ],\n 'tag-2': '' if index < 2 else tags[ index - 2 ]\n }", "def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n 
fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields", "def extract_features(self, doc):\n\n features = dict()\n\n bow = self.vectorize_doc_simple(doc)\n\n charcount = self.char_count(doc)\n wordcount = self.word_count(doc)\n sentencecount = self.sentence_count(doc)\n paragraphcount = self.paragraph_count(doc)\n\n # extract characters features\n features['characters per word'] = charcount / wordcount\n features['characters per sentence'] = charcount / sentencecount\n features['characters per paragraph'] = charcount / paragraphcount\n 
features['characters per document'] = charcount\n\n features['word characters length variance'] = numpy.std(\n self.word_char_length_variance(doc))\n features['sentence characters length variance'] = numpy.std(\n self.sentence_char_length_variance(doc))\n\n # extract words features\n features['words per sentence'] = wordcount / sentencecount\n features['words per paragraph'] = wordcount / paragraphcount\n features['words per document'] = wordcount\n\n features['sentence words length variance'] = numpy.std(\n self.sentence_words_length_variance(doc))\n\n # extract sentences features\n features['sentences per paragraph'] = sentencecount / paragraphcount\n features['sentences per document'] = sentencecount\n\n # extract paragraphs features\n features['paragraphs per document'] = paragraphcount\n\n # extract syllables features\n syllablecount = 0\n for word, count in bow.iteritems():\n syllablecount += self.num_of_syllables(word) * count\n features['syllables per word'] = syllablecount / wordcount\n features['syllables per sentence'] = syllablecount / sentencecount\n features['syllables per paragraph'] = syllablecount / paragraphcount\n\n # extract part of speech features\n tokens = self.pos_tag_doc(doc)\n\n pos_counts = self.vectorize_pos_tags(tokens)\n poswordcount = sum(pos_counts.values())\n for i in xrange(82, 101):\n features['%d per word' % i] = pos_counts[i] / poswordcount\n\n sorted_pos_counts = sorted(pos_counts, key=pos_counts.get, reverse=True)\n features['1st top tag'] = str(sorted_pos_counts[0])\n features['2nd top tag'] = str(sorted_pos_counts[1])\n features['3rd top tag'] = str(sorted_pos_counts[2])\n features['4th top tag'] = str(sorted_pos_counts[3])\n features['5th top tag'] = str(sorted_pos_counts[4])\n\n # extract vocab features\n vocabsize = len(self.vectorize_doc_simple(doc))\n features['vocab size'] = vocabsize\n features['words per vocab size'] = wordcount / vocabsize\n\n return features", "def parse_article(article_dict, cue_verbs, poly=None):\n features = []\n speakers_in_article = article_dict['article'].people['mentions']\n\n for i, speaker in enumerate(speakers_in_article):\n speaker_features = attribution_features_baseline(\n article_dict['article'],\n article_dict['sentences'],\n article_dict['quotes'],\n speaker,\n speakers_in_article[:i] + speakers_in_article[i + 1:],\n cue_verbs\n )\n if poly:\n speaker_features = poly.fit_transform(speaker_features.reshape((-1, 1))).reshape((-1,))\n features.append(\n speaker_features\n )\n\n labels = len(speakers_in_article) * [0]\n\n # Set labels by looking at which quote belongs to which speaker\n for i, sent_index in enumerate(article_dict['quotes']):\n # List of indices of the tokens of the true author of the quote\n true_author = article_dict['authors'][i]\n true_mention_index = find_true_author_index(true_author, speakers_in_article)\n if true_mention_index >= 0:\n labels[true_mention_index] = 1\n\n return features, labels, len(speakers_in_article)", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def getBERTFeatures(model, text, attn_head_idx=-1): # attn_head_idx - index o[]\n\n tokenized_text = tokenizer.tokenize(text)\n if len(tokenized_text) > 200:\n tokenized_text = tokenized_text[0:200]\n indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n #print('indexed_tokens: ', indexed_tokens)\n tokens_tensor = torch.tensor([indexed_tokens])\n #print('tokens_tensor: ', tokens_tensor)\n _, _, token_feats, pool_out = 
model(tokens_tensor)\n final_feats = list(getPooledFeatures(token_feats[attn_head_idx]).T)\n return token_feats[attn_head_idx][0],final_feats,tokenized_text", "def get_dataset_features(text):\n return model.extract(text)", "def get_text_features() -> np.array:\r\n # Universal sentence encoder model\r\n # Original model by Google could be loaded from: https://tfhub.dev/google/universal-sentence-encoder/4\r\n # In this notebook the model is loaded from a public dataset on Kaggle\r\n # at https://www.kaggle.com/dimitreoliveira/universalsentenceencodermodels\r\n text_model = tf.keras.Sequential(\r\n [KerasLayer(txt_model_path, input_shape=[], dtype=tf.string, # Pretrained model\r\n output_shape=[512], trainable=False),\r\n tf.keras.layers.Layer(512, dtype='float16')] # This layer reduces precision of float numbers\r\n )\r\n\r\n # Convert all texts to vectors\r\n features = text_model.predict(data['title'],\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Text features extracted. Shape:', features.shape)\r\n\r\n return features", "def sentences_to_features(self, sentences, labels):\n\n input_examples = [run_classifier.InputExample(guid=\"\", text_a=s, text_b=None, label=l) for s, l in\n zip(sentences, labels)] # here, \"\" is just a dummy label\n input_features = run_classifier.convert_examples_to_features(input_examples, self.label_list,\n self.params[\"MAX_SEQ_LENGTH\"],\n self.tokenizer)\n return input_features", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def extract_video_features():\r\n\r\n # Face feature extraction from Openface output file\r\n file = open(\"Extracted_Features/\"+input_video[:len(input_video)-4]+\"_Features/\"+input_video[:len(input_video)-4]+\".csv\")\r\n reader = csv.DictReader(file)\r\n features = {}\r\n\r\n for row in reader:\r\n\r\n # Taking only good frames where faces have been detected with a confidence higher than 0.8 (Openface standard)\r\n if int(row[' success']) == 1 and float(row[' confidence']) > 0.5:\r\n face_id = int(row[' face_id'])\r\n frame = int(row['frame']) - 1\r\n\r\n features.setdefault(frame, {})\r\n face_features = []\r\n\r\n # Mouth LandMarks\r\n for i in range(0, 68):\r\n face_features.append(float(row[' x_' + str(i)]))\r\n\r\n for i in range(0, 68):\r\n face_features.append(float(row[' y_' + str(i)]))\r\n\r\n if f_type == \"AU\":\r\n au = [\"10\", \"12\", \"14\", \"15\", \"17\", \"20\", \"23\", \"25\", \"26\"]\r\n for i in au:\r\n face_features.append(float(row[' AU' + i + '_r']))\r\n\r\n features[frame][face_id] = face_features\r\n\r\n return features", "def feature_extract(self, CT_pairs):\n instances = []\n for pair in CT_pairs:\n config = pair[0]\n label = pair[1]\n data = []\n featureset = {}\n \n # for nltk NaiveBayes feature selection stuff when doing MaxEnt decoding parser commit this\n# featureset[\"topOfBuffer\"] = self.token_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = 
self.token_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = (self.token_dict[config.sigma.top()], self.token_dict[config.beta.top()])\n# featureset[\"topOfBuffer\"] = self.POS_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.POS_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = tuple((self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]))\n \n # add the (StackTopPOS,BufferTopPOS,bufferchildren_POS) feature\n #value_set = tuple([self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]] + [self.POS_dict[child] for child in self.getBufferChildren(config.beta.top())])\n #featureset[\"bufferStackbufferChildrenPair\"] = value_set\n \n # for MaxEnt decoding stuff\n # token variants\n data.append((\"topOfBuffer\",self.token_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.token_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.token_dict[config.sigma.top()],self.token_dict[config.beta.top()]))\n #POS variants\n data.append((\"topOfBuffer\",self.POS_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.POS_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.POS_dict[config.sigma.top()],self.POS_dict[config.beta.top()]))\n ins = Instance(label=label, data=data)\n #ins = Instance(label=label, data=featureset)\n instances.append(ins)\n \n return instances", "def get_text_features(text, word_features):\n words = word_tokenize(text)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features", "def extract_features(self, src_tokens, **kwargs):\n return self.decoder.extract_features(src_tokens, **kwargs)", "def extract_features_for_file(input_file, output_file, posfile):\n if not unlabeled:\n sents = read_file(input_file)\n else:\n sents = read_file_unlabeled(input_file)\n postags = get_pos_tags(posfile)\n with open(output_file,'w') as output_fileobj:\n if not unlabeled:\n for tokens,goldtags in sents:\n feats = extract_features_for_sentence(tokens, postags)\n for t in range(len(tokens)):\n feats_tabsep = \"\\t\".join(feats[t])\n print>>output_fileobj, \"%s\\t%s\" % (goldtags[t], feats_tabsep)\n print>>output_fileobj, \"\"\n else:\n for tokens in sents:\n feats = extract_features_for_sentence(tokens, postags)\n for t in range(len(tokens)):\n feats_tabsep = \"\\t\".join(feats[t])\n print>>output_fileobj, \"%s\" % (feats_tabsep) #for nolabels dat\n print>>output_fileobj, \"\"", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}", "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "def get_features(sentences: tuple) -> np.ndarray:\n sen_embedding = [_single_sentence(st) for st in sentences]\n sen_embedding = np.array(sen_embedding)\n return sen_embedding", "def _extract_features(self, a_rel, a_parses):\n feats = {}\n doc_id = a_rel[DOC_ID]\n toks_pos1 = self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG1)\n toks_pos2 = 
self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG2)\n self._get_product_rules(feats, doc_id, a_rel, a_parses)\n self._get_dep_rules(feats, doc_id, a_rel, a_parses)\n self._get_first_last_toks(feats, toks_pos1, toks_pos2)\n self._get_modality(feats, toks_pos1, toks_pos2)\n self._get_vb_class(feats, toks_pos1, toks_pos2)\n self._get_brown_clusters(feats, toks_pos1, toks_pos2)\n self._get_inquirer(feats, toks_pos1, toks_pos2)\n self._get_MPQA(feats, toks_pos1, toks_pos2)\n return feats", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def time_question_features(self, text):\n features = {}\n\n # A list of all words from the known sentences\n all_words = \" \".join(self.positive + self.negative).split()\n\n # A list of the first word in each of the known sentence\n all_first_words = []\n for sentence in self.positive + self.negative:\n all_first_words.append(\n sentence.split(' ', 1)[0]\n )\n\n for word in text.split():\n features['first_word({})'.format(word)] = (word in all_first_words)\n\n for word in text.split():\n features['contains({})'.format(word)] = (word in all_words)\n\n for letter in 'abcdefghijklmnopqrstuvwxyz':\n features['count({})'.format(letter)] = text.lower().count(letter)\n features['has({})'.format(letter)] = (letter in text.lower())\n\n return features", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features", "def extract_features(sentence, vocabulary):\n n_tokens = len(sentence)\n n_features = n_feature_functions + len(vocabulary)\n X = sp.lil_matrix((n_tokens, n_features), dtype=bool)\n\n for i in xrange(n_tokens):\n for j, f in enumerate(FEATURE_FUNCTIONS):\n X[i, j] = f(sentence, i)\n\n # Vocabulary feature\n try:\n X[i, n_feature_functions + vocabulary[sentence[i][0].lower()]] = 1\n except KeyError:\n pass\n\n return X", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n 
pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def collect(self, vcfname, tag):\n if tag not in [\"TP\", \"FN\"]:\n return extractPiscesIndelFeatures(vcfname, tag, self.chr_depth)\n else:\n features = [\"CHROM\", \"POS\", \"REF\", \"ALT\", \"QUAL\", \"S.1.VT\",\n \"I.T_ALT_RATE\", \"I.DP_normal\", \"I.DP_tumor\", \"I.tag\", \"I.count\"]\n return GenericFeatures.collectFeatures(vcfname, tag, features, processor=StrelkaAdmixIndelFeatures.processValue)", "def feat_collect(infile, feat_mode):\n from analysis.seqfile_ops import load_genbank\n gb_record = load_genbank(infile)\n feat_list = gb_record.features\n collected = []\n # establish collection parameters\n types_list = feat_mode['types'] # default entry is ('CDS')\n tags_dict = feat_mode['tags'] # default is an empty dictionary\n # start collecting features\n for feature in feat_list:\n if feature.type in types_list:\n if len(tags_dict.keys()) is 0:\n collected.append(feature)\n else:\n for tag_key in tags_dict.keys():\n if tag_key in feature.qualifiers:\n feat_value = feature.qualifiers.get(tag_key)\n if feat_value[0] in tags_dict[tag_key]:\n collected.append(feature)\n else: pass\n else: pass\n else: pass\n ## consider adding some info to the log\n return collected", "def extract_features(x, cnn):\n features = []\n prev_feat = x\n for i, layer in enumerate(cnn.net.layers[:-2]):\n next_feat = layer(prev_feat)\n features.append(next_feat)\n prev_feat = next_feat\n return features" ]
[ "0.66892666", "0.64949316", "0.64540094", "0.6393714", "0.6249462", "0.6235507", "0.62350196", "0.6123919", "0.6111465", "0.6105283", "0.6068304", "0.6055675", "0.6046535", "0.60203534", "0.5974322", "0.59696513", "0.58575445", "0.5842558", "0.58295137", "0.582673", "0.57915246", "0.5767559", "0.5756687", "0.5739525", "0.57225955", "0.57178396", "0.5690059", "0.56872874", "0.5674365", "0.56730974" ]
0.77999353
0
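The positive document in this record returns plain feature dictionaries (and, in training mode, integer labels), but does not say which classifier consumes them. One common, assumed setup is to vectorize the dictionaries and fit a linear model with scikit-learn:

```python
from sklearn.feature_extraction import DictVectorizer
from sklearn.svm import LinearSVC

# instances: list of feature dicts, labels: list of 1 / -1 ints,
# as produced in training mode by extract_features_cue (assumed to be in scope).
vectorizer = DictVectorizer(sparse=True)
X = vectorizer.fit_transform(instances)

classifier = LinearSVC()
classifier.fit(X, labels)

# At prediction time the fitted vectorizer must be reused on the new feature dicts.
predicted = classifier.predict(vectorizer.transform(instances))
```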
Extracts labels for training the cue classifier. Skips words that are not known cue words. For known cue words, label 1 means cue and label -1 means non-cue. Returns a list of integer labels.
def extract_labels_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon): labels = [] for sent in sentence_dicts: for key, value in sent.items(): if isinstance(key, int): if not_known_cue_word(value[3].lower(), cue_lexicon, affixal_cue_lexicon): continue if any(cue_position == key for (cue, cue_position, cue_type) in sent['cues']) or any(mw_pos == key for (mw_cue, mw_pos) in sent['mw_cues']): labels.append(1) else: labels.append(-1) return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}", "def _labels_of_sentence(self, sentence, esplit):\n\n labels = torch.zeros(len(sentence))\n for token_index, token_attribute_list in enumerate(sentence):\n label_string = token_attribute_list[self.task_label_index]\n labels[token_index] = self.category_int_of_label_string(label_string)\n return labels", "def generate_labels():\n label_set = set([])\n for data in load_data():\n label = data.split(' ', 1)[0]\n label_set.add(label)\n labels = list(label_set)\n labels.sort()\n return labels", "def get_labels(self):\n return [\"contradiction\", \"entailment\", \"neutral\"]", "def get_labels(self):\n return set(category.label for category in\n self.get_categories(LABELS_SCHEME))", "def _labels_of_sentence(self, sentence, split):\n #print(self.label_vocab)\n self.category_int_of_label_string('O')\n bioes_tags = self._string_labels_of_sentence(sentence)\n labels = torch.zeros(len(sentence))\n for index, label in enumerate(bioes_tags):\n labels[index] = self.category_int_of_label_string(label)\n return labels", "def getSurrogateLabels(docs, **kargs):\n import pattern.medcode as med\n\n # if labels are given, map them to the right format (i.e. res: label -> document positions)\n\n # labels = kargs.get('labels', None)\n # if labels is not None: \n # assert len(labels) == len(docs)\n # res = {} # {'dummy_label': [ ]}\n # for i, l in enumerate(labels): \n # if not res.has_key(l): res[l] = []\n # res[l].append(i)\n # return res\n\n # cohort_name = kargs['cohort']\n assert kargs.has_key('cohort'), \"No cohort specified.\"\n\n nDoc = len(docs)\n # ret = med.getSurrogateLabels(docs, **kargs) # cohort select appropriate disease which must provide generate()\n # [output] ret: labels -> docs IDs \n\n labels = med.label(docs, **kargs) # [params] cohort\n\n # convert to a list of labels matching docs positions \n # labels = []\n # if isinstance(ret, dict): # a dict mapping from labels to document IDs (positional IDs)\n # labels = [None] * len(docs)\n # for label, docIDs in ret.items(): \n # for i in docIDs: \n # labels[i] = label\n assert hasattr(labels, '__iter__') # desired format, a list of labels, one for each document\n assert len(labels) == nDoc\n\n # [condition] no unlabeled docs \n unlabeled = [i for i, l in enumerate(labels) if l is None]\n assert len(unlabeled) == 0, \"There exist unlabeled documents at positions: %s\" % unlabeled\n\n n_classes = len(set(labels))\n if n_classes == 1: print('getSurrogateLabels> One class only!')\n \n return labels # a list of class labels", "def labels(self):\n return self.label2cc.keys()", "def get_labels(self):\n return [token.label for token in self.tokens]", "def get_labels(self):\n return [\"A轮\", \"B轮\",\"C轮\",\"天使轮\",\"战略融资\"]", "def _extract_labels(srcs):\n # Tuples are already labels.\n if type(srcs) == type(()):\n return list(srcs)\n return []", "def labels(self):\n return self._get_labels(self.label_vector)", "def test_text_classifier_get_labels(self):\n pass", "def get_labels(self):\n\n labels = list(self.meta_data[self.target_column])\n\n return labels", "def get_labels(self) -> List[str]:\n return self.labels", "def get_labels(self):\n return []", "def get_labels(self):\n resp = self._client.scan(TableName=self.LABELS_TABLE)\n return [self._item_to_label(item) for item in resp['Items']]", "def _labels_of_sentence(self, sentence, split):\n labels = torch.ones(1)\n labels[0] = self.category_int_of_label_string(sentence[0][self.name_to_index_dict['label']]) #\n return 
labels", "def get_labels(self) -> List[str]:\n raise NotImplementedError()", "def labels(self) -> List[str]:\n\n return list(self.t0.keys())", "def get_labels() -> list[Label]:\n\n labels_file = deepcopy(get_data(\"labels.yml\"))\n standard_labels = []\n for group_info in labels_file[\"groups\"]:\n labels = group_info.pop(\"labels\", [])\n group = LabelGroup(**group_info)\n for label_info in labels:\n label = Label(**label_info, group=group)\n standard_labels.append(label)\n for label_info in labels_file[\"standalone\"]:\n label = Label(**label_info)\n standard_labels.append(label)\n return standard_labels", "def list_labels(self):\n # Create empty list\n label_names = []\n \n # For every name in training directory\n for name in os.listdir(self.train_data):\n # If it does not start with . (which hidden files do)\n if not name.startswith('.'):\n label_names.append(name)\n \n return label_names", "def get_true_test_labels(self, label_map, dataset):\n\n num_samples = len(dataset.tensors[0])\n label_id2str = {v: k for k, v in label_map.items()}\n attention_mask_all = dataset.tensors[1].data.numpy()\n trailing_mask_all = dataset.tensors[2].data.numpy()\n label_ids_all = dataset.tensors[3].data.numpy()\n seq_len = len(trailing_mask_all[0])\n labels = []\n\n for idx in range(num_samples):\n attention_mask = attention_mask_all[idx]\n trailing_mask = trailing_mask_all[idx]\n label_ids = label_ids_all[idx]\n one_sample = []\n\n for sid in range(seq_len):\n if attention_mask[sid] == 0:\n break\n\n if not trailing_mask[sid]:\n continue\n\n label_id = label_ids[sid]\n one_sample.append(label_id2str[label_id])\n labels.append(one_sample)\n return labels", "def get_labels(self):\n return self.labels[1:]", "def decodeLabels(predictions, vocab):\n\n decoded_labels =[]\n for sequence in predictions:\n temp = []\n for label in list(sequence):\n if label in vocab:\n temp.append(vocab[label])\n else:\n temp.append(label)\n decoded_labels.append(temp)\n \n return decoded_labels", "def labels(self) -> list:\n return self._labels", "def get_labels_decomposed(self) -> List[List[str]]:\n return [list(label) for label in self.labels]", "def get_fashion_mnist_labels(labels): #@save\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]", "def getCSLabels(self):\n\n if self._n_csets:\n return list(self._cslabels)", "def data_labels(data):\n\n # The data consists of a equal number of benign and deleterious samples\n # The first part of the data are the benign samples (label 0), and the second part the deleterious ones (label 1)\n n_samples = data.shape[0]\n n_class_samples = int(n_samples / 2)\n\n # Get a numpy array of the labels\n labels_ben = [0] * n_class_samples\n labels_del = [1] * n_class_samples\n labels = np.array(labels_ben + labels_del)\n\n # Create float numbers for the labels\n labels = labels.astype(float)\n\n # Convert the data into a numpy array\n # One hot encoded vector is not necessary, because the data is binary\n labels = np.reshape(labels, [-1, 1])\n\n return labels" ]
[ "0.6373983", "0.636646", "0.63285345", "0.63244736", "0.6292417", "0.62817603", "0.6245523", "0.6224025", "0.6190308", "0.6177026", "0.6169095", "0.6158527", "0.6142776", "0.61409414", "0.6128977", "0.61261064", "0.61201036", "0.6105373", "0.60926604", "0.60921204", "0.6073379", "0.60715485", "0.6071357", "0.6065038", "0.6061794", "0.60481775", "0.60449237", "0.603706", "0.60085726", "0.59950787" ]
0.7232671
0
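Both cue records call `not_known_cue_word` and `get_affix_cue`, which are never shown. The following hypothetical sketch only illustrates one plausible behaviour for these helpers; the real implementations may differ.

```python
def get_affix_cue(word, affixal_cue_lexicon):
    # Hypothetical helper: return the affixal cue contained in word, or None.
    for affix in affixal_cue_lexicon:
        if word.startswith(affix) or word.endswith(affix):
            return affix
    return None


def not_known_cue_word(word, cue_lexicon, affixal_cue_lexicon):
    # Hypothetical helper: True if word is neither a listed cue nor carries an affixal cue.
    return word not in cue_lexicon and get_affix_cue(word, affixal_cue_lexicon) is None
```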
Extracts features for the scope classifier from the sentence dictionaries. Returns (modified) sentence dictionaries, a list of feature dictionaries, a list of the sentence lengths, and if called in training mode, a list of labels.
def extract_features_scope(sentence_dicts, mode='training'): instances = [] sentence_splits = [] for sent in sentence_dicts: if not sent['neg']: continue print(sent) graph = make_dir_graph_for_sentence(sent) bidir_graph = make_bidir_graph_for_sentence(sent) for cue_i, (cue, cue_position, cue_type) in enumerate(sent['cues']): seq_length = -1 for key, value in sent.items(): features = {} if isinstance(key, int): features['token'] = value[3] features['lemma'] = value[4] features['pos'] = value[5] features['dir-dep-dist'] = get_shortest_path(graph, sent, cue_position, key) features['dep-graph-path'] = get_dep_graph_path(bidir_graph, sent, cue_position, key) dist = key - cue_position nor_index = find_nor_index(sent) if cue == "neither" and nor_index > -1 and abs(key-nor_index) < abs(dist): dist = key - nor_index #token is to the left of cue if dist < 0: if abs(dist) <= 9: features['left-cue-dist'] = 'A' else: features['left-cue-dist'] = 'B' features['right-cue-dist'] = 'null' #token is to the right of cue elif dist > 0: if dist <= 15: features['right-cue-dist'] = 'A' else: features['right-cue-dist'] = 'B' features['left-cue-dist'] = 'null' else: features['left-cue-dist'] = '0' features['right-cue-dist'] = '0' features['cue-type'] = cue_type features['cue-pos'] = sent[cue_position][5] if key == 0: features['bw-bigram1'] = 'null' features['bw-bigram2'] = 'null' else: features['bw-bigram1'] = "%s_*" %sent[key-1][4] features['bw-bigram2'] = "%s_*" %sent[key-1][5] if not (key+1) in sent: features['fw-bigram1'] = 'null' features['fw-bigram2'] = 'null' else: features['fw-bigram1'] = "*_%s" %sent[key+1][4] features['fw-bigram2'] = "*_%s" %sent[key+1][5] instances.append(features) if key > seq_length: seq_length = key sentence_splits.append(seq_length) if mode == 'training': labels = extract_labels_scope(sentence_dicts, mode) return sentence_dicts, instances, labels, sentence_splits return sentence_dicts, instances, sentence_splits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sentence_to_features(self,sentence):\n\n configured_features = self.configFeatures\n sentence_features = []\n\n for word_idx in range(len(sentence)):\n # word before(-1), current word(0), next word(+1)\n feature_span = len(configured_features)\n half_span = feature_span // 2\n feature_range = range(-half_span, half_span + 1)\n prefixes = [str(i) for i in feature_range]\n word_features = {}\n for f_i in feature_range:\n if word_idx + f_i >= len(sentence):\n word_features[\"EOS\"] = True\n # End Of Sentence\n elif word_idx + f_i < 0:\n word_features[\"BOS\"] = True\n # Beginning Of Sentence\n else:\n word = sentence[word_idx + f_i]\n f_i_from_zero = f_i + half_span\n prefix = prefixes[f_i_from_zero]\n features = configured_features[f_i_from_zero]\n for feature in features:\n if feature == \"pattern\":\n # add all regexes as a feature\n regex_patterns = self.function_dict[feature](word)\n # pytype: disable=attribute-error\n for p_name, matched in regex_patterns.items():\n feature_name = prefix + \":\" + feature + \":\" + p_name\n word_features[feature_name] = matched\n # pytype: enable=attribute-error\n else:\n # append each feature to a feature vector\n value = self.function_dict[feature](word)\n word_features[prefix + \":\" + feature] = value\n sentence_features.append(word_features)\n return sentence_features", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + 
len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def extract_features(sentence, vocabulary):\n n_tokens = len(sentence)\n n_features = n_feature_functions + len(vocabulary)\n X = sp.lil_matrix((n_tokens, n_features), dtype=bool)\n\n for i in xrange(n_tokens):\n for j, f in enumerate(FEATURE_FUNCTIONS):\n X[i, j] = f(sentence, i)\n\n # Vocabulary feature\n try:\n X[i, n_feature_functions + vocabulary[sentence[i][0].lower()]] = 1\n except KeyError:\n pass\n\n return X", "def extract_features_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon, mode='training'):\n instances = []\n for sent in sentence_dicts:\n # print(sent)\n for key, value in sent.items():\n features = {}\n if isinstance(key, int):\n if not_known_cue_word(value[3].lower(), cue_lexicon, affixal_cue_lexicon):\n sent[key]['not-pred-cue'] = True\n continue\n\n features['token'] = value[3].lower()\n features['lemma'] = value[4].lower()\n features['pos'] = value[5]\n\n if key == 0:\n features['bw-bigram1'] = 'null'\n else:\n features['bw-bigram1'] = \"%s_*\" %sent[key-1][4].lower()\n if not (key+1) in sent:\n features['fw-bigram1'] = 'null'\n else:\n features['fw-bigram1'] = \"*_%s\" %sent[key+1][4].lower()\n \n affix = get_affix_cue(value[3].lower(), affixal_cue_lexicon)\n if affix != None:\n base = value[3].lower().replace(affix, \"\")\n features['char-5gram1'], features['char-5gram2'] = get_character_ngrams(base, affix, 5)\n features['char-4gram1'], features['char-4gram2'] = get_character_ngrams(base, affix, 4)\n features['char-3gram1'], features['char-3gram2'] = get_character_ngrams(base, affix, 3)\n features['char-2gram1'], features['char-2gram2'] = get_character_ngrams(base, affix, 2)\n features['char-1gram1'], features['char-1gram2'] = get_character_ngrams(base, affix, 1)\n features['affix'] = affix\n else:\n features['char-5gram1'], features['char-5gram2'] = 'null','null'\n features['char-4gram1'], features['char-4gram2'] = 'null','null'\n features['char-3gram1'], features['char-3gram2'] = 'null','null'\n features['char-2gram1'], features['char-2gram2'] = 'null','null'\n features['char-1gram1'], features['char-1gram2'] = 'null','null'\n features['affix'] = 'null'\n \n instances.append(features)\n if mode == 'training':\n labels = extract_labels_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon)\n return sentence_dicts, instances, labels\n return sentence_dicts, instances", "def extract_features(self, doc):\n\n features = dict()\n\n bow = self.vectorize_doc_simple(doc)\n\n charcount = self.char_count(doc)\n wordcount = self.word_count(doc)\n sentencecount = self.sentence_count(doc)\n paragraphcount = self.paragraph_count(doc)\n\n # extract characters features\n features['characters per word'] = charcount / wordcount\n features['characters per sentence'] = charcount / sentencecount\n features['characters per paragraph'] = charcount / paragraphcount\n features['characters per document'] = charcount\n\n features['word characters length variance'] = numpy.std(\n self.word_char_length_variance(doc))\n features['sentence characters length variance'] = numpy.std(\n self.sentence_char_length_variance(doc))\n\n # extract words features\n features['words per sentence'] = wordcount / sentencecount\n features['words per paragraph'] = wordcount / paragraphcount\n features['words per document'] = wordcount\n\n 
features['sentence words length variance'] = numpy.std(\n self.sentence_words_length_variance(doc))\n\n # extract sentences features\n features['sentences per paragraph'] = sentencecount / paragraphcount\n features['sentences per document'] = sentencecount\n\n # extract paragraphs features\n features['paragraphs per document'] = paragraphcount\n\n # extract syllables features\n syllablecount = 0\n for word, count in bow.iteritems():\n syllablecount += self.num_of_syllables(word) * count\n features['syllables per word'] = syllablecount / wordcount\n features['syllables per sentence'] = syllablecount / sentencecount\n features['syllables per paragraph'] = syllablecount / paragraphcount\n\n # extract part of speech features\n tokens = self.pos_tag_doc(doc)\n\n pos_counts = self.vectorize_pos_tags(tokens)\n poswordcount = sum(pos_counts.values())\n for i in xrange(82, 101):\n features['%d per word' % i] = pos_counts[i] / poswordcount\n\n sorted_pos_counts = sorted(pos_counts, key=pos_counts.get, reverse=True)\n features['1st top tag'] = str(sorted_pos_counts[0])\n features['2nd top tag'] = str(sorted_pos_counts[1])\n features['3rd top tag'] = str(sorted_pos_counts[2])\n features['4th top tag'] = str(sorted_pos_counts[3])\n features['5th top tag'] = str(sorted_pos_counts[4])\n\n # extract vocab features\n vocabsize = len(self.vectorize_doc_simple(doc))\n features['vocab size'] = vocabsize\n features['words per vocab size'] = wordcount / vocabsize\n\n return features", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def features(self, sent, position):\n if type(sent[0]) is str:\n fts = []\n if self.training:\n curr_word = 'curr=' + sent[position].lower()\n fts.append(curr_word)\n elif sent[position].lower() in self.vocab:\n curr_word = 'curr=' + sent[position].lower()\n fts.append(curr_word)\n else:\n curr_word = 'curr=UNK'\n fts.append(curr_word)\n prefix = 'pref=' + sent[position][:2].lower()\n suffix = 'suff=' + sent[position][-2:].lower()\n if position == 0:\n prev_word1 = 'prev_word1=*START*'\n fts.append(prev_word1)\n if position == len(sent) - 1:\n next_word1 = 'next_word1=*END*'\n fts.append(next_word1)\n if position >= 1:\n if self.training:\n prev_word1 = 'prev_word1=' + sent[position - 1].lower()\n fts.append(prev_word1)\n elif 'prev_word1=' + sent[position - 1].lower() in self.vocab:\n prev_word1 = 'prev_word1=' + sent[position - 1].lower()\n fts.append(prev_word1)\n else:\n prev_word1 = 'prev_word1=UNK'\n fts.append(prev_word1)\n\n if position >= 2:\n if self.training:\n prev_word2 = 'prev_word2=' + sent[position - 2].lower()\n fts.append(prev_word2)\n elif 'prev_word2=' + sent[position - 2].lower() in self.vocab:\n prev_word2 = 'prev_word2=' + sent[position - 2].lower()\n fts.append(prev_word2)\n else:\n prev_word2 = 'prev_word2=UNK'\n fts.append(prev_word2)\n\n if position <= (len(sent) - 2):\n if self.training:\n next_word1 = 'next_word1=' + sent[position + 1].lower()\n 
fts.append(next_word1)\n elif 'next_word1=' + sent[position + 1].lower() in self.vocab:\n next_word1 = 'next_word1=' + sent[position + 1].lower()\n fts.append(next_word1)\n else:\n next_word1 = 'next_word1=UNK'\n fts.append(next_word1)\n if position <= (len(sent) - 3):\n if self.training:\n next_word2 = 'next_word2=' + sent[position + 2].lower()\n fts.append(next_word2)\n elif 'next_word2=' + sent[position + 2].lower() in self.vocab:\n next_word2 = 'next_word2=' + sent[position + 2].lower()\n fts.append(next_word2)\n else:\n next_word2 = 'next_word2=UNK'\n fts.append(next_word2)\n\n if self.training:\n fts.append(prefix)\n elif prefix in self.vocab:\n fts.append(prefix)\n if self.training:\n fts.append(suffix)\n elif suffix in self.vocab:\n fts.append(suffix)\n\n else:\n fts = []\n if self.training:\n curr_word = 'curr=' + sent[position][0].lower()\n fts.append(curr_word)\n elif sent[position][0].lower() in self.vocab:\n curr_word = 'curr=' + sent[position][0].lower()\n fts.append(curr_word)\n else:\n curr_word = 'curr=UNK'\n fts.append(curr_word)\n prefix = 'pref=' + sent[position][0][:2].lower()\n suffix = 'suff=' + sent[position][0][-2:].lower()\n if position == 0:\n prev_word1 = 'prev_word1=*START*'\n fts.append(prev_word1)\n if position == len(sent) - 1:\n next_word1 = 'next_word1=*END*'\n fts.append(next_word1)\n if position >= 1:\n if self.training:\n prev_word1 = 'prev_word1=' + sent[position-1][0].lower()\n fts.append(prev_word1)\n elif 'prev_word1=' + sent[position-1][0].lower() in self.vocab:\n prev_word1 = 'prev_word1=' + sent[position-1][0].lower()\n fts.append(prev_word1)\n else:\n prev_word1 = 'prev_word1=UNK'\n fts.append(prev_word1)\n\n if position >= 2:\n if self.training:\n prev_word2 = 'prev_word2=' + sent[position-2][0].lower()\n fts.append(prev_word2)\n elif 'prev_word2=' + sent[position-2][0].lower() in self.vocab:\n prev_word2 = 'prev_word2=' + sent[position-2][0].lower()\n fts.append(prev_word2)\n else:\n prev_word2 = 'prev_word2=UNK'\n fts.append(prev_word2)\n\n if position <= (len(sent) - 2):\n if self.training:\n next_word1 = 'next_word1=' + sent[position+1][0].lower()\n fts.append(next_word1)\n elif 'next_word1=' + sent[position+1][0].lower() in self.vocab:\n next_word1 = 'next_word1=' + sent[position+1][0].lower()\n fts.append(next_word1)\n else:\n next_word1 = 'next_word1=UNK'\n fts.append(next_word1)\n if position <= (len(sent) - 3):\n if self.training:\n next_word2 = 'next_word2=' + sent[position+2][0].lower()\n fts.append(next_word2)\n elif 'next_word2=' + sent[position+2][0].lower() in self.vocab:\n next_word2 = 'next_word2=' + sent[position + 2][0].lower()\n fts.append(next_word2)\n else:\n next_word2 = 'next_word2=UNK'\n fts.append(next_word2)\n\n if self.training:\n fts.append(prefix)\n elif prefix in self.vocab:\n fts.append(prefix)\n if self.training:\n fts.append(suffix)\n elif suffix in self.vocab:\n fts.append(suffix)\n\n return fts", "def features(self, sentence, tags, index):\n return{\n 'word': sentence[ index ],\n 'prevWord': '' if index == 0 else sentence[ index - 1 ],\n 'nextWord': '' if index == len( sentence ) -1 else sentence[ index + 1 ],\n 'isFirst': index == 0,\n 'isLast': index == len( sentence ) - 1,\n 'isCapitalized': sentence[index][0].upper() == sentence[ index ][ 0],\n 'isAllCaps': sentence[ index ].upper() == sentence[ index ],\n 'isAllLowers': sentence[ index ].lower() == sentence[ index ],\n 'prefix-1': sentence[ index ][ 0 ],\n 'prefix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][:2],\n 'prefix-3': '' if ( 
len(sentence) < 3 ) else sentence[ index ][:3],\n 'prefix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][:4],\n 'suffix-1': sentence[ index ][ -1 ],\n 'suffix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][-2:],\n 'suffix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][-3:],\n 'suffix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][-4:],\n 'tag-1': '' if index == 0 else tags[ index - 1 ],\n 'tag-2': '' if index < 2 else tags[ index - 2 ]\n }", "def get_sentence_to_context_map(sentences):\n # Load the vocab\n en_vocab = get_english_vocab(DATA_DIR,VOCAB_SIZE)\n\n # Allocate the sentences to buckets\n bucketed = {}\n for sentence in sentences:\n bucket_id = get_bucket(en_vocab,sentence)\n bucketed.setdefault(bucket_id,[])\n bucketed[bucket_id].append(sentence)\n\n mapped = {}\n with tf.Session() as sess:\n # Create model and load parameters.\n model = create_model(sess, True, train_dir=TRAIN_DIR)\n model.batch_size = BATCH_SIZE # We decode 64 sentence at a time.\n # Iterate over each bucket\n for bucket_id,sentences in bucketed.iteritems():\n for batch in chunker(sentences,BATCH_SIZE):\n data = []\n # Tokenize each sentence\n for sentence in batch:\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)\n expected_output = []\n data.append((token_ids, expected_output))\n # Use the model to obtain contexts for each sentence in the batch\n encoder_inputs, decoder_inputs, target_weights = model.get_batch({bucket_id: data}, bucket_id)\n contexts = model.step_context(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id)\n features = np.hstack(contexts)\n print 'Encoded {0} sentences into {1} dimensional vectors'.format(*features.shape)\n # Now we align sentences with their contexts\n for i,sentence in enumerate(batch):\n mapped[sentence] = features[i,:].tolist()\n return mapped", "def get_text_features(text, word_features):\n words = word_tokenize(text)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features", "def get_text_features() -> np.array:\r\n # Universal sentence encoder model\r\n # Original model by Google could be loaded from: https://tfhub.dev/google/universal-sentence-encoder/4\r\n # In this notebook the model is loaded from a public dataset on Kaggle\r\n # at https://www.kaggle.com/dimitreoliveira/universalsentenceencodermodels\r\n text_model = tf.keras.Sequential(\r\n [KerasLayer(txt_model_path, input_shape=[], dtype=tf.string, # Pretrained model\r\n output_shape=[512], trainable=False),\r\n tf.keras.layers.Layer(512, dtype='float16')] # This layer reduces precision of float numbers\r\n )\r\n\r\n # Convert all texts to vectors\r\n features = text_model.predict(data['title'],\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Text features extracted. 
Shape:', features.shape)\r\n\r\n return features", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features", "def get_feature_set_SC2(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = sentimentvalues[0]+sentimentvalues[1]\n obj_score = sentimentvalues[2]\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def _extract_features(self, a_rel, a_parses):\n feats = {}\n doc_id = a_rel[DOC_ID]\n toks_pos1 = self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG1)\n toks_pos2 = self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG2)\n self._get_product_rules(feats, doc_id, a_rel, a_parses)\n self._get_dep_rules(feats, doc_id, a_rel, a_parses)\n self._get_first_last_toks(feats, toks_pos1, toks_pos2)\n self._get_modality(feats, toks_pos1, toks_pos2)\n self._get_vb_class(feats, toks_pos1, toks_pos2)\n self._get_brown_clusters(feats, toks_pos1, toks_pos2)\n self._get_inquirer(feats, toks_pos1, toks_pos2)\n self._get_MPQA(feats, toks_pos1, toks_pos2)\n return feats", "def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the 
denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields", "def concept_features_for_chunk(self, sentence, ind):\n\n features = {'dummy':1}\n\n # Word-level features for each word of the chunk\n for w in sentence[ind].split():\n word_features = self.concept_features_for_word(w)\n features.update(word_features)\n\n return features\n\n # Context windows\n for feature in self.enabled_concept_features:\n\n # Feature: Previous word\n if feature == \"previous_word_stem\":\n if ind != 0:\n prev_ind = ind - 1\n prev_chunk = sentence[prev_ind].split()\n prev_word = porter_st.stem( prev_chunk[-1] )\n features[ ('prev_word_stem',prev_word) ] = 1\n else:\n features[ ('prev_word_stem','<START>') ] = 1\n\n # Feature: Previous word\n if feature == \"next_word_stem\":\n if ind != len(sentence)-1:\n next_ind = ind + 1\n next_chunk = sentence[next_ind].split()\n next_word = porter_st.stem( next_chunk[0] )\n features[ ('next_word_stem',next_word) ] = 1\n else:\n features[ ('next_word_stem','<END>') ] = 1\n\n\n return features", "def word2features(sent, i):\n features = []\n\n # the [-1,+1] window of words around the token\n for o in [-1,0,1]:\n if i+o >= 0 and i+o < len(sent):\n word_tuple = sent[i+o]\n word_window = get_words_in_window(word_tuple, o)\n features.extend(word_window)\n\n # # part of speech\n # pos = ('pos', sent[i][1])\n # features.append(pos)\n\n # prop = ('prop', is_proper_case(sent[i][0]))\n # features.append(prop)\n\n return dict(features)", "def other_features_(tweet, cleaned_tweet):\n #print(\"WARNING>>>>>>>>>>>>>>>>> VADERSENTIMENT DISABLED\")\n sentiment = 
nlp.sentiment_analyzer.polarity_scores(tweet)\n\n words = cleaned_tweet #Get text only\n\n syllables = textstat.syllable_count(words) #count syllables in words\n num_chars = sum(len(w) for w in words) #num chars in words\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n\n\n twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://\n features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['compound'],\n twitter_objs[2], twitter_objs[1],]\n #features = pandas.DataFrame(features)\n return features", "def sent_features(tweet):\n twitter_objs = count_twitter_objs(tweet)\n tweet=clean_tweet(tweet) \n sentiment = sentiment_analyzer.polarity_scores(tweet)\n #Get text only\n words = preprocess(tweet) \n syllables = textstat.syllable_count(words)\n num_chars = sum(len(w) for w in words)\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n \n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n \n \\\n retweet = 0\n if \"rt\" in words:\n retweet = 1\n features = [FKRA, FRE,syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],\n twitter_objs[2], twitter_objs[1],\n twitter_objs[0], retweet]\n return features", "def featurize(self, tokens):\n features = []\n \n nrc_hashtag_emotion_features = self.nrc_hashtag_emotion(tokens)\n nrc_affect_intensity_features = self.nrc_affect_intensity(tokens)\n nrc_hashtag_sentiment_lexicon_unigrams_features = self.nrc_hashtag_sentiment_lexicon_unigrams(tokens)\n nrc_hashtag_sentiment_lexicon_bigrams_features = self.nrc_hashtag_sentiment_lexicon_bigrams(tokens)\n sentiment140_unigrams_features = self.sentiment140_unigrams(tokens)\n sentiment140_bigrams_features = self.sentiment140_bigrams(tokens)\n senti_wordnet_features = self.senti_wordnet(tokens)\n bing_lui_sentiment_lexicons_features = self.bing_lui_sentiment_lexicons(tokens)\n nrc_expanded_lexicon_features = self.nrc_10_expanded(tokens)\n negating_word_list_features = self.negating_words_list(tokens)\n total_number_of_words_features = self.get_total_number_of_words(tokens)\n mpqa_subjectivity_lexicon_features = self.mpqa_subjectivity_lexicon(tokens)\n afinn_sentiment_features = self.afinn_sentiment_scores(tokens)\n # senti_strength_features = self.get_sentistrength(\" \".join(tokens))\n\n features.extend(nrc_hashtag_emotion_features.values()) # 10 features\n features.extend(nrc_affect_intensity_features.values()) # 10 features\n features.extend(nrc_hashtag_sentiment_lexicon_unigrams_features.values()) # 4 features\n features.extend(nrc_hashtag_sentiment_lexicon_bigrams_features.values()) # 4 features\n 
features.extend(sentiment140_unigrams_features.values()) # 4 features \n features.extend(sentiment140_bigrams_features.values()) # 4 features\n features.extend(senti_wordnet_features.values()) # 4 features\n features.extend(bing_lui_sentiment_lexicons_features.values()) # 2 features\n features.extend(nrc_expanded_lexicon_features.values()) # 10 features\n features.extend(negating_word_list_features.values()) # 1 feature\n features.extend(total_number_of_words_features.values()) # 1 feature\n features.extend(mpqa_subjectivity_lexicon_features.values()) # 2 features\n features.extend(afinn_sentiment_features.values()) # 2 features\n # features.extend(senti_strength_features.values()) # 2 features\n\n return features", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def find_features(sentence: str) -> Set[str]:\n sent_dict = set()\n sentence = _NLP(sentence)\n for token in sentence:\n # check if the word is an opinion word, then assign sentiment\n if token.text in _OPINION_WORDS:\n # if target is an adverb modifier (i.e. 
pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n if (token.dep_ == \"advmod\"):\n continue\n elif (token.dep_ == \"amod\"):\n sent_dict.add(token.head.text.lower())\n # for opinion words that are adjectives, adverbs, verbs...\n else:\n for child in token.children:\n # if verb, check if there's a direct object\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n sent_dict.add(child.text.lower())\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text == \"and\":\n conj=1\n if (conj == 1) and (subchild.text != \"and\"):\n subchildren.append(subchild.text)\n conj = 0\n for subchild in subchildren:\n sent_dict.add(subchild)\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text + \" \" + noun\n sent_dict.add(noun)\n return set(word.lower() for word in sent_dict)", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def features(self, words, tags, config):\n buffer = config['buffer']\n stack = config['stack']\n pred_tree = config['pred_tree']\n\n feat = []\n\n # Single word features\n b1_w = words[buffer[0]] if buffer else \"<empty>\"\n b1_t = tags[buffer[0]] if buffer else \"<empty>\"\n b1_wt = b1_w + \" \" + b1_t\n\n b2_w = words[buffer[1]] if len(buffer) > 1 else \"<empty>\"\n b2_t = tags[buffer[1]] if len(buffer) > 1 else \"<empty>\"\n b2_wt = b2_w + \" \" + b2_t\n\n b3_w = words[buffer[2]] if len(buffer) > 2 else \"<empty>\"\n b3_t = tags[buffer[2]] if len(buffer) > 2 else \"<empty>\"\n b3_wt = b3_w + \" \" + b3_t\n\n s1_w = words[stack[-1]] if stack else \"<empty>\"\n s1_t = tags[stack[-1]] if stack else \"<empty>\"\n s1_wt = s1_w + \" \" + s1_t\n\n s2_w = words[stack[-2]] if len(stack) > 1 else \"<empty>\"\n s2_t = tags[stack[-2]] if len(stack) > 1 else \"<empty>\"\n s2_wt = s2_w + \" \" + s2_t\n\n '''\n for i in pred_tree:\n if stack and pred_tree[stack[-1]] == i:\n feat.append(\"tag\" + str(i) + str(tags[i]))\n '''\n\n # Triple word features\n\n def is_parent(parent, child):\n if child == 0:\n return False\n if parent == child:\n return True\n return is_parent(parent, pred_tree[child])\n\n # Child that is the most on the left\n def lc1(parent):\n for i in range(0, len(words)):\n if is_parent(parent, i):\n return i\n return -1\n \n # Child that is the most on the right\n def rc1(parent):\n for i in range(0, len(words), -1):\n if is_parent(parent, i):\n return i\n return -1\n\n lc1_s1 = lc1(stack[-1]) if stack else -1\n rc1_s1 = rc1(stack[-1]) if stack else -1\n lc1_s2 = lc1(stack[-2]) if len(stack) > 1 else -1\n rc1_s2 = rc1(stack[-2]) if len(stack) > 1 else -1\n\n s2_t_s1_t_b1_t = s2_t + \" \" + s1_t + \" \" + b1_t\n if lc1_s1 >= 0:\n s2_t_s1_t_lc1_s1_t = s2_t + \" \" + s1_t + \" \" + tags[lc1_s1]\n else:\n s2_t_s1_t_lc1_s1_t = \"<empty>\"\n if rc1_s1 >= 0:\n s2_t_s1_t_rc1_s1_t = s2_t + \" \" + s1_t + \" \" + tags[rc1_s1]\n else:\n s2_t_s1_t_rc1_s1_t = \"<empty>\"\n if lc1_s2 >= 0:\n s2_t_s1_t_lc1_s2_t = s2_t + \" \" + s1_t + \" \" + tags[rc1_s2]\n else:\n s2_t_s1_t_lc1_s2_t = \"<empty>\"\n if rc1_s2 >= 0:\n s2_t_s1_t_rc1_s2_t = s2_t + \" \" + s1_t + \" \" + tags[rc1_s2]\n else:\n s2_t_s1_t_rc1_s2_t = \"<empty>\"\n if lc1_s2 >= 0:\n s2_t_s1_w_rc1_s2_t = s2_t + \" 
\" + s1_w + \" \" + tags[rc1_s2]\n else:\n s2_t_s1_w_rc1_s2_t = \"<empty>\"\n if lc1_s1 >= 0:\n s2_t_s1_w_lc1_s1_t = s2_t + \" \" + s1_w + \" \" + tags[lc1_s1]\n else:\n s2_t_s1_w_lc1_s1_t = \"<empty>\"\n\n feat.append(\"b1_w:\" + b1_w)\n feat.append(\"b1_t:\" + b1_t)\n feat.append(\"b1_wt:\" + b1_wt)\n\n feat.append(\"b2_w:\" + b2_w)\n feat.append(\"b2_t:\" + b2_t)\n feat.append(\"b2_wt:\" + b2_wt)\n\n feat.append(\"b3_w:\" + b3_w)\n feat.append(\"b3_t:\" + b3_t)\n feat.append(\"b3_wt:\" + b3_wt)\n\n feat.append(\"s1_w:\" + s1_w)\n feat.append(\"s1_t:\" + s1_t)\n feat.append(\"s1_wt:\" + s1_wt)\n\n feat.append(\"s2_w:\" + s2_w)\n feat.append(\"s2_t:\" + s2_t)\n feat.append(\"s2_wt:\" + s2_wt)\n\n feat.append(\"s1_wt_s2_wt:\" + s1_wt + \" \" + s2_wt)\n feat.append(\"s1_wt_s2_w:\" + s1_wt + \" \" + s2_w)\n feat.append(\"s1_wt_s2_t:\" + s1_wt + \" \" + s2_t)\n feat.append(\"s1_w_s2_wt:\" + s1_w + \" \" + s2_wt)\n feat.append(\"s1_t_s2_wt:\" + s1_t + \" \" + s2_wt)\n feat.append(\"s1_w_s2_w:\" + s1_w + \" \" + s2_w)\n feat.append(\"s1_t_s2_t:\" + s1_t + \" \" + s2_t)\n feat.append(\"s1_t_b1_t:\" + s1_t + \" \" + b1_t)\n\n feat.append(\"s2_t_s1_t_b1_t:\" + s2_t_s1_t_b1_t)\n feat.append(\"s2_t_s1_t_lc1_s1_t:\" + s2_t_s1_t_lc1_s1_t)\n feat.append(\"s2_t_s1_t_rc1_s1_t:\" + s2_t_s1_t_rc1_s1_t)\n feat.append(\"s2_t_s1_t_lc1_s2_t:\" + s2_t_s1_t_lc1_s2_t)\n feat.append(\"s2_t_s1_t_rc1_s2_t:\" + s2_t_s1_t_rc1_s2_t)\n feat.append(\"s2_t_s1_w_rc1_s2_t:\" + s2_t_s1_w_rc1_s2_t)\n feat.append(\"s2_t_s1_w_lc1_s1_t:\" + s2_t_s1_w_lc1_s1_t)\n\n\n return feat", "def features(self, tokens, index, history):\r\n # for more details see: http://nlpforhackers.io/named-entity-extraction/\r\n\r\n # init the stemmer\r\n stemmer = SnowballStemmer('english')\r\n\r\n # Pad the sequence with placeholders\r\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'), ('[END2]', '[END2]')]\r\n history = ['[START2]', '[START1]'] + list(history)\r\n\r\n # shift the index with 2, to accommodate the padding\r\n index += 2\r\n\r\n word, pos = tokens[index]\r\n prevword, prevpos = tokens[index - 1]\r\n prevprevword, prevprevpos = tokens[index - 2]\r\n nextword, nextpos = tokens[index + 1]\r\n nextnextword, nextnextpos = tokens[index + 2]\r\n previob = history[index - 1]\r\n contains_dash = '-' in word\r\n contains_dot = '.' 
in word\r\n allascii = all([True for c in word if c in string.ascii_lowercase])\r\n\r\n allcaps = word == word.capitalize()\r\n capitalized = word[0] in string.ascii_uppercase\r\n\r\n prevallcaps = prevword == prevword.capitalize()\r\n prevcapitalized = prevword[0] in string.ascii_uppercase\r\n\r\n nextallcaps = nextword == nextword.capitalize()\r\n nextcapitalized = nextword[0] in string.ascii_uppercase\r\n\r\n return {\r\n 'word': word,\r\n 'lemma': stemmer.stem(word),\r\n 'pos': pos,\r\n 'all-ascii': allascii,\r\n\r\n 'next-word': nextword,\r\n 'next-lemma': stemmer.stem(nextword),\r\n 'next-pos': nextpos,\r\n\r\n 'next-next-word': nextnextword,\r\n 'next-next-pos': nextnextpos,\r\n\r\n 'prev-word': prevword,\r\n 'prev-lemma': stemmer.stem(prevword),\r\n 'prev-pos': prevpos,\r\n\r\n 'prev-prev-word': prevprevword,\r\n 'prev-prev-pos': prevprevpos,\r\n\r\n 'prev-iob': previob,\r\n\r\n 'contains-dash': contains_dash,\r\n 'contains-dot': contains_dot,\r\n\r\n 'all-caps': allcaps,\r\n 'capitalized': capitalized,\r\n\r\n 'prev-all-caps': prevallcaps,\r\n 'prev-capitalized': prevcapitalized,\r\n\r\n 'next-all-caps': nextallcaps,\r\n 'next-capitalized': nextcapitalized,\r\n }", "def doc2features(self,sent):\n return [self.word2features(sent['tokens'], i) for i in range(len(sent['tokens']))]", "def make_training_data(feature_funcs, annotations):\n extractor = FeatureExtractor(feature_funcs)\n \n training_instances = []\n \n for sent_str, anns in annotations:\n tree = parser.raw_parse(sent_str).next()\n tree = convert_brackets(tree)\n # print tree\n # some preprocessing, align the positions and \n # also use the sentence string given the parse tree\n anns = align_annotation_with_sentence(sent_str, ' '.join(tree.leaves()), anns)\n sent_str = ' '.join(tree.leaves())\n for ann in anns:\n frame_name = ann.frame_name\n start, end = ann.target.start, ann.target.end\n frame = Frame(start, end, frame_name)\n frame_node = find_node_by_positions(tree, start, end)\n\n # TODO: bug here\n if frame_node is None: \n sys.stderr.write(\"Warning: %r does not correspond to any tree node in sentence \\\"%s\\\"\\nSkip it\\n \" %(frame, sent_str))\n continue\n \n for node, (node_start_pos, node_end_pos) in collect_nodes(tree):\n node_pos = NodePosition(node_start_pos, node_end_pos)\n context = Context(sent_str, tree, frame, node_pos)\n\n feature_values = extractor.extract(node, context)\n \n # try to see the it has some semantic role\n found_matching_node = False\n for fe in ann.FE:\n other_node = find_node_by_positions(tree, fe.start, fe.end)\n if node == other_node:\n training_instances.append((feature_values, fe.name))\n found_matching_node = True\n break\n\n # semantic role => NULL\n if not found_matching_node:\n training_instances.append((feature_values, 'NULL'))\n\n return training_instances", "def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n 
#print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass", "def chunk_to_features(chunk, tag_method=None, posdict=None, context_feats=False):\n out_string = StringIO()\n\n num_instances = 0\n # Look for the GLOSS_POS tier\n for inst in chunk:\n gpos_tier = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=tag_method)\n if gpos_tier:\n num_instances += 1\n\n # For each token in the tier...\n for i, gp in enumerate(gpos_tier):\n\n if ALIGNMENT not in gp.attributes:\n continue\n\n word = gp.igt.find(id=gp.attributes[ALIGNMENT]).value()\n tag = gp.value()\n\n prev_word = None\n next_word = None\n\n if context_feats:\n if i > 0:\n prev_word = gp.igt.find(id=gpos_tier[i-1].attributes[ALIGNMENT]).value()\n\n if i < len(gpos_tier)-1:\n next_word = gp.igt.find(id=gpos_tier[i+1].attributes[ALIGNMENT]).value()\n\n\n # Write out features...\n t = GoldTagPOSToken(word, goldlabel=tag)\n write_gram(t,\n feat_prev_gram=context_feats,\n feat_next_gram=context_feats,\n prev_gram=prev_word,\n next_gram=next_word,\n lowercase=True,\n output=out_string,\n posdict=posdict)\n\n return out_string.getvalue(), num_instances", "def getSentenceFeature(tokens, wordVectors, sentence):\n # Implement computation for the sentence features given a sentence. \n \n # Inputs: \n # - tokens: a dictionary that maps words to their indices in \n # the word vector list \n # - wordVectors: word vectors (each row) for all tokens \n # - sentence: a list of words in the sentence of interest \n\n # Output: \n # - sentVector: feature vector for the sentence \n\n sentence_vectors = [wordVectors[tokens[word]] for word in sentence]\n\n return sum(sentence_vectors) * 1.0 / len(sentence_vectors)" ]
[ "0.6499947", "0.63480926", "0.62033504", "0.6139011", "0.6135315", "0.608286", "0.6075097", "0.60615474", "0.6008933", "0.5990459", "0.5952437", "0.59342676", "0.59133846", "0.5895635", "0.5889323", "0.5888182", "0.58752054", "0.58739567", "0.5872163", "0.5853179", "0.58127546", "0.58093065", "0.5788621", "0.57626414", "0.57525504", "0.56939214", "0.56708765", "0.5658972", "0.5648329", "0.5647268" ]
0.75720364
0
Creates a campaign with propositions, users, and 4 categories in which the students are distributed with the probabilities 0.8, 0.1, 0.06, 0.04
def generate_batch_wishes(self, n_propositions=10, n_students=168): np.random.seed(0) campaign = Campaign(name="Batch campaign", manager_id="17bocquet") campaign.save() propositions = [] for i in range(n_propositions): proposition = Proposition( campaign_id=campaign.id, name="proposition_{}".format(i), # number_of_places=int(ceil(n_students / n_propositions)), TODO: remove this or restore it in the model. ) proposition.save() propositions.append(proposition) categories = [] for name in ["category_{}".format(i) for i in range(4)]: category = Category(name=name, campaign=campaign) category.save() categories.append(category) user_campaigns = [] for i in range(n_students): user = User( id="19user{}".format(i), first_name="firstname {}".format(i), last_name="lastname {}".format(i), year_of_entry=2019, email="email{}@mpt.fr".format(i), ) user.save() category = np.random.choice(categories, 1, p=[0.8, 0.1, 0.06, 0.04])[0] uc = UserCampaign(user=user, campaign=campaign, category=category) uc.save() user_campaigns.append(uc) if i < 157: # simulate that a few users didn't answer the form for (rank, proposition) in enumerate( np.random.permutation(propositions) ): wish = Wish(user_campaign=uc, proposition=proposition, rank=rank) wish.save() return campaign, propositions, categories, user_campaigns
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_citizens_campaign_game(self, number_to_create, demographic_data=None):\n provinces = demographic_data[\"provinces\"]\n country_population = sum([province['population'] for province in provinces])\n for province in provinces:\n province['population'] = round(number_to_create * (province['population'] /\n country_population))\n for k, province in enumerate(provinces):\n people_to_create = province['population'] if k < len(provinces) - 1 \\\n else number_to_create - len(self.citizen_list)\n for i in range(people_to_create):\n current_citizen = Citizen(str(self.population_size + 1))\n current_citizen.province = province['name']\n\n for topic in province['topic_preferences']:\n preference_choice = random.randint(0, 100)\n accumulate = 0\n\n for j, percent in enumerate(topic['preferences']):\n accumulate += percent\n if preference_choice <= accumulate:\n # The data in the JSON file goes from bad to good\n # So if the government isn't giving you something, you would want it.\n current_citizen.traits[topic['name']] = 5 - j\n break\n self.citizen_list.append(current_citizen)\n self.population_size += 1", "def create_test_data(users=5, categories=2, forums=2, topics=1, posts=1):\n create_default_groups()\n create_default_settings()\n\n data_created = {'users': 0, 'categories': 0, 'forums': 0,\n 'topics': 0, 'posts': 0}\n\n # create 5 users\n for u in range(1, users + 1):\n username = \"test%s\" % u\n email = \"test%[email protected]\" % u\n user = User(username=username, password=\"test\", email=email)\n user.primary_group_id = u\n user.activated = True\n user.save()\n data_created['users'] += 1\n\n user1 = User.query.filter_by(id=1).first()\n user2 = User.query.filter_by(id=2).first()\n\n # lets send them a few private messages\n for i in range(1, 3):\n # TODO\n pass\n\n # create 2 categories\n for i in range(1, categories + 1):\n category_title = \"Test Category %s\" % i\n category = Category(title=category_title,\n description=\"Test Description\")\n category.save()\n data_created['categories'] += 1\n\n # create 2 forums in each category\n for j in range(1, forums + 1):\n if i == 2:\n j += 2\n\n forum_title = \"Test Forum %s %s\" % (j, i)\n forum = Forum(title=forum_title, description=\"Test Description\",\n category_id=i)\n forum.save()\n data_created['forums'] += 1\n\n for t in range(1, topics + 1):\n # create a topic\n topic = Topic()\n post = Post()\n\n topic.title = \"Test Title %s\" % j\n post.content = \"Test Content\"\n topic.save(post=post, user=user1, forum=forum)\n data_created['topics'] += 1\n\n for p in range(1, posts + 1):\n # create a second post in the forum\n post = Post()\n post.content = \"Test Post\"\n post.save(user=user2, topic=topic)\n data_created['posts'] += 1\n\n return data_created", "def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0):\n # Sampling\n self.sample_skills_to_be_covered(skills_sample_fraction)\n self.sample_users(users_sample_fraction)", "def get_gold_probdist():\n\n # Read in the dataset as a pandas dataframe.\n card_data_annot = gspd.read_in_categorised()\n\n # Based on the frequencies of each category in the data, create probability distribution and return.\n probdist_dict = gspd.freq_dist_to_prob_dist(card_data_annot)\n return probdist_dict", "def classify_new_email(filename,probabilities_by_category,prior_by_category):\n ### TODO: Write your code here\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in 
probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n\n predict = \"\"\n if(spam_distribution > ham_distribution):\n predict = \"spam\"\n else:\n predict = \"ham\"\n\n word_distribution = [spam_distribution, ham_distribution]\n\n classify_result = (predict, word_distribution)\n\n return classify_result", "def learn_distributions(file_lists_by_category):\n ### TODO: Write your code here\n\n #get word frequncies in each email category\n #key:word, value: number of occurences in this email loader\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n\n #get total length of each email loader\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n\n #get the length of the dictionary: D\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n\n spam_distribution = {}\n ham_distribution = {}\n #get the distributions of two email loaders\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n #create the required tuple\n probabilities_by_category = (spam_distribution, ham_distribution)\n return probabilities_by_category", "def create_dataset():\n nb_subjects_per_category = 100\n\n # Generate random data using numpy\n # Two values are: Concentration of red blood cell and concentration of white blood cell\n # Generates two values and add the corresponding value with -2. Sick people get score lower than 0\n sick = np.random.randn( nb_subjects_per_category, 2) + np.array([-2,-2])\n # Generates two values and add the corresponding value with 2. Healthy people get score higher than 0\n healthy = np.random.randn( nb_subjects_per_category, 2) + np.array([2, 2])\n\n # combines the two arrays\n full_data = np.vstack([sick, healthy])\n\n # means that those sick people get a value of zero, and those healthy get a value of 1.\n # this gives an array of 10 composed of 5 0s followed by 5 1s.\n targets = np.concatenate((np.zeros(nb_subjects_per_category), np.zeros(nb_subjects_per_category) + 1))\n\n # Plot points. This is the data set being shown in a graph.\n # features[:, 0] means that we are slicing our 2D features of shape 100,2 and taking only the first column of all data\n # features[:, 1] means that we are slicing our array by taking only the second column of our data points\n # s: is marker size (draws bigger points)\n # c: describes the possible colors. Because our targets are 0s and 1s, then there is only two colors. Also, targets\n # array shows how to color the different elements in full_data depending on the index of targets. 
So I know the 50\n # last elements in full_data will have their own color because the last 50 items in targets all hold same value.\n plt.scatter(full_data[:, 0], full_data[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n # save picture of data points drawn.\n plt.savefig(\"DataPoints.png\")\n\n # can return multiple parameters at once\n return full_data, targets", "def conditionalDistribution(self, d, v):\n probabilities_ts = np.ones((self.n_topic_components, self.n_sentiment_components))\n firstFactor = (self.n_ds[d] + self.alphaVec) / \\\n (self.n_d[d] + np.sum(self.alphaVec))\n secondFactor = np.zeros((self.n_topic_components,self.n_sentiment_components))\n for s in range(self.n_sentiment_components):\n \n secondFactor[:,s] = ((self.n_dst[d, s, :] + self.gammaVec) / \\\n (self.n_ds[d, s] + np.sum(self.gammaVec)))\n\n thirdFactor = (self.n_vts[v,:, :] + self.beta) / \\\n (self.n_ts + self.n_vts.shape[0] * self.beta)\n\n #forthFactor = np.zeros((self.n_topic_components, self.n_sentiment_components))\n #for k in range(self.n_topic_components):\n # forthFactor[k,:] = np.exp(np.dot(self.topic_embeddings[k,:],self.word_embeddings[v,:]))/np.sum(np.exp(np.dot(self.topic_embeddings[k,:],self.word_embeddings.T)))\n \n forthFactor = np.exp(np.dot(self.topic_embeddings,self.word_embeddings[v,:]))/np.sum(np.exp(np.dot(self.topic_embeddings,self.word_embeddings.T)),-1)\n probabilities_ts *= firstFactor[:, np.newaxis]\n #probabilities_ts *= secondFactor * thirdFactor\n probabilities_ts *= secondFactor * ((1-self.lambda_)*thirdFactor + self.lambda_*forthFactor)\n probabilities_ts /= np.sum(probabilities_ts)\n \n return probabilities_ts", "def predictProbabilities(self,density ='Gaussian'):\n\t\ttestingProbs = pd.DataFrame(index=self.testing.index.values,\n\t\t\t\t\t\t\t\t\tcolumns=self.trainingMeans.index.values)\n\n\t\ttesting = self.testing.copy().drop(self.classLabel,1)\n\n\t\tdef calculateGaussian(x, mean, stdev):\n\t\t\t\"\"\"\n\t\t\tReturns the density value of a Gaussian distribution\n\t\t\t\"\"\"\n\t\t\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n\t\t\tvalue= (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\t\t\tif value==0:\n\t\t\t\treturn np.nan\n\t\t\telse:\n\t\t\t\treturn math.log(value)\n\n\t\tdef calculateBernoulli(x, mean, stdev):\n\t\t\t\"\"\"\n\t\t\tReturns the density value of a Bernoulli distribution\n\t\t\t\"\"\"\n\t\t\tif x:\n\t\t\t\tprob = mean\n\t\t\telse:\n\t\t\t\tprob = 1-mean\n\t\t\treturn prob\n\n\t\tdef calculateMultinoulli(x, *series):\n\t\t\t\"\"\"\n\t\t\tReturns the density value of a Multinoulli distribution\n\t\t\t\"\"\"\n\t\t\tseries= series[0]\n\t\t\treturn series.ix[x]/float(series.sum())\n\n\t\tif density=='Multinoulli':\n\t\t\t#Redefine the parameters to be conditional means\n\t\t\tfor each in self.params.columns:\n\t\t\t\tfor el in self.params.index:\n\t\t\t\t\tmultiDF = pd.Series(index=self.data[each].unique())\n\t\t\t\t\tcounts = self.training[self.training[self.classLabel]==el][each].value_counts()\n\t\t\t\t\tself.params.ix[el][each] = (pd.concat([multiDF,counts],1).drop(0,1),)\n\t\t\tpdf = calculateMultinoulli\n\t\telif density == 'Bernoulli':\n\t\t\tpdf =calculateBernoulli\n\t\telse:\n\t\t\tpdf = calculateGaussian\n\n\t\tprint \"Note: Assuming features follow a \"+density+\" distribution\"\n\n\t\tfor el in testingProbs.columns:\n\t\t\t#Retrieve parameters of distribution\n\t\t\tparameters = self.params.ix[el]\n\t\t\tprobabilities = self.testing.copy().drop(self.classLabel,1)\n\n\t\t\t#For each feature, compute the likelihood of class 
being el\n\t\t\tfor each in probabilities.columns:\n\t\t\t\t#Skip features with 0 standard deviation\n\t\t\t\tif each in self.useless_features:\n\t\t\t\t\tcontinue\n\t\t\t\tprobabilities[each] = probabilities[each].apply(lambda x: pdf(x,*parameters[each]))\n\n\t\t\t#Multiply features together with prior\n\t\t\ttestingProbs[el] = math.log(self.priors.ix[el])+probabilities.sum(1)\n\t\t\t#testingProbs[el] = self.priors.ix[el]*probabilities.prod(1)\n\t\t#Use log-sum-exp trick. We need the offsetting factor as max among classLabels\n\t\tB = testingProbs.max(1)\n\t\t#Compute log_sum = log(\\sigma_c' exp(b_c' - B)) + B\n\t\tlog_sum = testingProbs.apply(lambda t: (t-B)).applymap(lambda u: math.exp(u)).sum(1).apply(math.log)+B\n\t\tself.testingProbs = testingProbs.apply(lambda x: x-log_sum)\n\t\t#self.testingProbs = testingProbs", "def create_scenarios(self, proposals):\n \n participants = [p.company.values[0] for p in proposals]\n proposals = self.add_null_proposals(proposals)\n priority_prod = (\n pd.DataFrame(\n product(*(p.priority for p in proposals)),\n columns=participants)\n .reset_index(drop=False)\n .rename(columns={'index':'scenario_id'})\n .melt(id_vars='scenario_id', var_name='company', value_name='priority')\n .merge(pd.concat(proposals), how='outer', on=['company', 'priority'])\n )\n return priority_prod", "def test_create_category_scaled_score_success(self):\n score_test = score.ScoresGenerator()\n for test in self.success_create_category_scaled_score_test_params:\n score_test.create_category_scaled_score(test[KEY_INPUT])\n self.assertEqual(score_test.SCALED_SCORES[test[KEY_INPUT]], test[KEY_EXPECTED])", "def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]", "def create_surveys_c():\n messages = pd.read_csv(Path(survey_dir, 'messages-cleaned.csv'))\n\n # Get rid of bad recordings\n messages['to_remove'] = messages.to_remove.fillna(0)\n messages = messages.ix[messages.to_remove == 0]\n\n # Split off the seeds\n seeds = messages.ix[messages.generation == 0]\n imitations = messages.ix[messages.generation != 0]\n\n between_game_name = 'between-category-game-a'\n within_game_name = 'within-category-game-a'\n\n between_choices = seeds.ix[seeds.game_name == between_game_name, 'message_id'].tolist()\n within_choices = seeds.ix[seeds.game_name == within_game_name, 'message_id'].tolist()\n\n between_imitations = imitations.ix[(imitations.game_name == between_game_name) & (imitations.chain_name != 'splish'), 'message_id'].tolist()\n within_imitations = imitations.ix[(imitations.game_name == within_game_name) & (imitations.chain_name != 'splish'), 'message_id'].tolist()\n\n between_splish = imitations.ix[(imitations.game_name == between_game_name) & (imitations.chain_name == 'splish'), 'message_id'].tolist()\n within_splish = imitations.ix[(imitations.game_name == within_game_name) & (imitations.chain_name == 'splish'), 'message_id'].tolist()\n\n # select imitations at random because we don't have time to collect all ratings\n random.seed(100)\n\n all_between_given = between_splish + within_splish + 
random.sample(between_imitations, 100)\n\n between_plus_within_splish = {}\n between_plus_within_splish['choices'] = between_choices\n between_plus_within_splish['given'] = all_between_given\n\n convert_to_int(between_plus_within_splish)\n\n with open(Path(survey_dir, 'between_survey_with_within_splish.json'), 'w') as f:\n f.write(json.dumps(between_plus_within_splish))\n\n all_within_given = within_splish + between_splish + random.sample(within_imitations, 100)\n\n within_plus_between_splish = {}\n within_plus_between_splish['choices'] = within_choices\n within_plus_between_splish['given'] = all_within_given\n\n convert_to_int(within_plus_between_splish)\n\n with open(Path(survey_dir, 'within_survey_with_between_splish.json'), 'w') as f:\n f.write(json.dumps(within_plus_between_splish))\n\n return between_plus_within_splish, within_plus_between_splish", "def __init__(self, classes, data_size):\r\n self.classes = classes\r\n self.data_size = data_size\r\n self.conditional_prob = {class_:{} for class_ in classes} # Conditional Probability Table for storing parameters useful to compute P(feat|class_)\r\n self.class_prob = {} # Stores the priors\r", "def createStudentWithProposal(self):\n self.createStudent()\n from soc.modules.gsoc.models.proposal import GSoCProposal\n properties = {'link_id': self.profile.link_id, 'scope': self.profile,\n 'parent': self.profile, 'status': 'new'}\n seeder_logic.seed(GSoCProposal, properties)", "def conditional_probability(data, attr, cp_table):\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # number of instances beloning to each class\n nclass0 = cp_table.loc[0, class0].sum()\n nclass1 = cp_table.loc[0, class1].sum()\n total = nclass0 + nclass1\n # all probabilities include a laplace est of 1\n prior0 = (nclass0 + 1) / (total + 2)\n prior1 = (nclass1 + 1) / (total + 2)\n list0 = []\n list1 = []\n for index, row in cp_table.iterrows():\n numattr = len(attr.loc[index, 'vars'])\n numer0 = row[class0] + 1\n numer1 = row[class1] + 1\n denom0 = nclass0 + (1 * numattr)\n denom1 = nclass1 + (1 * numattr)\n cp0 = numer0 / denom0\n cp1 = numer1 / denom1\n list0.append(cp0)\n list1.append(cp1)\n # replacing columns in previous table with cond probs\n del cp_table[class0]\n del cp_table[class1]\n cp_table[class0] = list0\n cp_table[class1] = list1\n \n return cp_table, prior0, prior1", "def set_uniform_probabilities(self, sentence_aligned_corpus):\n ...", "def setUp(self) -> None:\n super().setUp()\n\n user_models.UserContributionProficiencyModel(\n id='%s.%s' % (self.SCORE_CATEGORY_1, self.USER_1_ID),\n user_id=self.USER_1_ID,\n score_category=self.SCORE_CATEGORY_1,\n score=1.5,\n onboarding_email_sent=False\n ).put()\n user_models.UserContributionProficiencyModel(\n id='%s.%s' % (self.SCORE_CATEGORY_2, self.USER_1_ID),\n user_id=self.USER_1_ID,\n score_category=self.SCORE_CATEGORY_2,\n score=2,\n onboarding_email_sent=False\n ).put()\n user_models.UserContributionProficiencyModel(\n id='%s.%s' % (self.SCORE_CATEGORY_1, self.USER_2_ID),\n user_id=self.USER_2_ID,\n score_category=self.SCORE_CATEGORY_1,\n score=1.5,\n onboarding_email_sent=False,\n deleted=True\n ).put()", "def _categorical(self, rewards, probs, dones):\n\n # Create local vars to keep code more concise\n vmin = self.vmin\n vmax = self.vmax\n atoms = self.atoms\n num_atoms = self.num_atoms\n gamma = self.gamma\n rollout = self.rollout\n\n # rewards/dones shape from 
[batchsize,] to [batchsize,1]\n rewards = rewards.unsqueeze(-1)\n dones = dones.unsqueeze(-1).type(torch.float)\n\n delta_z = (vmax - vmin) / (num_atoms - 1)\n\n projected_atoms = rewards + gamma**rollout * atoms * (1 - dones)\n projected_atoms.clamp_(vmin, vmax)\n b = (projected_atoms - vmin) / delta_z\n\n # It seems that on professional level GPUs (for instance on AWS), the\n # floating point math is accurate to the degree that a tensor printing\n # as 99.00000 might in fact be 99.000000001 in the backend, perhaps due\n # to binary imprecision, but resulting in 99.00000...ceil() evaluating\n # to 100 instead of 99. Forcibly reducing the precision to the minimum\n # seems to be the only solution to this problem, and presents no issues\n # to the accuracy of calculating lower/upper_bound correctly.\n precision = 1\n b = torch.round(b * 10**precision) / 10**precision\n lower_bound = b.floor()\n upper_bound = b.ceil()\n\n m_lower = (upper_bound + (lower_bound == upper_bound).float() - b) * probs\n m_upper = (b - lower_bound) * probs\n\n projected_probs = torch.tensor(np.zeros(probs.size())).to(self.device)\n\n for idx in range(probs.size(0)):\n projected_probs[idx].index_add_(0, lower_bound[idx].long(), m_lower[idx].double())\n projected_probs[idx].index_add_(0, upper_bound[idx].long(), m_upper[idx].double())\n return projected_probs.float()", "def generate_pca(X, y, cols, n_components, **kwargs):\n\n pca = PCA(n_components, **kwargs)\n pca_result = pca.fit_transform(X)\n pca_df = pd.DataFrame(pca_result, columns=cols, index=X.index)\n pca_df['label'] = y\n pca_plot = ggplot(pca_df, aes(x=\"PCA-1\", y=\"PCA-2\", color='label') ) + geom_point(size=100,alpha=0.8) + ggtitle(\"First and Second Principal Components colored by class\")\n return pca_plot", "def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)", "def generateCitySentimentData(firstPoints,secondPoints):\n\tcityDict = fillStanceDictNamesAsKeys(firstPoints,secondPoints)\n\n\tcsvFile1 = open('Data/city_ratio.csv', 'w',encoding = \"utf-8\")\n\tcsvWriter1 = csv.writer(csvFile1,delimiter = \",\")\n\tcsvWriter1.writerow([\"City\",\"SentimentRatio\"])\n\tfor key in cityDict.keys():\n\t\tcsvWriter1.writerow([key,\n\t\t (cityDict[key][0]+1)/(cityDict[key][0]+cityDict[key][1]+1)]) # AKP over CHP ratio\n\tcsvFile1.close()", "def makeCluster(self):\n for i in range(self.k):\n #vector of length total users, pick random number 1-5\n self.centroids.append(np.random.uniform(low=1,high=5,size=len(self.user)))\n memberList = []\n self.membership.append(memberList)\n self.centroids = np.round(self.centroids)\n\n for movie in self.dictionary.keys():\n #Finds the index of the closest centroid\n closest = np.argmin(self.calculateDistance(self.dictionary[movie]))\n newVector = []\n newVector.append(movie)\n #Add the movie to the list of members of the closest centroid\n self.membership[closest].append(newVector)\n self.recalculateCentroid(self.membership[closest], closest)", "def create_random_proposals(self): \r\n global MAX_NUMBER_PROPOSALS\r\n global LOCATIONS\r\n global CATEGORIES\r\n \r\n for i in range(MAX_NUMBER_PROPOSALS):\r\n description = \"\"\r\n location = locations_rv.rvs(size=1)[0]\r\n category = categories_rv.rvs(size=1)[0]\r\n budget = random.uniform(500000, 
1000000)\r\n project = Project(i, description, category, budget, location)\r\n self.proposals.append(project)", "def pca(adata, n_components=50, train_ratio=0.35, n_batches=50, gpu=False):\n\n train_size = math.ceil(adata.X.shape[0] * train_ratio)\n\n if gpu:\n from cuml.decomposition import PCA\n import cupy as cp\n else:\n from sklearn.decomposition import PCA\n import numpy as cp\n\n pca = PCA(n_components=n_components).fit(adata.X[:train_size])\n \n embeddings = cp.zeros((adata.X.shape[0], n_components))\n batch_size = int(embeddings.shape[0] / n_batches)\n for batch in range(n_batches):\n start_idx = batch * batch_size\n end_idx = start_idx + batch_size\n\n if(adata.X.shape[0] - end_idx < batch_size):\n end_idx = adata.X.shape[0]\n\n embeddings[start_idx:end_idx,:] = cp.asarray(pca.transform(adata.X[start_idx:end_idx]))\n \n if gpu:\n embeddings = embeddings.get()\n\n adata.obsm[\"X_pca\"] = embeddings\n return adata", "def __init__(self, p_donate, pos_amounts=[1.0,], pos_amounts_distribution=[1.0,]):\n self.p_donate = p_donate\n self.pos_amounts = pos_amounts\n self.pos_amounts_distribution = pos_amounts_distribution\n zero = np.array([0,])\n p_donate = np.array([p_donate],)\n pos_amounts = np.array(pos_amounts)\n pos_amounts_distribution = np.array(pos_amounts_distribution)\n self.values = np.concatenate([zero,pos_amounts])\n self.distribution = np.concatenate([1 - p_donate, p_donate * pos_amounts_distribution])", "def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile", "def train(self, ngrams):\n unique_ngrams = pd.Series(ngrams).unique()\n # ngram counts C(w_1, ..., w_n)\n n1grams, c_ngram, 
c_n1gram = [], [], []\n for ngram in ngrams:\n n1grams.append(ngram[:-1]) # Construct n1gram\n c_ngram.append(ngrams.count(ngram)) # ngram occurrence\n \n # n-1 gram counts C(w_1, ..., w_(n-1))\n for n1gram in n1grams:\n c_n1gram.append(n1grams.count(n1gram))\n\n # Create the conditional probabilities\n probs = np.array(c_ngram) / np.array(c_n1gram)\n \n # Put it all together\n ngram_col = pd.Series(ngrams, name='ngram')\n n1gram_col = pd.Series(n1grams, name='n1gram')\n prob_col = pd.Series(probs, name='prob')\n\n # print(c_ngram, c_n1gram)\n df = pd.DataFrame([ngram_col, n1gram_col, prob_col]).T\n no_dup = df.drop_duplicates('ngram').reset_index(drop=True)\n return no_dup", "def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33,\n popular_sample_fraction=0.33, rare_threshold=0.33,\n popular_threshold=0.33, user_sample_fraction=1.0):\n print('In freelancer.')\n self.sample_users(user_sample_fraction)\n df_users = pd.DataFrame(self.users)\n df_users_sampled = df_users[df_users['user_id'].isin(self.E)]\n\n # Get categorized skills\n r, c, p = self.categorize_skills(df_users_sampled, rare_threshold, popular_threshold)\n\n # Sample skills from each category\n num_rare_skills = int(num_sampled_skills * rare_sample_fraction)\n num_popular_skills = int(num_sampled_skills * popular_sample_fraction)\n num_common_skills = num_sampled_skills - num_rare_skills - num_popular_skills\n\n # Ensure that skills to be sampled in each category is >= number of skills in that category\n if num_rare_skills > len(r):\n num_rare_skills = len(r)\n if num_common_skills > len(c):\n num_common_skills = len(c)\n if num_common_skills < 0:\n num_common_skills = 0\n if num_popular_skills > len(p):\n num_popular_skills = len(p)\n\n sampled_rare_skills = np.random.choice(r, size=num_rare_skills, replace=False)\n sampled_common_skills = np.random.choice(c, size=num_common_skills, replace=False)\n sampled_popular_skills = np.random.choice(p, size=num_popular_skills, replace=False)\n\n # Merge indices of all sampled skills\n sampled_skills = np.concatenate((sampled_rare_skills, sampled_common_skills, sampled_popular_skills))\n\n # Create final skills sample\n self.skills_covered = np.zeros(self.num_skills)\n \n for skill_id in range(self.num_skills):\n if skill_id not in sampled_skills:\n self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered\n\n self.skills_covered = self.skills_covered.astype(bool)\n self.num_rare_skills = num_rare_skills\n self.num_common_skills = num_common_skills\n self.num_popular_skills = num_popular_skills", "def generate_Pr(categories):\n # Initializing free parameters\n c = 6.5\n phi = 2.0\n lambda_w = 0.03\n lambda_alpha = 0.0033\n # lambda_alpha = 0\n\n # Initializing Hyper parameters\n r = 1\n q = 1\n alpha = [0.3333, 0.3333, 0.3333]\n w = [[0.0, 0.0] for i in range(8)]\n\n Pr = []\n\n Pr_sum = 0\n for epochs in range(400):\n\n current_iteration = epochs\n if current_iteration >= 8:\n current_iteration = current_iteration % 8\n current_stimulus = stimuli[current_iteration]\n\n hidden_activations = hidden_layer_activations(current_stimulus, stimuli, stimuli, alpha, r, q, c)\n output_activations = output_layer_activations(categories, hidden_activations, w)\n\n correct = 0\n # Finding what is correct\n for k in range(len(output_activations)):\n if current_iteration in categories[k]:\n correct = k\n\n\n p = probability_of_category(correct, phi, output_activations)\n Pr_sum += p\n\n if (current_iteration + 1) % 8 == 0:\n Pr_sum /= 8\n 
Pr.append(Pr_sum)\n Pr_sum = 0\n\n # Online Learning of w and alpha\n del_w = find_del_w(lambda_w, output_activations, hidden_activations, categories)\n for j in range(len(hidden_activations)):\n for k in range(len(output_activations)):\n w[j][k] += del_w[j][k]\n del_alpha = find_del_alpha(current_iteration, current_stimulus, lambda_alpha, c, stimuli,\n output_activations, hidden_activations, w, categories)\n\n for i in range(3):\n alpha[i] += del_alpha[i]\n if (alpha[i] < 0):\n alpha[i] = 0\n\n return Pr" ]
[ "0.60540473", "0.54775965", "0.5420016", "0.53115493", "0.527698", "0.5219173", "0.5213245", "0.52056885", "0.5204419", "0.5188002", "0.5140095", "0.51223373", "0.50955987", "0.50922906", "0.50904256", "0.5060676", "0.5026107", "0.49677777", "0.49602473", "0.4956977", "0.49455723", "0.4938023", "0.49321833", "0.4926397", "0.49169165", "0.49168047", "0.49120364", "0.4911387", "0.48970288", "0.48951027" ]
0.67905664
0
Computes the number of correct predictions.
def num_correct_fun(preds, labels):
    assert preds.size(0) == labels.size(
        0
    ), "Batch dim of predictions and labels must match"
    # Find number of correct predictions
    num_correct = (preds == labels).float().sum()
    return num_correct
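A minimal usage sketch follows; the tensors and expected output are illustrative assumptions added for this edit, not part of the record.

import torch

# hypothetical batch of 4 predicted class ids vs. ground-truth labels
preds = torch.tensor([1, 0, 2, 1])
labels = torch.tensor([1, 0, 1, 1])
print(num_correct_fun(preds, labels))  # tensor(3.)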
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, test_data):\n count = 0.0\n for testcase in test_data:\n answer = np.argmax(testcase[1])\n prediction = np.argmax(self.feed_forward(testcase[0]))\n count = count + 1 if (answer - prediction) == 0 else count\n return count", "def accuracy_score(truth, predicted):\n return len(np.where(truth==predicted)[0]) / len(truth)", "def accuracy_score(truth, predicted):\n return len(np.where(truth==predicted)[0]) / len(truth)", "def accuracy(predictions, targets):\n correct_count = 0\n for prediction, target in zip(predictions, targets):\n if prediction == target:\n correct_count += 1\n return correct_count / len(predictions)", "def score(self, X_test: List[str], y_test: List[str]) -> int:\n predictions_count = 0\n right_predictions_count = 0\n\n for i in range(len(X_test)):\n label = self.predict(X_test[i].split())\n predictions_count += 1\n right_predictions_count += 1 if label == y_test[i] else 0\n\n return right_predictions_count / predictions_count", "def score(self, predictions):\n return 0.", "def get_hit_num(pred, y_truth):\n\n hit_num = 0\n for i in range(len(y_truth)):\n for value in y_truth[i]:\n hit_num += np.sum(pred[i] == value)\n return hit_num", "def precision(y, ypred):\n return len(set(y).intersection(set(ypred))) / len(ypred)", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n correct = 0\n for i in range(len(targets)):\n if(predictions[i] == targets[i]):\n correct += 1\n accuracy = correct/len(targets)\n #raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def _count_correct_prediction(\n self, logits: Dict[str, torch.Tensor], labels: torch.Tensor\n ) -> None:\n if len(labels.size()) != 1: # For e.g., CutMix labels\n return\n for module_name, logit in logits.items():\n _, predicted = torch.max(F.softmax(logit, dim=1).data, 1)\n n_correct = int((predicted == labels).sum().cpu())\n self.n_correct_epoch[module_name] += n_correct", "def accuracy(predictions, labels):\n predictions = list(predictions)\n labels = list(labels)\n count = 0\n for i in range(len(labels)):\n if labels[i] == predictions[i]:\n count += 1\n\n return count / len(labels)", "def predictionAccuracy(self, predicted, actual):\n\t\taccuracyCount=0\n\n\t\t###### your implementation below ######\n\t\tfor x in range(len(predicted)):\n\t\t\tif (predicted[x] == actual[x]):\n\t\t\t\taccuracyCount += 1\n\t\taccuracyCount /= len(predicted)\n\t\treturn accuracyCount;", "def total_predictions(self):\n return self._total_predictions", "def find_prediction_success_rate(decision_tree, test_examples, attributes):\n totalCorrect = 0\n for example in test_examples:\n actualResult = example[14]\n prediction = decision_tree_prediction(example, decision_tree, attributes)\n if prediction == actualResult:\n totalCorrect = totalCorrect + 1\n return totalCorrect / len(test_examples)", "def total_predict_batches(self) -> int:\n return sum(self.trainer.num_predict_batches)", "def successes(predictions,truth):\n\ttotal = len(predictions)\n\tcorrect = 0.0\n\tfor p in predictions:\n\t\tif p == truth:\n\t\t\tcorrect += 1\n\t\telse:\n\t\t\tprint truth,\"\\t\",p\n\treturn correct", "def accuracy(predictions, targets):\n\n compare = predictions == targets\n # compare = (predictions.argmax(dim=1)) == (targets)\n # compare = (predictions.argmax(dim=1)) == (targets.argmax(dim=1))\n # summed = compare.sum().item()\n summed = compare.sum()\n # print(summed, compare.size())\n # 
print(compare.size()[0])\n return summed/compare.size", "def ovo_test(dic, x_test, y_test, nb_classes):\n TP = FP = TN = FN = 0\n correct_predict = 0\n for index, value in enumerate(x_test):\n predict = {elem : 0 for elem in nb_classes}\n for key in dic:\n predict[key[0] if dic[key].predict([value]) else key[1]] += 1\n predicted_value = max(predict.items(), key=operator.itemgetter(1))[0]\n if predicted_value == y_test[index]:\n correct_predict += 1\n TP += 1\n else:\n FP += 1\n \n return correct_predict, TP, FP, TN, FN", "def ovr_test(dic, x_test, y_test, nb_classes):\n TP = FP = TN = FN = 0\n correct_predict = 0\n for index, value in enumerate(x_test):\n predict = {elem : 0 for elem in nb_classes}\n for key in dic:\n predict[key] = dic[key].predict_proba([value])[0][1]\n\n predicted_value = max(predict.items(), key=operator.itemgetter(1))[0]\n if predicted_value == y_test[index]:\n correct_predict += 1\n TP += 1\n else:\n FP += 1\n \n return correct_predict, TP, FP, TN, FN", "def compare(predictions, truth):\n comp = predictions - truth\n return 1 - (np.count_nonzero(comp) / len(predictions))", "def pk(y_true, y_pred, k):\n \n # if k is 0, return 0. we should never have this\n # as k is always >= 1\n if k == 0:\n return 0\n # we are interested only in top-k predictions\n y_pred = y_pred[:k]\n \n # convert predictions to set\n pred_set = set(y_pred)\n \n # convert actual values to set\n true_set = set(y_true)\n \n # find common values\n common_values = pred_set.intersection(true_set)\n \n # return length of common values over k\n return len(common_values) / len(y_pred[:k])", "def _calculate_score(predictions: np.ndarray, correct: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(np.log(predictions + 1) - np.log(correct + 1))) / len(correct))", "def calc_accuracy(true, predicted):\n return sum([t==p for t,p in zip(true, predicted)]) / float(len(true))", "def calculateResults(predictions, answers):\r\n t = 0\r\n f = 0\r\n for i in range(len(answers)):\r\n if predictions[i] == answers[i]:\r\n t += 1\r\n else:\r\n f += 1\r\n\r\n print(\"The Percent of Correct Predictions is {t}%\".format(t=round((t * 100 / len(answers)), 1)))\r\n print(\"The Percent of Incorrect Predictions is {f}%\\n\".format(f=round((f * 100 / len(answers)), 1)))", "def evaluate_predictions(predictions, actual):\n sum = 0\n for estimate, real in zip(predictions, actual):\n real = 1 if real==0 else real\n sum += pow((float(real) - estimate)/real, 2)\n return pow(sum/len(actual), 0.5)", "def evaluate(labels, predictions):\n\n truePositiveCounter = 0\n trueNegativeCounter = 0\n truePositiveCorrect = 0\n trueNegativeCorrect = 0\n \n sensitivity = 0\n specificity = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n truePositiveCounter += 1\n if(labels[i] == predictions[i]):\n truePositiveCorrect += 1\n elif labels[i] == 0:\n trueNegativeCounter += 1\n if(labels[i] == predictions[i]):\n trueNegativeCorrect += 1\n\n sensitivity = truePositiveCorrect / truePositiveCounter\n specificity = trueNegativeCorrect / trueNegativeCounter\n\n return sensitivity, specificity", "def get_pred_length(self):\n return self.prediction_length", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def get_accuracy(actual, predicted):\n predicted_correct = 0\n # for each index in the actual result\n for i in range(len(actual)):\n # if actual is the same as predicted\n if actual[i] == predicted[i]:\n predicted_correct+=1\n return predicted_correct/len(actual)", "def test_verify_npred(self):\n pwl=models.PowerLaw(index=2 * u.Unit(''),\n 
amplitude=2e-11 * u.Unit('cm-2 s-1 TeV-1'),\n reference=1 * u.TeV)\n\n npred_stacked=self.obs_stacker.stacked_obs.predicted_counts(model=pwl)\n\n npred1=self.obs_list[0].predicted_counts(model=pwl)\n npred2=self.obs_list[1].predicted_counts(model=pwl)\n # Set npred outside safe range to 0\n npred1.data.data[np.nonzero(self.obs_list[0].on_vector.quality)]=0\n npred2.data.data[np.nonzero(self.obs_list[1].on_vector.quality)]=0\n\n npred_summed=npred1.data.data + npred2.data.data\n\n assert_allclose(npred_stacked.data.data, npred_summed)" ]
[ "0.7441072", "0.72760016", "0.72760016", "0.7261768", "0.7162763", "0.71161926", "0.7093444", "0.7023508", "0.69957274", "0.69852626", "0.69605917", "0.69590217", "0.69373053", "0.6929616", "0.69030887", "0.68987405", "0.6885546", "0.6881008", "0.68792915", "0.6875002", "0.6863347", "0.68380153", "0.6815987", "0.67836106", "0.67595583", "0.6717766", "0.6694823", "0.6682553", "0.66518176", "0.66496813" ]
0.78854287
0
Computes the label error.
def label_errors(preds, labels):
    num_correct = num_correct_fun(preds, labels)
    return (1.0 - num_correct / preds.size(0)) * 100.0
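A hedged usage sketch, assuming `num_correct_fun` from the previous record is in scope; the tensors are illustrative only.

import torch

preds = torch.tensor([1, 0, 2, 1])
labels = torch.tensor([1, 0, 1, 1])
# 3 of 4 predictions are correct, so the error is 25%
print(label_errors(preds, labels))  # tensor(25.)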
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)", "def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)", "def eval_error_metric(predt, dtrain: xgb.DMatrix):\n label = dtrain.get_label()\n r = np.zeros(predt.shape)\n gt = predt > 0.5\n if predt.size == 0:\n return \"CustomErr\", 0\n r[gt] = 1 - label[gt]\n le = predt <= 0.5\n r[le] = label[le]\n return 'CustomErr', np.sum(r)", "def squaredError(label, prediction):\n return (label-prediction)*(label-prediction)", "def get_error(scores, labels):\r\n bs = scores.size(0) # 'bs' stands for 'batch size'\r\n predicted_labels = scores.argmax(dim = 1) # Tensor with 'bs' entries\r\n indicator = (predicted_labels == labels) # Tensor containing 'True' for each success\r\n num_matches = indicator.sum().item()\r\n return 1 - (num_matches / bs)", "def calculate_loss(self, a, label):\n if self.loss == 'mse':\n diff = a - label\n err = np.square(diff).mean(axis=0).mean()\n elif self.loss == 'ce':\n return sum(-np.log2(a[label > 0]))\n else:\n raise ValueError('loss function not implemented')\n return err", "def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)", "def labeled_loss_calculation(self, labeled_examples, labels):\n predicted_labels, _ = self.D(labeled_examples)\n labeled_loss = self.labeled_loss_function(predicted_labels, labels, order=self.settings.labeled_loss_order)\n labeled_loss *= self.settings.labeled_loss_multiplier\n return labeled_loss", "def err_num(gold_label, labels):\n return len([x for x in labels if (gold_label != -1 and x != -1 and x != gold_label)])", "def categorical_error(pred, label):\n pred_label = pred.argmax(1)\n return (pred_label != label.flat).mean()", "def calc_error_dist(self):\n pass", "def compute_error(y_true, y_pred):\r\n\r\n # INSERT YOUR CODE HERE\r\n \r\n n = len(y_true)\r\n err = [y_true[i] != y_pred[i] for i in range(n)]\r\n return sum(err) / n\r\n \r\n raise Exception('Function not yet implemented!')", "def error_rate(predictions, labels):\n return 100.0 - (100*(np.sum(predictions == labels)/float(predictions.shape[0]*predictions.shape[1])))", "def error_compute(self):\n self.tt_error = np.linalg.norm(self.rel_error)\n if self.global_rank==0:print('Overall error is::',self.tt_error)\n return {'NMF': self.rel_error, 'tt': self.tt_error}", "def label(cls) -> str:\n return \"!lobotomy.error\"", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n numpy.sum(numpy.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def reserrorcalc(test_set, model):\n # Extracting X\n X = test_set[:,:-1]\n\n # Extracting labels\n Y = test_set[:,-1]\n residual_err = sum((model.predict(X) - Y) ** 2)\n return residual_err", "def _compute_error(self,expected_out,actual_out,error_func):\n\n error = error_func(expected_out,actual_out)\n return error", "def error(Y, X):\n return (Y - X) ** 2", "def _df_err(self):\n return self.n - self.k - 1", "def compute_error(self, X, Y):\n\n if self.method != 'knn':\n accuracy = self.classifier.score(X, Y)\n error = 1 - accuracy\n return error\n else:\n distances, indices = self.classifier.kneighbors(X)\n error = 0\n for index, ground_truth in zip(indices, 
Y):\n classes = [self.train_Y[neigbhor] for neigbhor in index]\n mode, _ = stats.mode(classes)\n if mode != ground_truth:\n error += 1\n\n return error / len(Y)", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def yerr(self, i):\n return self.errors[1][i]", "def get_error(self, params):\n return self.endog - self.predict(params)", "def compute_error(y_true, y_pred):\r\n length = len(y_true)\r\n\r\n error_cnt = 0\r\n\r\n for i in range (length):\r\n if y_true[i] != y_pred[i]:\r\n error_cnt = error_cnt+1\r\n error = (1/length) * error_cnt\r\n return error", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def calc_loss(predictions, labels):\n return np.mean(np.square(predictions - labels))", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def compute_error_cross_dataset(AL, train_y):\n # print(train_y.shape)\n nb = train_y.shape[0]\n error=np.power(np.add(train_y,-AL),2)*1/nb\n return error\n # raise NotImplementedError" ]
[ "0.72471374", "0.6982414", "0.6836361", "0.6726223", "0.66949826", "0.66395104", "0.6596141", "0.6493463", "0.64885974", "0.64688194", "0.6429107", "0.63847214", "0.6296689", "0.6245625", "0.6220233", "0.6192241", "0.6189202", "0.6174556", "0.6165178", "0.6155069", "0.6149445", "0.6128924", "0.61285394", "0.61126924", "0.61096257", "0.6051444", "0.60485077", "0.6021506", "0.59855413", "0.59844136" ]
0.729121
0
Computes the GPU memory usage for the current device (MB).
def gpu_mem_usage():
    mem_usage_bytes = torch.cuda.max_memory_allocated()
    return mem_usage_bytes / _B_IN_MB
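A hedged usage sketch; `_B_IN_MB` is not defined in this record, so it is assumed here to be 1024 ** 2 (bytes per MB), matching the MB conversion the docstring describes.

import torch

_B_IN_MB = 1024 ** 2  # assumed constant, not taken from the record

if torch.cuda.is_available():
    print(f"peak GPU memory: {gpu_mem_usage():.1f} MB")
else:
    print("no CUDA device available")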
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_gpu_mem_usage(self):\n assert self.network_generator is not None, \\\n \"Unable to measure network memory utilization without generator function\"\n\n dispatcher = MulticoreDispatcher(1)\n dispatcher.run(get_model_gpu_allocation, self.network_generator)\n mem_usage = dispatcher.join()[0]\n mem_usage = math.ceil(mem_usage / .1) * .1 #Round up to nearest 10%\n dispatcher.shutdown()\n return mem_usage", "def memUsedGpu(self):\n return None # amount not known", "def get_mem_usage():\n return process.memory_info().rss / 1024.**2", "def cudaMemGetInfo(mb=False):\n print 'gpu: '\n free = ctypes.c_size_t()\n total = ctypes.c_size_t()\n ret = cuda.cudaMemGetInfo(ctypes.byref(free), ctypes.byref(total))\n\n if ret != 0:\n err = cuda.cudaGetErrorString(status)\n raise RuntimeError(\"CUDA Error (%d): %s\" % (status, err))\n\n if mb:\n scale = 1024.0**2\n return free.value / scale, total.value / scale\n else:\n return free.value, total.value", "def get_mem_use(units='MB'):\n import resource\n useage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n div = {'GB': 1024*1024*1024,\n 'MB': 1024*1024,\n 'KB': 1024,\n }\n return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / float(div[units])", "def mem_info(self):\n\t\t\tavailable, total = cuda.mem_get_info() #Note: pycuda._driver.LogicError: cuMemGetInfo failed: context is destroyed\n\t\t\tprint(\"Available: %.2f GB\\nTotal: %.2f GB\"%(available/1e9, total/1e9))", "def occupy_mem(cuda_device, mem_ratio=0.9):\n total, used = get_total_and_free_memory_in_Mb(cuda_device)\n max_mem = int(total * mem_ratio)\n block_mem = max_mem - used\n x = torch.cuda.FloatTensor(256, 1024, block_mem)\n del x\n time.sleep(5)", "def gpu_memory_mb() -> Dict[int, int]:\n # pylint: disable=bare-except\n try:\n result = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'],\n encoding='utf-8')\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n return {gpu: memory for gpu, memory in enumerate(gpu_memory)}\n except FileNotFoundError:\n # `nvidia-smi` doesn't exist, assume that means no GPU.\n return {}\n except:\n # Catch *all* exceptions, because this memory check is a nice-to-have\n # and we'd never want a training run to fail because of it.\n logger.exception(\"unable to check gpu_memory_mb(), continuing\")\n return {}", "def memory():\n sin = psutil.virtual_memory()\n return round((sin.total / sin.used) / 100, 3)", "def memory(self):\n # Run 'free -m' command and make a list from output.\n mem_data = self.execCMD('free', '-m').split()\n total_mem = int(mem_data[7]) / 1024.\n used_mem = int(mem_data[15]) / 1024.\n # Caculate percentage\n used_mem_percent = int(used_mem / (total_mem / 100))\n\n # Results are in kilobyte.\n return total_mem, used_mem, used_mem_percent", "def MemoryUsage(cls):\n\t\tmeminfo = cls.MemoryInfo()\n\t\treturn (meminfo[\"MemTotal\"] - meminfo[\"MemFree\"] - meminfo[\"Cached\"]) / float(meminfo[\"MemTotal\"])", "def get_free_gpu_memory(cuda_device_index):\n if sys.platform == \"darwin\":\n # No GPUs on darwin...\n return 0\n result = sp.check_output('nvidia-smi --query-gpu=memory.free '\n '--format=csv,nounits,noheader',\n shell=True)\n result = result.decode('utf-8').split('\\n')[:-1]\n log.verbose(f'The system has {len(result)} gpu(s).')\n free_mem = int(result[cuda_device_index])\n log.info(f'The {cuda_device_index}-th GPU has {free_mem} MB free.')\n if cuda_device_index >= len(result):\n raise ValueError(f\"Couldn't parse result for GPU #{cuda_device_index}\")\n return 
int(result[cuda_device_index])", "def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n print(\"Current usage: %i of 11178\" % gpu_memory_map[1])", "def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def get_free_gpu():\n\tos.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n\tif os.path.exists('tmp'):\n\t\tmemory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n\t\tos.remove('tmp')\n\t\treturn np.argmax(memory_available)\n\treturn 0", "def get_current_mem_usage():\n process = psutil.Process()\n return process.memory_info().rss / float(2**20)", "def gpu_memory_info(device_id=0):\n free = ctypes.c_uint64()\n total = ctypes.c_uint64()\n dev_id = ctypes.c_int(device_id)\n check_call(_LIB.MXGetGPUMemoryInformation64(dev_id, ctypes.byref(free), ctypes.byref(total)))\n return (free.value, total.value)", "def mem_per_core(self):\n return self.mem_per_node / self.cores_per_node", "def memory_utilization(self) -> float:\r\n return self._memory_utilization", "def get_mem_usage(**kwargs):\n try:\n con_mem_data_list = kwargs[\"con\"]._client.get_memory(\n session=kwargs[\"con\"]._session, memory_level=kwargs[\"mem_type\"]\n )\n usedram = 0\n freeram = 0\n for con_mem_data in con_mem_data_list:\n page_size = con_mem_data.page_size\n node_memory_data_list = con_mem_data.node_memory_data\n for node_memory_data in node_memory_data_list:\n ram = node_memory_data.num_pages * page_size\n is_free = node_memory_data.is_free\n if is_free:\n freeram += ram\n else:\n usedram += ram\n totalallocated = usedram + freeram\n if totalallocated > 0:\n totalallocated = round(totalallocated / 1024 / 1024, 1)\n usedram = round(usedram / 1024 / 1024, 1)\n freeram = round(freeram / 1024 / 1024, 1)\n ramusage = {}\n ramusage[\"usedram\"] = usedram\n ramusage[\"freeram\"] = freeram\n ramusage[\"totalallocated\"] = totalallocated\n ramusage[\"errormessage\"] = \"\"\n except Exception as e:\n errormessage = \"Get memory failed with error: \" + str(e)\n logging.error(errormessage)\n ramusage[\"errormessage\"] = errormessage\n return ramusage", "def memory_usage(self):\n\n def multiply_iter(iterable):\n res = 1\n for x in iterable:\n res *= x\n return res\n\n def add_params(parameter):\n res = 0\n for x in parameter:\n res += multiply_iter(x.shape)\n return res\n\n feat = add_params(self.features.parameters())\n clsf = add_params(self.classifier.parameters())\n total = feat + clsf\n\n mb_f = 4 / 1024 ** 2\n\n print(\"Conv : {0}\".format(feat))\n print(\"FC : {0}\".format(clsf))\n print(\"-----------------\")\n print(\"Total : {0}\".format(total))\n print(\"Memory : {0:.2f}MB\".format(total * mb_f))\n print(\"\")", "def memUsedCpu(self):\n return None # amount not known", "def deviceMemory(self):\n return 1", "def get_gpu_memory_available(gpu_id):\n #1MiB = 1048576 bytes\n MiB = 1048576\n \n result = subprocess.check_output(\n [\n 'nvidia-smi' , '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a 
dictionary\n gpu_memory = [x for x in result.strip().split('\\n')]\n vram_used = float(gpu_memory[gpu_id])\n #print(\"GPU id:\", str(gpu_id), \"GPU RAM used, including extra driver buffer from nvidia-smi:\", str(vram_used))\n total_mem = torch.cuda.get_device_properties(gpu_id).total_memory / MiB\n vram_available = total_mem-vram_used\n return vram_available", "def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')", "def memsize(self):\n return self.xlist(\"get-memsize\")[1][0] * 1024", "def memory_get_usage():\n raise NotImplementedError()", "def limit_gpu_memory_usage():\n\n tf_config = tf.ConfigProto()\n tf_config.gpu_options.allow_growth = True\n set_session(tf.Session(config=tf_config))", "def countGPUs(self):\n return libnao_gpu.CountDevices()", "def get_free_gb():\n mem_info = get_mem_info()\n free_gb = float(mem_info['MemAvailable'].value) / 10**6\n return free_gb" ]
[ "0.8307335", "0.7811169", "0.72445303", "0.7240968", "0.7172051", "0.7077602", "0.7038147", "0.70290995", "0.70069295", "0.69895846", "0.6949969", "0.6910933", "0.6902093", "0.68718094", "0.6866974", "0.6856295", "0.68228793", "0.6775267", "0.67143226", "0.66814", "0.66562223", "0.6643438", "0.6641692", "0.66370356", "0.6630061", "0.6624984", "0.6601436", "0.65985525", "0.6595423", "0.65810204" ]
0.8433346
1
Get the run id. If an active run is not found, tries to find the last experiment. Raises a `DataSetError` exception if the run id can't be found.
def run_id(self):
    if self._run_id is not None:
        return self._run_id

    run = mlflow.active_run()
    if run:
        return run.info.run_id

    raise DataSetError("Cannot find run id.")
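A hedged sketch of the two lookup branches using plain MLflow calls; the stored `_run_id` is simulated with a local variable, and no `DataSetError` is reached because a run is active while the check runs.

import mlflow

stored_run_id = None  # stands in for self._run_id

with mlflow.start_run() as active:
    run = mlflow.active_run()
    run_id = stored_run_id if stored_run_id is not None else run.info.run_id
    print(run_id == active.info.run_id)  # True while the run is active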
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_run_id(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['tracking_id'].attrs['run_id']\n\t\texcept:\n\t\t\treturn None", "def getRunId(self):\n return self.runid", "def get_current_run_id(self):\n start_datetime = self.start_dt\n run_datetime = self._get_current_datetime()\n run_interval = self.training_interval\n\n time_since_start = run_datetime - start_datetime\n logger.info(\"Time between start and run_date: %s\" % str(time_since_start))\n\n run_id = int(time_since_start / run_interval)\n logger.info(\"Current run_id: %s\" % str(run_id))\n\n return run_id", "def run_id(self) -> str:\n return self._step_execution_context.run_id", "def get_run(self, _id):\n return Run.deserialize(self._get_single('runs', {'run': _id}))", "def get_run(self, run_id: str) -> sqlite3.Row:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from runs\n WHERE run_id = ?;\n \"\"\",\n (run_id,),\n )\n results = c.fetchall()\n return results[0]", "def run_id() -> int:\n return sg_covid_impact.config[\"flows\"][\"glass\"][\"run_id\"]", "def get_run_id_from_result(model_result):\n if 'ml_flow' not in model_result:\n return None\n\n with model_result['ml_flow'].open('r') as f:\n return yaml.load(f).get('run_id')", "def get_run(self, id):\n if not id:\n return None\n \n query = \"SELECT * FROM task_history WHERE run_id='\"+str(id)+\"';\"\n \n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n run = cur.fetchone()\n \n if run:\n return Run(self.task_history_columns, run);\n else:\n return None", "def get_runner(self, runner_id: int = 0) -> ExperimentRunner:\n return self.runners[runner_id]", "def _get_next_run_id_local(run_dir_root: str) -> int:\r\n dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]\r\n r = re.compile(\"^\\\\d+\") # match one or more digits at the start of the string\r\n run_id = 0\r\n\r\n for dir_name in dir_names:\r\n m = r.match(dir_name)\r\n\r\n if m is not None:\r\n i = int(m.group())\r\n run_id = max(run_id, i + 1)\r\n\r\n return run_id", "def find_run_id(increase=False):\n if tc.RUNID is None:\n if not os.path.exists(tc.RUNID_FILE):\n with open(tc.RUNID_FILE, \"w\") as new_runid_file:\n new_runid_file.write(\"1\")\n tc.RUNID = open(tc.RUNID_FILE, \"r\").read().rstrip()\n with open(tc.RUNID_FILE, \"w\") as new_runid_file:\n new_runid_file.write(str(int(tc.RUNID) + 1))\n tc.RUNID = str(int(tc.RUNID) + 1)\n elif increase:\n tc.RUNID = int(tc.RUNID) + 1\n with open(tc.RUNID_FILE, \"w\") as new_runid_file: \n new_runid_file.write(str(int(tc.RUNID)))\n return tc.RUNID", "def _get_experiment_id(experiment_name: str, config: SQAConfig) -> Optional[int]:\n exp_sqa_class = config.class_to_sqa_class[Experiment]\n with session_scope() as session:\n sqa_experiment_id = (\n session.query(exp_sqa_class.id) # pyre-ignore\n .filter_by(name=experiment_name)\n .one_or_none()\n )\n\n if sqa_experiment_id is None:\n return None\n return sqa_experiment_id[0]", "def get_dataset_id(self) -> int:\n return self.dataset_id", "def reserve_next_run_id(self):\n query = \"SELECT NEXTVAL(pg_get_serial_sequence('task_history', 'run_id'))\"\n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n return cur.fetchone()[0]", "def get_datasetID(self):\n\t\treturn self.prDoc['inputs']['data'][0]['datasetID']", "def dataset_id(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"dataset_id\")", "def get_runid(path):\n name = Path(path).name\n if not os.path.exists(Path(path).parent):\n return '00001'\n files = os.listdir(Path(path).parent)\n runid = 0\n for f in files:\n try:\n id, val = f.split('_', 1)\n runid = max(runid, int(id))\n except:\n pass\n runid = str(runid + 1)\n runid = '0' * (5 - len(runid)) + runid\n return runid", "def rr_retrieve_next_uncertified_dataset(run_number: int) -> str: # pragma: no cover\n datasets = runregistry.get_datasets(filter={\"run_number\": {\"=\": run_number}})\n if len(datasets) == 0:\n raise RunRegistryNoAvailableDatasets(\n f\"No available datasets for run {run_number}\"\n )\n for dataset in datasets:\n if \"online\" not in dataset[\"name\"]:\n if not TrackerCertification.objects.filter(\n runreconstruction__run__run_number=run_number,\n runreconstruction__reconstruction=get_reco_from_dataset(\n dataset[\"name\"]\n ),\n ).exists():\n return dataset[\"name\"]\n\n raise RunReconstructionAllDatasetsCertified(\n f\"Run {run_number} has been fully certified\"\n )", "def getRunningId(self):\n return( int(self.id.split('.')[2]) )", "def get_readable_id(run_or_program):\n if isinstance(run_or_program, CourseRun):\n return run_or_program.courseware_id\n elif isinstance(run_or_program, Program):\n return run_or_program.readable_id\n else:\n raise Exception(f\"Unexpected object {run_or_program}\")", "def run_number(self):\n return self._runNumber", "def dataset_id(self) -> str:\n return pulumi.get(self, \"dataset_id\")", "def dataset_id(self) -> str:\n return pulumi.get(self, \"dataset_id\")", "def dataset_id(self) -> str:\n return pulumi.get(self, \"dataset_id\")", "def get_last_worked_on_step_id(self):\n logger.debug(\"Searching for ID of the step last worked on.\")\n last_id = None\n for step in self.steps:\n if any((task for task in step.tasks if task.status == \"DONE\")) and (not last_id or step.id > last_id):\n last_id = step.id\n if not last_id:\n raise ValueError(\"No ID is found for last worked on step for ticket {}\".format(self.id))\n return last_id", "def current_run(run_id):\n conn = create_connection(db_location)\n c = conn.cursor()\n c.execute(\"SELECT * FROM runs WHERE id = \" + str(run_id))\n result = c.fetchone()\n if result is not None:\n run = dict((c.description[i][0], value) for i, value in enumerate(result))\n run['penalties'], run['num_penalties'] = list_penalties(run_id);\n else:\n run = {'id': 0, 'start': None, 'middle_stop': None, 'middle_start': None, 'end': None, 'droid_uid': 0, 'member_uid': 0, 'first_half_time': None, 'second_half_time': None, 'clock_time': None, 'final_time': None, 'num_penalties': 0}\n if __debug__:\n print(run)\n conn.commit()\n conn.close()\n return run", "def get_task_run(self, task_run_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"task_runs\", \"task_run_id\", task_run_id)", "def get_latest_run_id(log_path: str, env_id: str) -> int:\n max_run_id = 0\n for path in glob.glob(log_path + f\"/{env_id}_[0-9]*\"):\n file_name = path.split(\"/\")[-1]\n ext = file_name.split(\"_\")[-1]\n if env_id == \"_\".join(file_name.split(\"_\")[:-1]) and ext.isdigit() and int(ext) > max_run_id:\n max_run_id = int(ext)\n return max_run_id", "def get_max_num_runs(self, db):\n res = db.session.query(func.max(db.ExperimentResult.run)).filter_by(experiment=self).first()\n if res is None or res[0] is None: return 0\n return res[0] + 1" ]
[ "0.7135845", "0.6851856", "0.65218407", "0.64400977", "0.63702995", "0.63424414", "0.62259513", "0.62118405", "0.61478734", "0.6060078", "0.5936504", "0.5921628", "0.5848821", "0.5746301", "0.57423246", "0.5689302", "0.5672927", "0.5640232", "0.5566331", "0.5538917", "0.55199564", "0.55110735", "0.54785365", "0.54785365", "0.54785365", "0.5475661", "0.54563355", "0.54043514", "0.5401627", "0.5396061" ]
0.7998408
0
Save the given MLflow metrics dataset and log it in MLflow as metrics.
def _save(self, data: MetricsDict) -> None:
    client = MlflowClient()
    try:
        run_id = self.run_id
    except DataSetError:
        # If run_id can't be found log_metric would create new run.
        run_id = None

    log_metric = (
        partial(client.log_metric, run_id)
        if run_id is not None
        else mlflow.log_metric
    )
    metrics = (
        self._build_args_list_from_metric_item(k, v) for k, v in data.items()
    )
    if self._logging_activated:
        for k, v, i in chain.from_iterable(metrics):
            log_metric(k, v, step=i)
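A hedged sketch of the underlying MLflow calls this save path dispatches to; the metric names, values, and steps are made up for illustration.

import mlflow
from mlflow.tracking import MlflowClient

with mlflow.start_run() as run:
    # branch used when no run_id is known: mlflow logs to the active run
    mlflow.log_metric("accuracy", 0.87, step=0)
    # branch used when a run_id was resolved beforehand
    MlflowClient().log_metric(run.info.run_id, "accuracy", 0.91, step=1)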
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics(logger, model, X_train, y_train, X_test, y_test):\n\n results = dict()\n y_preds = model.predict(X_test)\n results['Train Accuracy'] = model.score(X_train, y_train)\n results['Test Accuracy'] = accuracy_score(y_test, y_preds)\n results['Precision'] = precision_score(y_test, y_preds)\n results['Recall'] = recall_score(y_test, y_preds)\n\n metric_cols = data_config['params']['metrics_cols']\n res_df = pd.DataFrame(results.items(), columns=metric_cols)\n\n metrics_path = data_config['outputs']['logreg_metrics']\n res_df.to_csv(metrics_path, index=False)\n print(f'Metrics saved to {metrics_path}')\n\n return", "def save_metrics(self):\n self.data_stats.write.format(\"org.apache.spark.sql.cassandra\").mode(\"append\").options(table=self.cassandra_stats_table, keyspace=self.cassandra_keyspace).save()\n print (\"Saved data successfully\")", "def write_training_metrics(self) -> None:\n self.trainer_metrics.write_training_metrics()", "def save_scalars(self, step, metrics):\n\n # Save\n with self.summary_writer.as_default():\n for name, value in metrics.items():\n tf.summary.scalar(name, value, step=step)", "def saving_metrics(model_name, logs_file, num_features, auc_train\n ,auc_val, sens_val, spec_val, f1_val, acc_val\n ,auc_test, sens_test, spec_test, f1_test, acc_test,fpr, tpr):\n name = pd.DataFrame({'model_name':model_name}, index=[0])\n num_features = pd.DataFrame({'num_features':num_features}, index=[0])\n auc_train = pd.DataFrame({'auc_train':auc_train},index = [0])\n auc_val = pd.DataFrame({'auc_val':auc_val},index = [0])\n sens_val = pd.DataFrame({'sens_val':sens_val},index = [0])\n spec_val = pd.DataFrame({'spec_val':spec_val},index = [0])\n f1_val = pd.DataFrame({'f1_val':f1_val},index = [0])\n acc_val = pd.DataFrame({'acc_val':acc_val},index = [0])\n auc_test = pd.DataFrame({'auc_test':auc_test},index = [0])\n sens_test = pd.DataFrame({'sens_test':sens_test},index = [0])\n spec_test = pd.DataFrame({'spec_test':spec_test},index = [0])\n f1_test = pd.DataFrame({'f1_test':f1_test},index = [0])\n acc_test = pd.DataFrame({'acc_test':acc_test},index = [0])\n\n fpr = str(fpr)\n tpr = str(tpr)\n fpr = pd.DataFrame({'false_positive_rate':fpr},index = [0])\n tpr = pd.DataFrame({'true_positive_rate':tpr},index = [0])\n\n frames = [name, num_features, auc_train, auc_val,sens_val,spec_val,f1_val,acc_val,\n auc_test,sens_test,spec_test,f1_test,acc_test, fpr, tpr]\n resultado = pd.concat(frames, axis = 1)\n url_log = model_name +'_metrics.csv'\n url_log = os.path.join(logs_file,str(url_log))\n resultado.to_csv(url_log)", "def log_metrics(metrics, step=None):\n mlflow.log_metrics(metrics, step=step)", "def _log_metrics(\n self,\n train_writer: SummaryWriter,\n val_writer: SummaryWriter,\n timestamped_save_dir: Path,\n train_metrics: _Metrics,\n step: int,\n ) -> None:\n if len(self.val_loader) > 0:\n val_metrics, val_img, val_gt, val_pred = self._get_val_metrics()\n if val_metrics.accuracy > self.best_acc:\n self.best_acc = val_metrics.accuracy\n self.save_weights(timestamped_save_dir, True)\n\n for key in vars(train_metrics):\n if key == \"class_loss\":\n tag = \"losses/classification\"\n elif key in {\"shape_loss\", \"total_loss\"}:\n continue\n else:\n tag = f\"metrics/{key}\"\n\n train_writer.add_scalar(tag, getattr(train_metrics, key), step)\n if len(self.val_loader) > 0:\n val_writer.add_scalar(tag, getattr(val_metrics, key), step)\n\n reg_loss = self._get_l2_reg()\n train_writer.add_scalar(\"losses/regularization\", reg_loss, step)\n train_writer.add_scalar(\"losses/shape\", 
train_metrics.shape_loss, step)\n train_writer.add_scalar(\n \"losses/total\",\n train_metrics.total_loss + self.config.weight_decay * reg_loss,\n step,\n )\n\n # Log a histogram for each tensor parameter in the model, to\n # see if a parameter is training stably or not\n for name, value in self.model.state_dict().items():\n train_writer.add_histogram(name, value, step)\n\n # Log the validation images for easy visualization\n if len(self.val_loader) > 0:\n val_writer.add_images(\"input\", val_img, step)\n val_writer.add_images(\"ground_truth\", val_gt, step)\n val_writer.add_images(\"prediction\", val_pred, step)", "def record_metrics(metrics, args):\n with open('interpretation_metrics/model_metrics_{}'.format(args.file_num), 'a') as f:\n f.write(\"META DATA\\n\")\n f.write(\"---------\\n\")\n f.write(\"Model Name: {}\\n\".format(args.model_name))\n f.write(\"Attack Target: {}\\n\".format(args.attack_target))\n f.write(\"Gradient Model File: {}\\n\".format(args.gradient_model_file))\n f.write(\"Predictive Model File: {}\\n\".format(args.predictive_model_file))\n f.write(\"Cuda: {}\\n\".format(args.cuda))\n\n f.write(\"\\nSIMPLE GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSIMPLE GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))", "def write_metrics(metrics, db_path):\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n c.execute('DELETE FROM metrics')\n for metric in metrics:\n c.execute(\n 'INSERT INTO metrics '\n '(timestamp, callerid, uniqueid, channel, channel_extension, name) '\n 'VALUES (datetime(?),?,?,?,?,?)',\n (metric['timestamp'],\n metric['callerid'],\n metric['uniqueid'],\n metric['channel'],\n metric['channel_extension'],\n metric['name']))\n conn.commit()\n conn.close()", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def save_metrics(self, path: str) -> None:\n # Save dict of hyperparameter as json file\n with open(os.path.join(path, 'hyperparameter.txt'), 'w') as json_file:\n json.dump(self.hyperparameter, json_file)\n # Iterate items in metrics dict\n for metric_name, values in self.metrics.items():\n # Convert list of values to torch tensor to use build in save method from torch\n values = torch.tensor(values)\n # Save values\n 
torch.save(values, os.path.join(path, '{}.pt'.format(metric_name)))", "def save_case_metrics_on_check_point(self) -> None:\n pd.read_csv(f'{self.path_to_case_metrics}/{self.file_name}.csv')\\\n .append(pd.DataFrame(self.case_metrics,\n columns=['stream_index', 'timestamp', 'check point', 'case',\n 'graph distance', 'time distance', 'label']))\\\n .to_csv(f'{self.path_to_case_metrics}/{self.file_name}.csv', index=False)\n self.case_metrics = []", "def metrics(self, metrics):\n\n self._metrics = metrics", "def __save_datasets(self):\n self.train.to_csv('{}/{}/{}'.format(path_to_train_set, img_format, 'train.csv'))\n self.valid.to_csv('{}/{}/{}'.format(path_to_valid_set, img_format, 'valid.csv'))\n self.test.to_csv('{}/{}/{}'.format(path_to_test_set, img_format, 'test.csv'))", "def log_metrics(self, metrics, step=None, epoch=None, prefix=None):\n self.experiment.log_metrics(metrics, step=step, epoch=epoch, prefix=prefix)", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def metrics(x, y, save_folder, threshold, ds_name):\n predicted = model.predict(x)\n predicted[predicted > threshold] = 1\n predicted[predicted <= threshold] = 0\n actual = y\n TP = np.sum(np.logical_and(predicted == 1, actual == 1))\n FN = np.sum(np.logical_and(predicted == 0, actual == 1))\n TN = np.sum(np.logical_and(predicted == 0, actual == 0))\n FP = np.sum(np.logical_and(predicted == 1, actual == 0))\n TPR = TP / (TP + FN + 1e-8)\n TNR = TN / (TN + FP + 1e-8)\n FPR = FP / (FP + TN + 1e-8)\n FNR = FN / (FN + TP + 1e-8)\n precision = TP / (TP + FP + 1e-8)\n recall = TPR\n F1 = 2 * precision * recall / (precision + recall + 1e-8)\n metrics_dict = {'TPR': np.round(TPR, 3),\n 'TNR': np.round(TNR, 3),\n 'FPR' : np.round(FPR, 3),\n 'FNR' : np.round(FNR, 3),\n 'F1 Score' : np.round(F1, 3)\n }\n with open(save_folder + '/' + ds_name + '_metrics.txt', 'w') as f:\n f.write(str(metrics_dict))", "def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()", "def _log_metrics(self, logs, prefix, step):\r\n if logs is None:\r\n logs = {}\r\n\r\n # Group metrics by the name of their associated file writer. 
Values\r\n # are lists of metrics, as (name, scalar_value) pairs.\r\n logs_by_writer = {\r\n self._train_run_name: [],\r\n self._validation_run_name: [],\r\n }\r\n validation_prefix = 'val_'\r\n for (name, value) in logs.items():\r\n if name in ('batch', 'size', 'num_steps'):\r\n # Scrub non-metric items.\r\n continue\r\n if name.startswith(validation_prefix):\r\n name = name[len(validation_prefix):]\r\n writer_name = self._validation_run_name\r\n else:\r\n writer_name = self._train_run_name\r\n name = prefix + name # assign batch or epoch prefix\r\n logs_by_writer[writer_name].append((name, value))\r\n\r\n with context.eager_mode():\r\n with summary_ops_v2.always_record_summaries():\r\n for writer_name in logs_by_writer:\r\n these_logs = logs_by_writer[writer_name]\r\n if not these_logs:\r\n # Don't create a \"validation\" events file if we don't\r\n # actually have any validation data.\r\n continue\r\n writer = self._get_writer(writer_name)\r\n with writer.as_default():\r\n for (name, value) in these_logs:\r\n summary_ops_v2.scalar(name, value, step=step)", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def store_metrics_to_model(self, cm, accuracy, precision, recall, bcr):\n\n self.metrics['confusion_matrices'].append(cm)\n self.metrics['accuracies'].append(accuracy)\n self.metrics['precisions'].append(precision)\n self.metrics['recalls'].append(recall)\n self.metrics['bcrs'].append(bcr)\n\n if self.verbose:\n print(cm)\n print('accuracy for model is', accuracy)\n print('precision for model is', precision)\n print('recall for model is', recall)\n print('balanced classification rate for model is', bcr)", "def save(self, path=None):\n data = self._collect_data()\n\n name = np.random.choice(['a', 'b', 'c', 'd', 'e', 'f']+list(map(str, range(0, 10))), size=8)\n if path is None:\n path = './logs/'+\"\".join(name)+'_'\n with open(path, \"wb\") as f:\n cloudpickle.dump(data, f)\n print(\"Saved at {}\".format(path))", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def save_metrics(\n target_dataframe, model_name, true_y, predicted_y, probability_y, split_data, series, batch_size=np.nan):\n tn, fp, fn, tp = confusion_matrix(true_y, predicted_y).ravel()\n precision, recall, _ = precision_recall_curve(true_y, probability_y)\n target_dataframe = target_dataframe.append(\n {\n 'model': model_name,\n 'tp': tp,\n 'fp': fp,\n 'tn': tn,\n 'fn': fn,\n 'bal_acc': balanced_accuracy_score(true_y, predicted_y),\n 'prec': precision_score(true_y, predicted_y),\n 'recall': recall_score(true_y, predicted_y),\n 'pr_auc': auc(recall, precision),\n 'f1': f1_score(true_y, predicted_y),\n 'mc_coef': matthews_corrcoef(true_y, predicted_y),\n 'batch_s': batch_size,\n 'data': split_data,\n 'series': series,\n 'probability_y': probability_y,\n 'true_y': true_y\n },\n ignore_index=True\n )\n\n return target_dataframe", "def save_ttest_metrics(self, ttest_metrics, fname, no_genes=20):\n\n top_genes = self.fetch_gene_descriptions(ttest_metrics, nih_fetch_num=no_genes, printme=False)\n eids = [int(i[0]) for i in top_genes]\n myfig = self.effect_size_distr(ttest_metrics, genes_of_interest=eids[0:no_genes], return_fig=True)\n plt.savefig(fname+'.png')\n\n with open(fname+'.csv', 'wb') as csvfile:\n writer = 
csv.writer(csvfile)\n for i in top_genes:\n writer.writerow([i[0], i[3], i[1], i[2], i[4]])", "def write_metrics(output_dir, metrics, config, ancestors):\n os.makedirs(output_dir, exist_ok=True)\n\n file_name = \"metrics.csv\"\n file_path = os.path.join(output_dir, file_name)\n\n with open(file_path, \"w\", newline=\"\", encoding=\"utf-8\") as csvfile:\n csv_writer = csv.writer(csvfile)\n for line in metrics.items():\n csv_writer.writerow(line)\n\n record_provenance(file_path, config, ancestors)", "def _save_model(self, epoch, batch, logs):\n self.save(self._get_file_path(epoch, batch, logs))", "def store_metrics_to_params(self):\n\n model = self.model_name\n\n if self.stats_path.exists():\n with open(self.stats_path, \"rb\") as f:\n stats_dict = pickle.load(f)\n else:\n stats_dict = {}\n\n if model not in stats_dict:\n stats_dict[model] = defaultdict(list)\n\n stats_dict[model]['amine'].append(self.amine)\n stats_dict[model]['accuracies'].append(self.metrics['accuracies'])\n stats_dict[model]['confusion_matrices'].append(\n self.metrics['confusion_matrices'])\n stats_dict[model]['precisions'].append(self.metrics['precisions'])\n stats_dict[model]['recalls'].append(self.metrics['recalls'])\n stats_dict[model]['bcrs'].append(self.metrics['bcrs'])\n\n # Save this dictionary in case we need it later\n with open(self.stats_path, \"wb\") as f:\n pickle.dump(stats_dict, f)", "def on_train_end(self, logs=None):", "def on_train_end(self, logs=None):" ]
[ "0.667636", "0.6626673", "0.63638973", "0.6311859", "0.6228164", "0.61837775", "0.6092719", "0.6020087", "0.59919375", "0.59843105", "0.59331805", "0.58579373", "0.58464634", "0.58147526", "0.580968", "0.57692844", "0.57684577", "0.5746049", "0.5712529", "0.56523967", "0.56354636", "0.5634788", "0.55812687", "0.5575134", "0.5514359", "0.5512781", "0.5509569", "0.55067974", "0.54975545", "0.54975545" ]
0.72578466
0
Check if MLflow metrics dataset exists.
def _exists(self) -> bool: client = MlflowClient() all_metrics = client._tracking_client.store.get_all_metrics( run_uuid=self.run_id ) return any(self._is_dataset_metric(x) for x in all_metrics)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_metrics(charm):\n metricsyaml = \"{}/{}/metrics.yaml\".format(\n get_layer_path(),\n charm,\n )\n if os.path.exists(metricsyaml):\n return True\n return False", "def __check_default_metrics_exist(self):\n return_var = False\n if Metric.objects.count() == len(project_constants.VOTE_METRICS_LIST):\n # default metrics exists\n return_var = True\n\n return return_var", "def check_dataset_exists(dataset):\n result = subprocess.call(['das_client.py', '--query', 'dataset dataset=%s' % dataset])\n return result == 0", "def dataset_exists(es_url, id, es_index=\"grq\"):\n\n total, id = check_dataset(es_url, id, es_index)\n if total > 0:\n return True\n return False", "def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True", "def exists_dataset(self, dataset):\n assert dataset, \"Must input a valid dataset name.\"\n return any(self.get_by_dataset(dataset))", "def check_existing_dataset(path: str):\n x_path = os.path.join(path, IMG_DIR)\n y_path = os.path.join(path, MSK_DIR)\n\n if os.path.isdir(x_path) and os.path.isdir(y_path):\n _, _, x_files = next(os.walk(x_path))\n _, _, y_files = next(os.walk(y_path))\n x = len(x_files)\n y = len(y_files)\n\n if x != y:\n logger.warning(\n \"Found un-even numbers of x-y for dataset. x = %i, y = %i.\", x, y\n )\n\n return -1\n\n if x == 0:\n logger.info(\"Found 0 existing sets.\")\n\n return 0\n logger.info(\"Found %s sets in existing dataset.\", x)\n\n return x\n logger.error(\"Could not locate x and y folder.\")\n sys.exit()", "def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, dataset_name, file)):\r\n return False\r\n return True", "def dataset_exists(dataset_reference, client):\n from google.cloud.exceptions import NotFound\n\n try:\n client.get_dataset(dataset_reference)\n return True\n except NotFound:\n return False", "def metrics_manager(self):\n return_var = False\n # check if default metrics already exist\n if not self.__check_default_metrics_exist():\n # default metrics must be created\n self.__create_default_metrics()\n return_var = True\n\n return return_var", "def is_dataset(self):\n return self._dataset is not None", "def is_empty(self):\n for key, dataset in self.datasets.items():\n try:\n has_data = dataset.has_data()\n except MFDataException as mfde:\n raise MFDataException(\n mfdata_except=mfde,\n model=self._container_package.model_name,\n package=self._container_package._get_pname(),\n message=\"Error occurred while verifying\"\n ' data of dataset \"{}\" in block '\n '\"{}\"'.format(dataset.structure.name, self.structure.name),\n )\n\n if has_data is not None and has_data:\n return False\n return True", "def exists(self):\n return len(list(self.measures)) > 0", "def _is_dataset_metric(self, metric: mlflow.entities.Metric) -> bool:\n return self._prefix is None or (\n self._prefix and metric.key.startswith(self._prefix)\n )", "def _DatasetExists(dataset_id, project_id):\n client = GetApiClient()\n service = client.datasets\n get_request_type = GetApiMessage('BigqueryDatasetsGetRequest')\n get_request = get_request_type(datasetId=dataset_id, projectId=project_id)\n try:\n service.Get(get_request)\n return True\n except apitools_exceptions.HttpNotFoundError:\n log.info('Dataset with id [{}:{}] not found.'.format(\n project_id, dataset_id))\n\n return False", "def exists(self, name):\n assert name, \"Must input a valid 
dataset name.\"\n return name in self.manager.data[\"dataset\"]", "def has_datapoint_with_metric_name(fake_ingest, metric_name):\n for datapoint in fake_ingest.datapoints:\n if datapoint.metric == metric_name:\n return True\n return False", "def exists(dtype, name, rootdir=None):\n return FreezableAPI.to_slug(dtype,name) in FreezableAPI.datasets(rootdir=rootdir)", "def _warn_for_missing_datasets(self, datasets: set[str]):\n any_missing = False\n for ds in datasets:\n if not self.frames.has_dataset(ds):\n any_missing = True\n logger.warn(f'dataset \"{ds}\" is not in the database')\n if any_missing:\n logger.warn(f\"datasets in the databse: {self.all_datasets()}\")", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))", "def has_datamask(self):\n return self.datamask is not None", "def _check_before_run(self):\r\n if not os.path.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))", "def _is_dataset_path(ds_path: github_api.GithubPath) -> bool:\n return ds_path.is_dir() and (ds_path / f'{ds_path.name}.py').exists()", "def _check_before_run(self):\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))", "def validate_dataset(self):\n pass", "def check_metrics(self, submission_id, exists, type_file):\n post_json = {\"submission_id\": submission_id}\n response = self.app.post_json(\"/v1/error_metrics/\", post_json, headers={\"x-session-id\": self.session_id})\n\n self.assertEqual(response.status_code, 200)\n\n type_file_length = len(response.json[type_file])\n if exists:\n self.assertGreater(type_file_length, 0)\n else:\n self.assertEqual(type_file_length, 0)", "def is_dataset_created(path, suffix=\"\"):\n dataset_id = None\n try:\n with open(\"%s%sdataset%s\" % (path, os.sep, suffix)) as dataset_file:\n dataset_id = dataset_file.readline().strip()\n try:\n dataset_id = bigml.api.get_dataset_id(dataset_id)\n return True, dataset_id\n except ValueError:\n return False, None\n except IOError:\n return False, None", "def datasetAvailable(self):\n dset = None\n try:\n dset = self._getcopy()\n except Exception:\n pass\n\n if dset is not None:\n self._parent.destroyDset(dset)\n return True\n return False", "async def check_metrics(self, application, task=None):\n if has_metrics(self.charms[application]['name']):\n debug(\"Collecting metrics for {}\".format(application))\n\n metrics = await self.n2vc.GetMetrics(\n self.ns_name,\n application,\n )\n\n return await self.verify_metrics(application, metrics)", "def has_data_flow(self) -> bool:\n return self.graph_count and not self.data_flow_null_count" ]
[ "0.7121421", "0.6643055", "0.6265092", "0.6205776", "0.61936194", "0.6130659", "0.60995066", "0.60854906", "0.5931938", "0.5857567", "0.5838012", "0.5831367", "0.5810273", "0.57931703", "0.5787611", "0.57371074", "0.56733835", "0.56578803", "0.56324285", "0.5589755", "0.5568248", "0.55554444", "0.5531209", "0.55041265", "0.5501954", "0.5491559", "0.54768026", "0.54467237", "0.5426081", "0.54075897" ]
0.78530204
0
Check if given metric belongs to dataset.
def _is_dataset_metric(self, metric: mlflow.entities.Metric) -> bool: return self._prefix is None or ( self._prefix and metric.key.startswith(self._prefix) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_datapoint_with_metric_name(fake_ingest, metric_name):\n for datapoint in fake_ingest.datapoints:\n if datapoint.metric == metric_name:\n return True\n return False", "def exists_dataset(self, dataset):\n assert dataset, \"Must input a valid dataset name.\"\n return any(self.get_by_dataset(dataset))", "def _exists(self) -> bool:\n client = MlflowClient()\n all_metrics = client._tracking_client.store.get_all_metrics(\n run_uuid=self.run_id\n )\n return any(self._is_dataset_metric(x) for x in all_metrics)", "def is_dataset(self):\n return self._dataset is not None", "def exists(self, name):\n assert name, \"Must input a valid dataset name.\"\n return name in self.manager.data[\"dataset\"]", "def _dataset_match(geno, dataset):\n return all(dataset[k] == v for (k, v) in _dataset_fields(geno).items())", "def check_dataset_exists(dataset):\n result = subprocess.call(['das_client.py', '--query', 'dataset dataset=%s' % dataset])\n return result == 0", "def dataset_exists(es_url, id, es_index=\"grq\"):\n\n total, id = check_dataset(es_url, id, es_index)\n if total > 0:\n return True\n return False", "def check_metric(self, metric):\r\n\r\n if metric in metric_functions or metric == '':\r\n return metric\r\n else:\r\n raise InvalidNeuralNetwork()", "def exists(dtype, name, rootdir=None):\n return FreezableAPI.to_slug(dtype,name) in FreezableAPI.datasets(rootdir=rootdir)", "def is_dataset(obj):\n return isinstance(obj, (DictDataset, ImageDataset, LabeledImageDataset,\n TupleDataset, DatasetMixin))", "def has_dimension(self, dim):\n\n return self.units.dimensions == dim", "def has(self, id_):\n with self._db_connection() as connection:\n return connection.contains_dataset(id_)", "def _DatasetExists(dataset_id, project_id):\n client = GetApiClient()\n service = client.datasets\n get_request_type = GetApiMessage('BigqueryDatasetsGetRequest')\n get_request = get_request_type(datasetId=dataset_id, projectId=project_id)\n try:\n service.Get(get_request)\n return True\n except apitools_exceptions.HttpNotFoundError:\n log.info('Dataset with id [{}:{}] not found.'.format(\n project_id, dataset_id))\n\n return False", "def __contains__(self, item):\n return item in self.default_dataset", "def is_dataset(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = [\"data_vars\", \"coords\", \"dims\", \"to_array\"]\n\n return all([hasattr(X, name) for name in require_attrs])", "def has_metrics(charm):\n metricsyaml = \"{}/{}/metrics.yaml\".format(\n get_layer_path(),\n charm,\n )\n if os.path.exists(metricsyaml):\n return True\n return False", "def is_valid(self, dataset):\n pass", "def isDataLabel(self, label):\n\n if label in self._data:\n return True\n else:\n try:\n return self._getData(label) is not None\n except:\n return False", "def dataset_exists(dataset_reference, client):\n from google.cloud.exceptions import NotFound\n\n try:\n client.get_dataset(dataset_reference)\n return True\n except NotFound:\n return False", "def crs_is_metric(gdf):\n units = str(gdf_get_projection_unit(gdf)).strip().lower()\n if units in ['\"meter\"', '\"metre\"', \"'meter'\", \"'meter'\",\n 'meter', 'metre']:\n return True\n else:\n return False", "def has_metric(self, metric_name):\n found = self._cache_has(metric_name)\n if not found:\n with self._accessor_lock:\n found = self._accessor.has_metric(metric_name)\n if found:\n # The metric was found in the database but not cached, let's\n # cache it now.\n metric = self.get_metric(metric_name)\n self._cache_set(metric_name, metric)\n return found", "def 
has_datapoint_with_dim(fake_ingest, key, value):\n return has_datapoint_with_all_dims(fake_ingest, {key: value})", "def _cache_has(self, metric_name):\n with self._lock:\n return metric_name in self.__cache", "def test_is_metric(self):\n self.assertTrue(METRIC_SYSTEM.is_metric)\n self.assertFalse(IMPERIAL_SYSTEM.is_metric)", "def _cache_has(self, metric_name):\n pass", "def isunique(cls, dataset, dim, per_geom=False):\n try:\n return cls.isscalar(dataset, dim, per_geom)\n except TypeError:\n return cls.isscalar(dataset, dim)", "def is_metric_to_maximize(metric):\n if isinstance(metric, str):\n metric = [metric]\n if all(m in METRICS_TO_MAXIMIZE for m in metric):\n return True\n if all(m in METRICS_TO_MINIMIZE for m in metric):\n return False\n raise ValueError('Defined metrics %s are not compatible' % metric)", "def __check_default_metrics_exist(self):\n return_var = False\n if Metric.objects.count() == len(project_constants.VOTE_METRICS_LIST):\n # default metrics exists\n return_var = True\n\n return return_var", "def check_dataset_type(val, name='The hdf5 dataset', allow_none=False, print_value=True, location=''):\n none_msg = name + ' was not found in the hdf5 file at its location ' + location\n return check_type_value(val, name, h5py._hl.dataset.Dataset,\n allow_none=allow_none, print_value=print_value, none_msg=none_msg)" ]
[ "0.7180669", "0.685971", "0.6771512", "0.6482194", "0.6318453", "0.6209338", "0.6122321", "0.6078198", "0.6062199", "0.60171694", "0.6016336", "0.59766287", "0.5968776", "0.5964305", "0.5955861", "0.5951552", "0.59065855", "0.5861811", "0.5808055", "0.57722306", "0.5740384", "0.57162875", "0.5703883", "0.56489146", "0.56405604", "0.5628823", "0.55888253", "0.55641395", "0.5547064", "0.54718494" ]
0.79114664
0
Update metric in given dataset.
def _update_metric( metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {} ) -> MetricsDict: for metric in metrics: metric_dict = {"step": metric.step, "value": metric.value} if metric.key in dataset: if isinstance(dataset[metric.key], list): dataset[metric.key].append(metric_dict) else: dataset[metric.key] = [dataset[metric.key], metric_dict] else: dataset[metric.key] = metric_dict return dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self", "def update_metrics(self, metrics, predictions, labels):\n return", "def update_metric(self, metric, value):\n if self.is_number(value):\n self.logger.debug(\"Collected raw metric: %s = %s\" % (metric, value))\n self.raw_metrics[metric] = value", "def update(self, metric, loc):\n\n self._total_loc += loc\n for region in self._regions:\n region.update(metric, loc)", "def _UpdateDataSetValues( self ):\n pass", "def update_dataset(self, dataset, name=None, description=None):\n uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(\n owner=self.username, id=dataset)\n return self.session.patch(uri, json=self._attribs(name, description))", "def update(self, ds, priority=DatasetActionPriority.DEFAULT):\n old_ds = Dataset.load(self._db, ds.id)\n config_diff = ConfigDiff.compare_configs(old_ds.config, ds.config)\n meta_diff = old_ds.meta != ds.meta\n\n if config_diff == ConfigDiff.INSTR_PARAMS_DIFF:\n self._post_sm_msg(ds=ds, action=DatasetAction.ADD, priority=priority, del_first=True)\n elif config_diff == ConfigDiff.NEW_MOL_DB:\n self._post_sm_msg(ds=ds, action=DatasetAction.ADD, priority=priority)\n elif config_diff == ConfigDiff.EQUAL and meta_diff:\n self._post_sm_msg(ds=ds, action=DatasetAction.UPDATE, priority=DatasetActionPriority.HIGH)\n else:\n self.logger.info('Nothing to update: %s %s', ds.id, ds.name)", "def UpdateSet(self, dataset):\r\n for data in dataset:\r\n self.UpdateOddsRatioVsNoNorm(data)", "def _update(self, datapoints):\r\n if len(datapoints) == 1:\r\n timestamp, value = datapoints[0]\r\n whisper.update(self.path, value, timestamp)\r\n else:\r\n whisper.update_many(self.path, datapoints)", "def test_update_derived_metric(self):\n pass", "def update_dataset(\n self,\n dataset: DatasetDB,\n ) -> DatasetDB:\n dataset_id = dataset.id\n\n self._es.update_document(\n index=DATASETS_INDEX_NAME,\n doc_id=dataset_id,\n document=self._dataset_to_es_doc(dataset),\n )\n return dataset", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n raise NotImplementedError", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def _update_data(self, selected):\n if selected.row() != self.datasets.index:\n self.datasets.index = selected.row()\n self.datasets.update_current()\n self._update_main()", "def update(self, data):\n self.data.update(data)", "def updateStatistics(self, dataset, metadata):\r\n\r\n col = self.scope[0]\r\n number_null_values = round(self.null_value_prob * self.cardinality)\r\n self.cardinality += 1\r\n\r\n if (dataset[col] is None):\r\n self.null_value_prob = (self.null_value_prob * (self.cardinality - 1) + 1) / self.cardinality\r\n else:\r\n if self.null_value_prob != 0:\r\n self.null_value_prob = (self.null_value_prob * (self.cardinality - 1)) / self.cardinality\r\n\r\n self.mean = 1 / (self.cardinality - number_null_values) * (\r\n dataset[col] + (self.cardinality - number_null_values - 1) * self.mean)\r\n\r\n self.square_mean = 1 / (self.cardinality - number_null_values) * (\r\n dataset[col] ** 2 + (self.cardinality - number_null_values - 1) * self.square_mean)\r\n\r\n if dataset[col] != 0:\r\n self.inverted_mean = 1 / self.cardinality * (\r\n (self.cardinality - 1) * self.inverted_mean + 1 / dataset[col])\r\n\r\n if dataset[col] != 0:\r\n self.inverted_square_mean = 1 / self.cardinality * (\r\n (self.cardinality - 1) * self.inverted_square_mean + 1 / (dataset[col] * 
dataset[col]))\r\n\r\n # prob_sum\r\n #\r\n _calculate_new_probability_sum(self, dataset[col])\r\n _update_context_no_unique_values(metadata, col,\r\n self.unique_vals)\r\n\r\n return True", "def update(self, name, cache_dir=None, data_dir=None, tasks=None):\n assert name, \"Must input a valid dataset name.\"\n self.manager.update_data(name, cache_dir, data_dir, tasks)", "def update_unified_dataset(session: Session, project: MasteringProject) -> Operation:\n unified_dataset = unified.from_project(session, project)\n op = unified._apply_changes_async(session, unified_dataset)\n return operation.wait(session, op)", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def _cache_set(self, metric_name, metric):\n pass", "def update(self, dt):\n\n self.collecting(dt)", "def update(self, labels, preds):\n labels, preds = check_label_shapes(labels, preds, True)\n\n for label, pred in zip(labels, preds):\n self.metrics.update_binary_stats(label, pred)\n\n if self.average == \"macro\":\n self.sum_metric += self.metrics.fscore\n self.num_inst += 1\n self.metrics.reset_stats()\n else:\n self.sum_metric = self.metrics.fscore * self.metrics.total_examples\n self.num_inst = self.metrics.total_examples", "def update_record(self, d: dict) -> None:\n super().update_record(d)\n d.update(\n dataset_doses=str_list(self.doses),\n dataset_ns=str_list(self.ns),\n dataset_stdevs=str_list(self.stdevs),\n dataset_means=str_list(self.means),\n )", "def update_data():\n pass", "def update_job_metrics(self, job_id:int)->None:\n with connection.cursor() as cursor:\n cursor.execute(f\"SELECT update_job_metrics({job_id})\")\n ##TODO: this should return something ", "def update_data(self, newData):\r\n self.AllData = newData", "def update_weight(self,ctr,new_weight):\n self.sum1 -= self.data_set[ctr].weight\n self.data_set[ctr].weight = new_weight\n self.sum1 += new_weight" ]
[ "0.71797806", "0.63577366", "0.62742144", "0.62739", "0.6228634", "0.6181807", "0.6058167", "0.603701", "0.6003216", "0.60031444", "0.59537387", "0.59425193", "0.58586115", "0.5792442", "0.5785038", "0.57844263", "0.5765737", "0.57546055", "0.57347775", "0.57347775", "0.57347775", "0.57347775", "0.57100815", "0.56977594", "0.56941605", "0.5676207", "0.56553787", "0.5648246", "0.5622305", "0.5575395" ]
0.6611735
1
Build a list of tuples with metrics. The first element of each tuple is the key, the second the metric value, the third the step. If the MLflow metrics dataset has a prefix, it is attached to the key.
def _build_args_list_from_metric_item( self, key: str, value: MetricItem ) -> Generator[MetricTuple, None, None]: if self._prefix: key = f"{self._prefix}.{key}" if isinstance(value, dict): return (i for i in [(key, value["value"], value["step"])]) if isinstance(value, list) and len(value) > 0: return ((key, x["value"], x["step"]) for x in value) raise DataSetError( f"Unexpected metric value. Should be of type `{MetricItem}`, got {type(value)}" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)", "def build_metrics_gauge_data(gauge_metrics):\n return [{'name': name, 'value': value} for name, value in iteritems(gauge_metrics)]", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def build_metrics_counter_data(count_metrics):\n return [{'name': name, 'delta': delta} for name, delta in iteritems(count_metrics)]", "def collect_metrics() -> Tuple[Dict[str, Dict[str, Any]], Dict[str, List[str]]]:\n metric_docs: Dict[str, Dict[str, Any]] = {}\n metrics_by_integration: DefaultDict[str, List[str]] = defaultdict(list)\n # Reverse to keep backwards-compatible behavior with old script that kept\n # the last metric seen.\n for metric_yaml_file in sorted(INTEGRATIONS_PATH.glob(\"*/metrics.yaml\")):\n if \"Example\" in str(metric_yaml_file):\n continue\n\n for metric_name, metric in (yaml.safe_load(metric_yaml_file.read_text(encoding=\"utf-8\")) or {}).items():\n metrics_by_integration[metric_yaml_file.parent.name].append(metric_name)\n\n if metric_name in metric_docs:\n # print(f\"WARNING metric {metric_name} is duplicated, info will be taken from first one processed only\")\n continue\n\n desc = \"\"\n if \"description\" in metric:\n desc = metric[\"description\"]\n del metric[\"description\"]\n metric_docs[metric_name] = {\"yaml\": metric, \"markdown\": desc}\n return metric_docs, dict(metrics_by_integration)", "def _build_metric_list_to_collect(self, additional_metrics):\n metrics_to_collect = {}\n\n # Defaut metrics\n for default_metrics in self.DEFAULT_METRICS.itervalues():\n metrics_to_collect.update(default_metrics)\n\n # Additional metrics metrics\n for option in additional_metrics:\n additional_metrics = self.AVAILABLE_METRICS.get(option)\n if not additional_metrics:\n if option in self.DEFAULT_METRICS:\n self.log.warning(\n u\"`%s` option is deprecated.\"\n u\" The corresponding metrics are collected by default.\", option\n )\n else:\n self.log.warning(\n u\"Failed to extend the list of metrics to collect:\"\n u\" unrecognized `%s` option\", option\n )\n continue\n\n self.log.debug(\n u\"Adding `%s` corresponding metrics to the list\"\n u\" of metrics to collect.\", option\n )\n 
metrics_to_collect.update(additional_metrics)\n\n return metrics_to_collect", "def calculate_metrics(metrics_data: List[Tuple[Metric, DataType]]) -> List[float]:\n pass", "def init_metric_dict(self, metrics=[\"\"], phases=[\"train\", \"val\"]):\n metric_dict = {phase: {metric: [] for metric in metrics} for phase in phases}\n return metric_dict", "def get_next_batch(self):\n\n metrics = {}\n for struct in self.metrics.values():\n metrics = {**metrics, **struct.get_next_batch()}\n\n return metrics", "def metrics_group():", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "def list_metrics(self):\n results = []\n if self.r.exists(self.metrics_key):\n keys = self.r.smembers(self.metrics_key)\n for k in keys:\n # metric_key, metric_type, metric_name, metric_help = keys.split(\" \", 3)\n results.append(k.split(\" \", 3))\n return results", "def get_metric_list(self) -> List[str]:\n ...", "def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []", "def _log_metrics(self, logs, prefix, step):\r\n if logs is None:\r\n logs = {}\r\n\r\n # Group metrics by the name of their associated file writer. Values\r\n # are lists of metrics, as (name, scalar_value) pairs.\r\n logs_by_writer = {\r\n self._train_run_name: [],\r\n self._validation_run_name: [],\r\n }\r\n validation_prefix = 'val_'\r\n for (name, value) in logs.items():\r\n if name in ('batch', 'size', 'num_steps'):\r\n # Scrub non-metric items.\r\n continue\r\n if name.startswith(validation_prefix):\r\n name = name[len(validation_prefix):]\r\n writer_name = self._validation_run_name\r\n else:\r\n writer_name = self._train_run_name\r\n name = prefix + name # assign batch or epoch prefix\r\n logs_by_writer[writer_name].append((name, value))\r\n\r\n with context.eager_mode():\r\n with summary_ops_v2.always_record_summaries():\r\n for writer_name in logs_by_writer:\r\n these_logs = logs_by_writer[writer_name]\r\n if not these_logs:\r\n # Don't create a \"validation\" events file if we don't\r\n # actually have any validation data.\r\n continue\r\n writer = self._get_writer(writer_name)\r\n with writer.as_default():\r\n for (name, value) in these_logs:\r\n summary_ops_v2.scalar(name, value, step=step)", "def build_metrics_dict(node):\n\n # Initialize tensors\n n = 0\n n = _recv(n,node)\n keys = [[0 for j in range(8)] for i in range(n)] # max_seq_len for metric name is 8\n values = [0.0 for i in range(n)]\n higher_is_better = [0 for i in range(n)]\n\n # Read data\n keys = _recv(keys,node)\n values = _recv(values,node)\n higher_is_better = _recv(higher_is_better,node)\n\n # Reorganize output + decode dict keys\n orig_keys = [encode_string(key, string_to_int=False) for key in keys]\n values_dict = [{'value': float(v), 'higher_is_better': bool(higher_is_better[i])} for i, v in enumerate(values)]\n metrics = dict(zip(orig_keys,values_dict))\n num_instances = int(metrics.pop('num')['value'])\n\n result = None, metrics, num_instances\n \n return result", "def get_metric_fn_and_keys():\n\n def normalize_value(inst: dict):\n val = int(inst[\"output_layer\"][0])\n return tuple([val]) # returns a tuple.\n\n return normalize_value, [\"val\"] # key order must match.", "def __get_metrics_list(self):\n metrics = metrics_calculator.MetricsCalculator(self.processor)\n metric_list = []\n # Populate the list\n for key in metrics.get_raw_metrics().keys():\n name = 
metrics.get_raw_metrics()[key][\"NAME\"]\n formula = metrics.get_raw_metrics()[key][\"FORMULA\"]\n description = metrics.get_raw_metrics()[key][\"DESCRIPTION\"]\n metric = Metric(name, formula, description)\n metric_list.append(metric)\n return metric_list", "def metrics(self):\n return {**self.prepend_name_dict(self._prefixes[0], self._train_metrics),\n **self.prepend_name_dict(self._prefixes[1], self.validator.metrics)}", "def _get_metrics_to_collect(self, instance_key, additional_metrics):\n if instance_key not in self.metrics_to_collect_by_instance:\n self.metrics_to_collect_by_instance[instance_key] = \\\n self._build_metric_list_to_collect(additional_metrics)\n return self.metrics_to_collect_by_instance[instance_key]", "def init_metrics():\n metrics = defaultdict(list)\n metrics['best_acc'] = 0.0\n metrics['best_loss'] = float('inf')\n metrics['best_epoch'] = 0\n return metrics", "def create_metric_keys(\n thresholds: Sequence[float], metrics: List[str], metric_name: str,\n model_name: str, output_name: str, example_weighted: bool\n) -> Tuple[List[metric_types.MetricKey], Dict[float, Dict[\n str, metric_types.MetricKey]]]:\n keys = []\n metric_key_by_name_by_threshold = collections.defaultdict(dict)\n num_digits = _calculate_digits(thresholds)\n for threshold in thresholds:\n for metric in metrics:\n key = metric_types.MetricKey(\n name='%s/%s@%.*f' % (metric_name, metric, num_digits, threshold),\n model_name=model_name,\n output_name=output_name,\n example_weighted=example_weighted)\n keys.append(key)\n metric_key_by_name_by_threshold[threshold][metric] = key\n return keys, metric_key_by_name_by_threshold", "def get_metrics(metrics_keys: List[str]) -> List[Union[Metric, str]]:\n return [\n metrics_factory.get_metric(metric_key=metric_key) for metric_key in metrics_keys\n ]", "def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics", "def compute_metrics(self, x, extra=None):\n if self.__metrics is None and extra is None:\n return None\n\n ret = {}\n if self.__metrics is not None:\n for m in self.__metrics:\n ret[m.name] = self._mdmetric(x, m)\n\n if extra is not None and extra.name not in ret:\n ret[extra.name] = self._mdmetric(x, extra)\n\n return ret", "def get_metrics(self, slug_list):\n # meh. 
I should have been consistent here, but I'm lazy, so support these\n # value names instead of granularity names, but respect the min/max\n # granularity settings.\n keys = ['seconds', 'minutes', 'hours', 'day', 'week', 'month', 'year']\n key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}\n keys = [key_mapping[gran] for gran in self._granularities()]\n\n results = []\n for slug in slug_list:\n metrics = self.r.mget(*self._build_keys(slug))\n if any(metrics): # Only if we have data.\n results.append((slug, dict(zip(keys, metrics))))\n return results", "def build_metrics(input_workbook, metrics_worksheet_name, topic_name_prefix):\n wb = openpyxl.load_workbook(input_workbook, data_only=True, read_only=True)\n ws = wb[metrics_worksheet_name]\n\n result = {}\n for row in ws.iter_rows(min_row=2):\n name = row[0].value\n if not name:\n break\n\n description = row[1].value\n address = row[2].value\n size = row[3].value\n scaling_factor = row[4].value\n data_type = DATA_TYPE_STR_TO_ENUM[row[5].value]\n topic_name = '{}/{}'.format(topic_name_prefix, name)\n result[name] = model.Metric(\n name, description, address, size, scaling_factor, data_type, topic_name)\n\n return result", "def extract_metric_information(tag: str, metric: str, datapath: str):\n path = os.path.join(datapath, tag)\n\n checkpoint = torch.load(os.path.join(path, \"checkpoint.pt\"),\n map_location=torch.device('cpu'))\n validations = checkpoint['validations']\n epochs = list(validations.keys())\n values = [validations[i][metric] for i in validations.keys()]\n\n config = load_yml(os.path.join(path, \"configuration.yml\"))\n load_from = config.get('load_from', None)\n if load_from is not None:\n load_from = os.path.split(load_from)[1]\n\n # If the model was loaded from another checkpoint, recursively load the\n # parent checkpoints.\n if load_from is not None:\n v_prev, e_prev = extract_metric_information(load_from,\n metric,\n datapath)\n epochs = e_prev + epochs\n values = v_prev + values\n\n return values, epochs", "def training_metrics(self):\r\n if self._training_metrics is None:\r\n # Builds the per-task metrics and losses.\r\n self._training_metrics = {}\r\n for name, task in self.multi_task.tasks.items():\r\n self._training_metrics[name] = task.build_metrics(training=True)\r\n return self._training_metrics", "def summerize_adapter_metrics(parsed_metrics: Dict[int, dict]) -> Dict[Tuple[str, str], dict]:\n\n summarized_metrics = {}\n for lane in parsed_metrics:\n # Iterate over all samples in lane\n summarized_metrics[lane] = summarized_metrics.get(lane, {})\n for value in parsed_metrics[lane].values():\n sample_id = value.get(\"Sample_ID\")\n summarized_metrics[lane][sample_id] = summarized_metrics[lane].get(sample_id, value)\n summarized_metrics[lane][sample_id][\n \"R\" + value.get(\"ReadNumber\") + \"_SampleBases\"\n ] = value.get(\"SampleBases\")\n\n return summarized_metrics" ]
[ "0.6118578", "0.6038798", "0.6013908", "0.5998873", "0.5981428", "0.5951664", "0.58464813", "0.5808635", "0.57962924", "0.57889605", "0.5692853", "0.56794715", "0.5677968", "0.5665272", "0.56633407", "0.55949426", "0.55933595", "0.5555804", "0.5555385", "0.55325973", "0.55102974", "0.5459247", "0.54003334", "0.5368143", "0.5345549", "0.53392524", "0.5337575", "0.5334161", "0.53205514", "0.53195566" ]
0.6626808
0
Train CRF CEM recognizer.
def train_crf(ctx, input, output, clusters): click.echo('chemdataextractor.crf.train') sentences = [] for line in input: sentence = [] for t in line.split(): token, tag, iob = t.rsplit('/', 2) sentence.append(((token, tag), iob)) if sentence: sentences.append(sentence) tagger = CrfCemTagger(clusters=clusters) tagger.train(sentences, output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self):\n # self.recognizer.train()\n self.detector.train()\n self.shared_conv.train()", "def train(self, training_data, model_name):\n dataset = []\n for example in training_data:\n entity_offsets = self._convert_example(example)\n dataset.append(self._from_json_to_crf(example, entity_offsets))\n\n features = [self._sentence_to_features(s) for s in dataset]\n labels = [self._sentence_to_labels(s) for s in dataset]\n trainer = sklearn_crfsuite.CRF(\n algorithm=\"lbfgs\",\n # coefficient for L1 penalty\n c1=0.1,\n # coefficient for L2 penalty\n c2=0.1,\n # stop earlier\n max_iterations=50,\n # include transitions that are possible, but not observed\n all_possible_transitions=True,\n )\n trainer.fit(features, labels)\n logger.info(\"Creating Model for Intent %s\",model_name)\n joblib.dump(trainer, 'core/agent/model_files/%s.model' % model_name)\n return True", "def train_crf(threads=3, hyperparameter_crf='1.5', cut_off='5', alg='CRF-L2'):\n\n print '\\n\\ttrain crf'\n os.system('crf_learn -p ' + threads +\n ' -c ' + hyperparameter_crf +\n ' -f ' + cut_off +\n ' -a ' + alg +\n ' ' + template_file + ' ' + train_file_crf + ' ' + model_file)\n print '\\t--done\\n'", "def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()", "def train_clf(x_train, y_train, clf_model=\"decision_tree\"):\n clf = classifiers[clf_model]\n clf.fit(x_train, y_train)\n return clf", "def trainModel( self, featureTrain, classTrain):", "def train():\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self):\n if self.input_col is None:\n raise Exception(\"Preprocessing not specified\")\n self.classifier_model.train(self.input_col, self.output_col)", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def face_recognition_train(self, data_dir='datasets', batch_size=32, img_height=128, img_width=128, epochs=10,\n model_path='model', pretrained=None, base_model_trainable=False):\n\n obj = train.Classifier(data_dir=data_dir, batch_size=batch_size, img_height=img_height,\n img_width=img_width, epochs=epochs, model_path=model_path, pretrained=pretrained,\n base_model_trainable=base_model_trainable)\n obj.start()", "def train(self, trnM, trnL):\n print 'Training ...'\n self.clf.fit(trnM, trnL)", "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n 
plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def _train_model(self, df_train):\n # type: (List[List[Tuple[Text, Text, Text, Text]]]) -> None\n import sklearn_crfsuite\n\n X_train = [self._sentence_to_features(sent) for sent in df_train]\n y_train = [self._sentence_to_labels(sent) for sent in df_train]\n\n from itertools import chain\n\n import nltk\n import sklearn\n import scipy.stats\n from sklearn.metrics import make_scorer\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import RandomizedSearchCV\n\n import sklearn_crfsuite\n from sklearn_crfsuite import scorers\n from sklearn_crfsuite import metrics\n\n X_train = [self._sentence_to_features(sent) for sent in df_train]\n y_train = [self._sentence_to_labels(sent) for sent in df_train]\n\n if self.component_config[\"grid_search\"]:\n self.ent_tagger = sklearn_crfsuite.CRF(\n algorithm='lbfgs',\n # stop earlier\n max_iterations=self.component_config[\"max_iterations\"],\n # include transitions that are possible, but not observed\n all_possible_transitions=True\n )\n self.ent_tagger.fit(X_train, y_train)\n\n params_space = {\n 'c1': scipy.stats.expon(scale=0.5),\n 'c2': scipy.stats.expon(scale=0.5),\n }\n labels = self.ent_tagger.classes_\n\n # use the same metric for evaluation\n f1_scorer = make_scorer(metrics.flat_f1_score,\n average='weighted', labels=labels)\n\n # search\n rs = RandomizedSearchCV(self.ent_tagger, params_space,\n cv=10,\n verbose=1,\n n_jobs=-1,\n n_iter=100,\n scoring=f1_scorer)\n rs.fit(X_train, y_train)\n print('best params:', rs.best_params_)\n print('best CV score:', rs.best_score_)\n print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000))\n try:\n import json\n with open(\"tunning_score.json\", \"w\") as f:\n json.dump(rs.best_params_, f, sort_keys=True, indent=4)\n except Exception:\n pass\n self.ent_tagger = sklearn_crfsuite.CRF(\n algorithm='lbfgs',\n c1=rs.best_params_[\"c1\"],\n c2=rs.best_params_[\"c2\"],\n # stop earlier\n max_iterations=self.component_config[\"max_iterations\"],\n # include transitions that are possible, but not observed\n all_possible_transitions=True\n )\n else:\n print(\"L1_c\", self.component_config[\"L1_c\"])\n print(\"L2_c\", self.component_config[\"L2_c\"])\n self.ent_tagger = sklearn_crfsuite.CRF(\n algorithm='lbfgs',\n # coefficient for L1 penalty\n c1=self.component_config[\"L1_c\"],\n # coefficient for L2 penalty\n c2=self.component_config[\"L2_c\"],\n # stop earlier\n 
max_iterations=self.component_config[\"max_iterations\"],\n # include transitions that are possible, but not observed\n all_possible_transitions=True\n )\n\n self.ent_tagger.fit(X_train, y_train)", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def train(self):\n return", "def train(self):\n raise NotImplementedError", "def train(self):\n\t\traise NotImplementedError", "def train_classifier(train_faces, train_faces_ids):\n recognizer_lbph = cv2.face.LBPHFaceRecognizer_create()\n print('Training model in progress...')\n recognizer_lbph.train(train_faces, np.array(train_faces_ids))\n print('Saving...')\n recognizer_lbph.save('trainner.yml')\n print('Model training complete!')", "def do_training(self):\n json_data = request.data\n global g_list_of_classifier\n\n datas = json.loads(json_data.decode('UTF-8')) #datas = liste\n\n for ite_clf in g_list_of_classifier:\n for data in datas:\n ite_clf.add_data(data['score'], data['answer'])\n print(ite_clf.get_info())\n return ''", "def train(self, ):\n raise NotImplementedError", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)" ]
[ "0.6329142", "0.63131255", "0.62282646", "0.6179476", "0.61736995", "0.6069067", "0.60654396", "0.60244024", "0.60244024", "0.60244024", "0.60244024", "0.60244024", "0.60149896", "0.59746766", "0.5974304", "0.58804363", "0.5876749", "0.58578265", "0.58444804", "0.58298576", "0.5816191", "0.5800663", "0.57994646", "0.5798144", "0.57961017", "0.576009", "0.5744467", "0.5739044", "0.5732645", "0.57176596" ]
0.7064023
0
Creqte source datafram(data, columns) (can not be changed), cast columns to column.type, create datafram for view (may be changed)
def __init__(self, data, columns): super().__init__() self._filters = {} self._columns = [Column(column) for column in columns] self._source = pd.DataFrame(data, columns=[column.name for column in self._columns], dtype=str) # Change columns datatypes for name, type in [(column.name, column.type) for column in self._columns]: if type == 'NUMBER': self._source[name] = self._source[name].astype('float') elif type == 'INTEGER': # self._source[name] = self._source[name].round() self._source[name] = self._source[name].astype('float') self._source[name].fillna(float(0), inplace=True) self._source[name] = self._source[name].astype(int) elif type in ['DATE', 'DATETIME', 'TIME']: self._source[name] = pd.to_datetime(self._source[name]) elif type == 'BOOL': self._source[name] = self._source[name].apply(lambda x: str(x).upper() == 'TRUE').astype('bool') self._visable_columns = [column.name for column in self._columns if column.title is not None and column.visable == True] self._dataframe = self._source.loc[:, self._visable_columns]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def make_cn2_data(contibution_df):\n date_key = select_date_key.value\n cols = ['datetime', 'fixed_cn2_1', 'fixed_cn2_2', 'fixed_cn2_3',\n 'fixed_cn2_4', 'fixed_cn2_5', 'fixed_cn2_6']\n df = contibution_df[contibution_df['date_key'] == date_key]\n return ColumnDataSource(df)", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def create_dt(is_train=True):\n train = pd.read_csv(\"/Users/milesklingenberg/Documents/UWMSBA/590/Data/train_house-1.csv\", dtype=CAT_DTYPES)\n train = pd.DataFrame(train)\n for col, col_dtype in CAT_DTYPES.items():\n if col_dtype == \"category\":\n train[col] = train[col].astype('category').cat.codes\n\n #drop_columns = ['']\n #was going to drop columns\n\n dt = train\n\n return dt", "def sourceToDataframe(self):\n df = pd.read_excel(self.filename)\n df.columns = df.iloc[10]\n df = df.drop(df.index[:11])\n self.df = df #makes this df accessible to the whole class now\n self.insertODN()\n display(df.head())", "def transform_column_source_data(data):\n data_copy = {}\n for key in iterkeys(data):\n if is_pandas and isinstance(data[key], (pd.Series, pd.Index)):\n data_copy[key] = transform_series(data[key])\n elif isinstance(data[key], np.ndarray):\n data_copy[key] = transform_array(data[key])\n else:\n data_copy[key] = traverse_data(data[key])\n return data_copy", "def _transform_df(self, data):\n # specify if has FIPS or not\n if self.has_location:\n loc_col_type = \"location\"\n elif not self.has_location:\n loc_col_type = \"location_name\"\n\n out = data.melt(\n id_vars=[\"dt\", loc_col_type], value_vars=self.crename.keys()\n ).dropna()\n out.loc[:, \"value\"] = pd.to_numeric(out[\"value\"])\n out = self.extract_CMU(out, self.crename)\n out[\"vintage\"] = self._retrieve_vintage()\n\n cols_to_keep = [\n \"vintage\",\n \"dt\",\n loc_col_type,\n \"category\",\n \"measurement\",\n \"unit\",\n \"age\",\n \"race\",\n \"ethnicity\",\n \"sex\",\n \"value\",\n ]\n return out.loc[:, cols_to_keep]", "def make_dataset(self, df, **kwargs):\n\t\treturn df", "def caster(self):\n from ambry.transform import CasterTransformBuilder\n\n bdr = CasterTransformBuilder()\n\n for c in self.columns:\n bdr.append(c.name, c.python_type)\n\n return bdr", "def __init__(self, request, **kwargs):\n super(PSIHDReport, self).__init__(request, **kwargs)\n calculate_fn = lambda key, _: key[len(self.place_types) + 1]\n self.columns['demo_type'] = Column(\"Worker Type\", calculate_fn=calculate_fn)\n self.columns['demo_type'].view = FunctionView(calculate_fn=calculate_fn)\n self.function_views['demo_type'] = self.columns['demo_type'].view", "def create_source(data):\n # Masked values in integer columns show up as <NA> when exported to Pandas.\n # Such values seem to cause weird errors when displayed in bokeh, even when those rows\n # 
are filtered out with dropna(). So convert the column to float, which results\n # in masked values being NaN (np.nan) which are smoothly ignored by bokeh.\n data[\"num_rows\"] = data[\"num_rows\"].astype(float)\n\n # Create the Pandas DataFrame, with start time officially being a datetime.\n df = data[\"location\", \"start_time\", \"do_query_dur\", \"stream_to_file_dur\", \"num_rows\", \"base_name\", \"service_type\",\n \"ra\", \"dec\", \"sr\"].to_pandas().copy()\n df[\"dt_start_time\"] = pd.to_datetime(df[\"start_time\"], format='%Y-%m-%d %H:%M:%S.%f')\n\n # Create the bokeh data source from the data frame.\n source = ColumnDataSource(df)\n\n return source", "def _convert_to_interactive(key):\n df = _get_dataframe(key)\n if df is not None:\n return _data_table.DataTable(df)", "def create_data_types(self):\n for col in self.all_columns:\n try:\n if float(self.train[col].iloc[-3]):\n self.train[col] = self.train[col].astype(np.float32)\n except:\n pass\n self.d_types = self.train.dtypes", "def test_create_from_dataframe(self):\n self.insert()\n data = self.tbl.select()\n data.index.name = None\n tbl = Table.create(':memory:', \"Foo_2\", data, verbose=True,\n primary_key='id', autoincrement=True)\n self.check(self.idata, tbl.select())", "def _create_column_data_source(self, confusion_array):\n cds = ColumnDataSource()\n df = pd.DataFrame(confusion_array).stack()\n old_x = df.index.droplevel(0).to_list() # one of the indexes astype(str).\n x = [self.label_mapping[ind] for ind in old_x]\n old_y = df.index.droplevel(1).to_list() # second of the indexes astype(str).\n y = [self.label_mapping[ind] for ind in old_y]\n values = df.to_list()\n\n cds.data = {\n self._x: x,\n self._y: y,\n self._values: values\n }\n\n return cds", "def datatype_conversion(self):\n\n category_cols = self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data", "def create_source_cols(df, rafd=False):\n df = df.copy()\n\n if rafd:\n for name in ['source_name', 'target_name']:\n n = name.split('_')[0] + '_'\n\n df[n + 'pose'] = df[name].apply(lambda x: int(x.split('_')[0]) if (x != '-1_') else None)\n df[n + 'class'] = df[name].apply(lambda x: int(x.split('_')[1]) if (x != '-1_') else None)\n\n # I'm not quite sure if we will use gender or ethnicity, but it might be good to have\n df[n + 'ethnicity'] = df[name].apply(lambda x: x.split('_')[2] if (x != '-1_') else None)\n df[n + 'gender'] = df[name].apply(lambda x: x.split('_')[3] if (x != '-1_') else None)\n df[n + 'expression'] = df[name].apply(lambda x: x.split('_')[4] if (x != '-1_') else None)\n df[n + 'gaze'] = df[name].apply(lambda x: x.split('_')[5].split('.')[0] if (x != '-1_') else None)\n return df\n else:\n for name in ['source_name', 'target_name']:\n n = name.split('_')[0] + '_'\n\n df[n + 'class'] = df[name].apply(lambda x: int(x.split('_')[0]) if (x != '-1_') else None)\n 
df[n + 'session'] = df[name].apply(lambda x: float(x.split('_')[1]) if (x != '-1_') else None)\n\n df[n + 'pose'] = df[name].apply(lambda x: float(x.split('_')[2]) if (x != '-1_') else None)\n df[n + 'illumination'] = df[name].apply(lambda x: float(x.split('_')[3]) if (x != '-1_') else None)\n df[n + 'expression'] = df[name].apply(lambda x: float(x.split('_')[4]) if (x != '-1_') else None)\n\n\n df[n + 'pitch'] = df[name].apply(lambda x: float(x.split('_')[5][1:]) if (x != '-1_') else None)\n df[n + 'yaw'] = df[name].apply(lambda x: float(x.split('_')[6][1:]) if (x != '-1_') else None)\n df[n + 'roll'] = df[name].apply(lambda x: float(x.split('_')[7].split('.')[0][1:]) if (x != '-1_') else None)\n\n # Illumination changed, modify this and uncomment the previous roll\n df[n + 'roll'] = df[name].apply(lambda x: float(x.split('_')[7][1:]) if (x != '-1_') else None)\n df[n + 'illum_augmented'] = df[name].apply(lambda x: float(x.split('_')[8][2:]) if (x != '-1_') else None)\n df[n + 'intensity_augmented'] = df[name].apply(lambda x: float(x.split('_')[8].split('.')[0][2:]) if (x != '-1_') else None)\n return df", "def transform(self, data: pd.DataFrame):\n raise NotImplementedError", "def prepare_data():\n df = pd.read_csv('Wholesale customers data.csv')\n df_numeric = df[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']]\n return df, df_numeric", "def _process_nlx_157874_1_view(self, raw, limit=None):\n\n src_key = 'tables'\n model = Model(self.graph)\n col = self.resources[src_key]['columns']\n with open(raw, 'r') as rawread:\n reader = csv.reader(rawread, delimiter='\\t', quotechar='\\\"')\n row = next(reader)\n if not self.check_fileheader(col, row):\n pass\n\n for row in reader:\n # head -1 dvp.pr_nlx_157874_1|tr '\\t' '\\n'|\n # sed \"s|\\(.*\\)|# \\1 = row[col.index('\\1')]|g\"\n\n morphology_term_id = row[col.index('morphology_term_id')].strip()\n # morphology_term_num = row[col.index('morphology_term_num')]\n morphology_term_label = row[col.index('morphology_term_label')].strip()\n morphology_term_url = row[col.index('morphology_term_url')].strip()\n # terminology_category_label = row[\n # col.index('terminology_category_label')]\n # terminology_category_url = row[col.index('terminology_category_url')]\n # subcategory = row[col.index('subcategory')]\n objective_definition = row[col.index('objective_definition')].strip()\n subjective_definition = row[col.index('subjective_definition')].strip()\n comments = row[col.index('comments')].strip()\n synonyms = row[col.index('synonyms')].strip()\n replaces = row[col.index('replaces')].strip()\n small_figure_url = row[col.index('small_figure_url')].strip()\n large_figure_url = row[col.index('large_figure_url')].strip()\n # e_uid = row[col.index('e_uid')]\n # v_uid = row[col.index('v_uid')]\n # v_uuid = row[col.index('v_uuid')]\n # v_lastmodified = row[col.index('v_lastmodified')]\n # v_status = row[col.index('v_status')]\n # v_lastmodified_epoch = row[col.index('v_lastmodified_epoch')]\n\n # Add morphology term to graph as a class\n # with label, type, and description.\n model.addClassToGraph(\n morphology_term_id,\n morphology_term_label,\n blv.terms['PhenotypicFeature']\n )\n\n # Assemble the description text\n\n if subjective_definition != '' and not (\n re.match(r'.+\\.$', subjective_definition)):\n # add a trailing period.\n subjective_definition = subjective_definition + '.'\n if objective_definition != '' and not (\n re.match(r'.+\\.$', objective_definition)):\n # add a trailing period.\n objective_definition = 
objective_definition + '.'\n\n definition = ' '.join(\n (objective_definition, subjective_definition))\n\n model.addDefinition(morphology_term_id, definition,\n class_category=blv.terms['PhenotypicFeature'])\n\n # <term id> FOAF:depicted_by literal url\n # <url> type foaf:depiction\n\n # do we want both images?\n # morphology_term_id has depiction small_figure_url\n if small_figure_url != '':\n model.addDepiction(morphology_term_id, small_figure_url)\n\n # morphology_term_id has depiction large_figure_url\n if large_figure_url != '':\n model.addDepiction(morphology_term_id, large_figure_url)\n\n # morphology_term_id has comment comments\n if comments != '':\n model.addComment(morphology_term_id, comments)\n\n for syn in synonyms.split(';'):\n model.addSynonym(\n morphology_term_id,\n syn.strip(),\n self.globaltt['has_exact_synonym']\n )\n\n # morphology_term_id has_related_synonym replaces (; delimited)\n if replaces not in ['', synonyms]:\n for syn in replaces.split(';'):\n syn.strip()\n if syn != '':\n model.addSynonym(\n morphology_term_id,\n syn,\n self.globaltt['has_related_synonym']\n )\n\n # <morphology_term_id> <foaf:page> morphology_term_url\n if morphology_term_id is not None:\n reference = Reference(\n self.graph, morphology_term_id, self.globaltt['web page'])\n\n # TEC 201905:\n # Not so sure we need explicit <eom_uri> <webpage> <eom_url>.\n # since <eom_uri> IS the <eom_url>.\n\n reference.addPage(morphology_term_id, morphology_term_url)\n\n if limit is not None and reader.line_num > limit:\n break", "def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. 
group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def collect_data(data_file):\n dat = Table.read(data_file, format='fits')\n df_bytes = dat.to_pandas() # Convert to pandas dataframe\n df = pd.DataFrame() # Init empty dataframe for converted types\n\n # Convert byte columns to strings\n for column in df_bytes:\n if df_bytes[column].dtype == np.dtype('object'):\n df[column + \"_str\"] = df_bytes[column].str.decode(\"utf-8\")\n df[column] = df[column + \"_str\"].copy(deep=True)\n df.drop(column + \"_str\", axis=1, inplace=True)\n else:\n df[column] = df_bytes[column]\n # Drop infinity values.\n df = df[~df.isin([np.inf, -np.inf]).any(1)]\n return df", "def DEADcreate_v_fix_view():\n sql_view = \"\"\"create or replace view v_fix as\n SELECT \n fix.fix_ident, \n fix.fix_center,\n ST_Y(ST_Transform(fix.fix_center, 4326)) as fix_lat84,\n ST_X(ST_Transform(fix.fix_center, 4326)) as fix_lon84\n \n FROM \n fix\"\"\"\n conf.Cur.execute(sql_view)\n conf.Con.commit()", "def assign_column_types(self):\n type_list = [\"category\" if u_input == 1 else float for u_input in self.user_column_label]\n self.df = self.df.astype(dict(zip(self.df.columns, type_list)))\n df_types = pd.DataFrame(self.df.dtypes).reset_index()\n df_types.columns = [\"column_name\", \"dtype\"]\n df_types.dtype = df_types.dtype.astype(str)\n self.column_dtypes = {list(df_types.column_name)[i]: list(df_types.dtype)[i] for i in range(len(df_types))}", "def _populate_dataframe(index, columns, default_dict, dtype):\n new_df = pd.concat(\n [pd.Series(default_dict[fieldname],\n index=index,\n name=fieldname).astype(dt[1])\n for fieldname, dt in zip(columns, dtype.descr)],\n axis=1\n )\n return new_df", "def _process_data(self):\r\n # Rename columns to match final feature class\r\n self._rename_columns()\r\n # Add point ID column\r\n self._add_pointid()\r\n # Sort rows by transect id and timestamp\r\n self._sort_rows()\r\n # Fill Null records with a value\r\n self._fill_nulls()\r\n # Set site_code to lower case\r\n self._lower_site_code()\r\n # Create survey_id\r\n self._calc_survey_id()\r\n # Calculate nativesg column if at least one of the veg columns is a Native seagrass type\r\n if set(self.veg_columns).intersection(set(NATIVESG_CODES)) > 0:\r\n self.nativesg_columns = list(set(self.veg_columns).intersection(set(NATIVESG_CODES)))\r\n self._calc_nativesg()\r\n #\r", "def df2ds(df,inputs,target):\n cats = set([object])\n in_tables = {}\n table = None\n inputs = inputs if isinstance(inputs,list) else [inputs]\n for i in inputs:\n if df[i].dtype in cats:\n unique = df[target].unique()\n in_tables[i] = dict(zip(unique,range(len(unique))))\n if df[target].dtype in cats:\n unique = df[target].unique()\n table = dict(zip(unique,range(len(unique))))\n return DataSet({\n 'inputs': [ array2pt( [ df[i][r] if i not in in_tables else in_tables[i][df[i][r]] for i in inputs ] ) for r in range(df.shape[0]) ] ,\n 'targets':[ array2pt([table[v]]) if table else [v] for v in df[target] ]\n })", "def _get_data_from_view(self):\n self.log.info(\"Getting data from view: vw_AllSurveyData \")\n view_data = self.db.execute_pandas_query(self._get_query('vw_survey_data'))\n self._export_data_to_csv(view_data, 'fresh_survey_data.csv')", "def init_columns(cycle_df, datatype):\n (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col, char_cap_col, charge_or_discharge) = col_variables(datatype)\n assert 
type(cycle_df) == pd.DataFrame\n assert volt_col in cycle_df.columns\n assert dis_cap_col in cycle_df.columns\n assert char_cap_col in cycle_df.columns\n\n cycle_df = cycle_df.reset_index(drop=True)\n cycle_df['dV'] = None\n cycle_df['Discharge_dQ'] = None\n cycle_df['Charge_dQ'] = None\n #cycle_df['Discharge_dQ/dV'] = None\n #cycle_df['Charge_dQ/dV'] = None\n return cycle_df" ]
[ "0.64389884", "0.5948591", "0.5881242", "0.58062464", "0.57837504", "0.57338804", "0.56620854", "0.5654127", "0.5632112", "0.5579484", "0.557895", "0.5526446", "0.5522663", "0.55082947", "0.5498815", "0.5490089", "0.54709536", "0.5469522", "0.5448788", "0.54474294", "0.54463786", "0.54457855", "0.54416627", "0.54399467", "0.5433302", "0.5426678", "0.5377635", "0.5377573", "0.5357666", "0.53560114" ]
0.60024875
1
Get row index in source dataframe
def get_source_row(self, row):
    return self._dataframe.index[row]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_source_index(self, index: QModelIndex) -> QModelIndex:\n if 0 <= index.row() < len(self._dataframe.values):\n if 0 <= index.column() < len(self._dataframe.columns):\n row = self.get_source_row(index.row())\n column = self.get_source_column(index.column())\n return self.index(row, column)\n return QModelIndex()", "def _get_row_index(self, row: Row) -> int:\n row_index = -1\n for index, table_row in enumerate(self.table_data):\n if table_row.values == row.values:\n row_index = index\n break\n return row_index", "def get_rows(df):\n return df.shape[0]", "def get_row_index(self):\n for row in range(self.model.rowCount()):\n name_item = self.model.item(row, self.COL_NAME)\n fullpath = name_item.data(self.ROLE_FULLPATH)\n if fullpath == self.filepath:\n return row", "def get_index(df, index='date_time'): \n for i, full in enumerate(df.axes):\n if full.name == index:\n return (i, full)", "def _row_index(res, number_of_rows):\n row_index = int(res[:-1]) - 1\n assert row_index >= 0\n assert row_index < number_of_rows\n return row_index", "def get_row(self):\n return self._row_number", "def intrinsic_index_calc(df: pd.DataFrame):\n\n cur_index = 0\n df['Int_index'] = None\n df['Int_index'].iloc[0] = cur_index\n for i in range(len(df)):\n if df['Int_event'][i] in [-1, 1, -2, 2]:\n cur_index = cur_index + 1\n df['Int_index'].iloc[i] = cur_index\n\n return df", "def index_for_file (self):\n return self.family + '_' + self.filename # index for row of dataframe", "def __get_row(self, index: int) -> int:\n return index // self.columns", "def index(self):\n return self.frame.index", "def idx(self):\n return self._idx", "def get_index(df, string):\n return df.columns.to_list().index(string)", "def get_pd_row_column_idx(df, queries, type=\"column\"):\n\n names = df.columns.values if type == \"column\" else df.index.values if type == \"row\" else None\n sidx = np.argsort(names)\n Indices = sidx[np.searchsorted(names, queries, sorter=sidx)]\n\n return Indices", "def get_current_index(self):\r\n return self.contents_widget.currentRow()", "def index(self):\n return self.data.index", "def get_rownumber(self, first_col_val):\n\n try:\n (col_name, col_contents) = self.data[0]\n col_data = [col_name] + col_contents\n return col_data.index(first_col_val)\n except ValueError:\n return None", "def _index(data):\n\tif isinstance(data, (pd.Series, pd.DataFrame)):\n\t\treturn data.index\n\telse:\n\t\treturn pd.RangeIndex(len(data))", "def get_index(self, column):\r\n\r\n\t\treturn self.columns.index(column)", "def _get_target_index(self):\n return (self.index + self.source_window * (not self.overlapping) +\n self.offset)", "def getObjectComponentIndexes(df):\n centroids = findClusterCenters(df)\n topYIndex = centroids.sort_values('y').index[-1]\n\n return df[df['label'] == topYIndex].index", "def index(self):\n return self.dataset.index", "def getSourceIndex(self):\n return self.sourceIndex", "def idx(self):\n return int(self.__ph.get('idx', 0))", "def timestep_idx(self, timestep):\n timestep = pd.to_datetime(timestep)\n idx = np.where(self.time_index == timestep)[0][0]\n\n return idx", "def _index(self) -> int:\n return -1", "def get_row_id(self, index):\n try:\n return self._table.getRowId(index)\n except Registry.Table.INVALID_ROW:\n import traceback \n raise IndexError(\n \"Index %s out of bounds. 
Original exception: %s\" % \\\n (index, traceback.format_exc()))", "def index(self):\n return (self._data_dict.get('tab_index', -1), self._data_dict.get('index_in_tab', -1))", "def find_index(row):\n value = row[index]\n if value in seen:\n return seen[value]\n for row_ in merged.iter_dicts(True):\n if row_[index] == value:\n seen[value] = row_[\"index\"]\n return row_[\"index\"]\n return None", "def index(self) -> int:\r\n return self._index" ]
[ "0.7043844", "0.69685376", "0.67891747", "0.6734552", "0.66790116", "0.6637733", "0.65600115", "0.63639086", "0.63481534", "0.62835675", "0.6162552", "0.6137102", "0.6129762", "0.6109574", "0.6097899", "0.6087226", "0.6052882", "0.6049967", "0.60444957", "0.6033079", "0.6028892", "0.59986603", "0.59890145", "0.598807", "0.5964209", "0.59350824", "0.5917061", "0.59101725", "0.59064806", "0.5887157" ]
0.7654912
0
Check if column is filtered
def hasFilter(self, column) -> bool:
    column_name = self._dataframe.columns[column]
    return column_name in self._filters.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_column(col, row):\n return col == column", "def check_cols_methane(name):\n return True if name in ['SampleDay', 'SampleHour', 'Decimal Year',\n 'Peak Area 1', 'Peak Area 2', 'Run median', 'Daily Median'] else False", "def filter_row(col, rw):\n return rw == row", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def visible(self):\r\n return self.column.visible", "def _has_filters(self):\n return self.query.has_filters()", "def _filter(self, col: str, val: Any) -> pd.DataFrame:\n return self._df[self._df[col] == val]", "def test_column_presence(self):\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )", "def test_filter_comparison_func_false(self):\n\n num_props_original = len(self.test_table._odmldict)\n self.test_table.filter(comparison_func=lambda x, y: True, PropertyName='')\n self.assertEqual(len(self.test_table._odmldict), num_props_original)\n\n self.test_table.filter(comparison_func=lambda x, y: False, PropertyName='')\n self.assertEqual(len(self.test_table._odmldict), 0)", "def getFilter(self):\n col = self.filtercol.get()\n val = self.filtercolvalue.get()\n op = self.operator.get()\n booleanop = self.booleanop.get()\n return col, val, op, booleanop", "def has_column(self, column):\n if column == '*':\n return True\n for c in self.columns:\n if column == c.data.name:\n return True\n return False", "def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df", "def has_filter(self) -> bool:\n return self.filter_client_reference_id or self.filter_mhr_number or self.filter_registration_type or \\\n self.filter_reg_start_date or self.filter_status_type or self.filter_submitting_name or \\\n self.filter_username", "def is_country_column_present_in_re_analysis_page(self):\n return self.is_specific_column_present(self.re_analysis_grid_div_id, self.column_name_country)", "def filter():\n return get_filter_data(db, MyTable)", "def check_col(self):\n return (set(map(lambda x: x.lower(),\n self.config['dtypes'])) -\n set(self.metadata.name.values))", "def checkIfColumnControlledVocab(self, column_name):\n try:\n con = self.getMetadataDatabaseConnection()\n valid_controlled_column=0\n db_output=con.cursor().callproc('check_if_column_controlled',\n [column_name.upper(),\\\n valid_controlled_column])\n if db_output[1]==0:\n return False\n else:\n return True\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def test_feature_is_filtered(self):\n\n # Duplicate 1st row in var and assigned to 2nd\n 
self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. \"\n \"All values for these features must be 0.\"\n ],\n )", "def row_filter(self) -> Optional[pulumi.Input['DataCellsFilterRowFilterArgs']]:\n return pulumi.get(self, \"row_filter\")", "def get_filter(self):\n if not self.has_changes and hasattr(self, \"filter_result\"):\n return self.filter_result\n negation = False\n list_input = False\n self.filter_result = None\n if self.input_val.value:\n lhs = 'self.data[\"{}\"]'.format(self.select_col.value)\n col_dtype = self.data_dict[\n self.data_dict.Columns == self.select_col.value\n ].Dtypes.values[0]\n col_values = self.data_dict[\n self.data_dict.Columns == self.select_col.value\n ].Values.values[0]\n func = CONDITIONOPERATIONS[self.condition.value]\n if \"isin\" in func:\n list_input = True\n if col_dtype in [DTYPES.category, DTYPES.string]:\n if \"!\" in func:\n func = func[1:]\n negation = True\n if not list_input and col_dtype == DTYPES.string:\n case = True\n if func[0] == \"i\":\n func = func[1:]\n case = False\n func = \"str.{}\".format(func)\n value = '\"{}\"'.format(self.input_val.value)\n if case is False:\n if func.split(\".\")[-1] in [\n COMPARATORS.startswith,\n COMPARATORS.endswith,\n ]:\n func = \"str.lower().\" + func\n value = value.lower()\n else:\n value = value + \", case=False\"\n elif list_input:\n value = self.input_val.value\n if isinstance(value, str):\n value = (\n self.input_val.value.replace(\", \", \",\")\n .replace(\" ,\", \",\")\n .split(\",\")\n )\n else:\n value = str(self.input_val.value)\n rhs = \".{}({})\".format(func, value)\n elif DTYPES.datetime in col_dtype:\n start_date, end_date = self.input_val.value\n if start_date > col_values[0]:\n self.filter_result = (\n \"(self.data['\"\n + self.select_col.value\n + \"']\"\n + \">= '\"\n + str(start_date)\n + \"'\"\n )\n if end_date < col_values[1]:\n if self.filter_result is None:\n self.filter_result = (\n \"(self.data['\"\n + self.select_col.value\n + \"']\"\n + \"<= '\"\n + str(end_date)\n + \"'\"\n )\n else:\n self.filter_result += (\n \") & (\"\n + \"self.data['\"\n + self.select_col.value\n + \"']<= '\"\n + str(end_date)\n + \"')\"\n )\n self.filter_result = \"(\" + self.filter_result\n self.filter_result += \")\"\n self.filter_result = eval(self.filter_result)\n return self.filter_result\n elif list_input and isinstance(self.input_val.value, list):\n rhs = f\".{func}({self.input_val.value})\"\n elif list_input:\n rhs = f\".{func}([{self.input_val.value}])\"\n else:\n if isinstance(self.input_val.value, list):\n input_value = str(self.input_val.value)[1:-1]\n else:\n input_value = str(self.input_val.value)\n rhs = CONDITIONOPERATIONS[self.condition.value] + input_value\n filter_str = (\"-\" if negation else \"\") + lhs + rhs\n self.filter_result = eval(filter_str)\n return self.filter_result", "def valid_column(self, col: int) -> bool:\n\n return self.check_bounds(0, col) and self.grid[0][col] == \" \"", "def are_there_available_columns_to_play(self):\n available_columns = self.get_available_columns()\n return self._state.n_neutral_markers != 3 and len(available_columns) > 0", "def _filter(self, row):\n if not self._head:\n 
self._head = self._create_head(row)\n if self._args.head:\n return row\n\n if 'cond' not in self._state:\n self._state['cond'] = self._replace_fields(self._args.cond)\n\n r = list(map(self._convert, row))\n if eval(self._state['cond']):\n return row", "def test_columns_not_in_raw_var(self):\n\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"],\n )", "def has_filter(self, param: str) -> bool:\n return param in self.filter_names", "def sql_filter_bool(my_table='', colName='', var='', **kw):\n\tif (my_table=='') or (colName=='') or (var==''):\n\t\treturn dict(sql='',clauseTables=[])\n\telse:\n\t\tsql = my_table+\".\"+colName+\" == \"+var\n\t\treturn dict(sql=sql,clauseTables=[])", "def _column_exists(self, tbname, colname):\n self._check_file(tbname)\n tb = tbtool()\n tb.open(tbname)\n cols = tb.colnames()\n tb.close()\n return (colname in cols)", "def filter_columns(self, identifier, columns, names, datastore):\n # Get dataset. Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Filter columns by their identifier.\n df = dataset.to_dataframe()\n df = vizual.filter_columns(df=df, colids=columns, names=names)\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations.filter(columns=columns)\n )\n return VizualApiResult(ds)", "def _applyFilters(self) -> None:\n self._dataframe = self._source.loc[:, self._visable_columns]\n for column, value in self._filters.items():\n if value is not None:\n self._dataframe = self._dataframe[self._source[column] == value]\n else:\n self._dataframe = self._dataframe[self._source[column].isnull()]\n\n self.layoutChanged.emit()" ]
[ "0.714587", "0.6344972", "0.62104183", "0.6077036", "0.6077036", "0.60573", "0.6053506", "0.60512096", "0.60433084", "0.5973961", "0.59735787", "0.5946853", "0.59380823", "0.5930365", "0.5920199", "0.59102327", "0.5894116", "0.58357984", "0.58133674", "0.57480985", "0.57395756", "0.57298654", "0.57262254", "0.57093674", "0.56824225", "0.5656391", "0.56456774", "0.5642701", "0.5623312", "0.5616608" ]
0.6831352
1
Set filter value for column
def setFilter(self, column, value) -> None:
    if not self.hasFilter(column):
        column_name = self._dataframe.columns[column]
        self._filters[column_name] = value
        self._applyFilters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_or_update_filter(column, value, filter_type='eq', _filter=None):\n if _filter is None:\n _filter = {}\n\n _filter[column] = {filter_type: value}\n\n return _filter", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def filter(self, filter):\n self._filter = filter", "def set_FilterValue(self, value):\n super(GetCallbackDataInputSet, self)._set_input('FilterValue', value)", "def updateFilterColumn(self, currentIndex):\n self.logsView.updateFilterColumn(currentIndex=currentIndex)", "def add_filter(self, name: str, value: any):\n self.filters[name] = value", "def __setitem__(self, query_filter, value):\n saved_items = []\n for index, query in enumerate(self.__bound_queries):\n saved_items.append(query.get(query_filter, None))\n try:\n query[query_filter] = value\n except:\n for q, old_value in itertools.izip(self.__bound_queries[:index],\n saved_items):\n if old_value is not None:\n q[query_filter] = old_value\n else:\n del q[query_filter]\n raise", "def _filter(self, col: str, val: Any) -> pd.DataFrame:\n return self._df[self._df[col] == val]", "def set_sensitive_to_filter(self, sensitive_name, sensitive_val):\n self.name += str(sensitive_val)\n self.sensitive_filter = sensitive_val\n self.sensitive_for_metric = sensitive_name", "def dbtrace_filter_change(filter_name_field):\n\n pass", "def set_git_filter_attribute(self, filtername):\n self._filter = filtername", "def set_filter(self, category, code):\n flt_setter = self.__filter_set_map.get(category, None)\n if flt_setter is not None:\n flt_setter(code)", "def _applyFilters(self) -> None:\n self._dataframe = self._source.loc[:, self._visable_columns]\n for column, value in self._filters.items():\n if value is not None:\n self._dataframe = self._dataframe[self._source[column] == value]\n else:\n self._dataframe = self._dataframe[self._source[column].isnull()]\n\n self.layoutChanged.emit()", "def add_filter_entry(self, filter_column=None, filter_entry=None):\n new_filter_label = tkinter.Label(self.rightmostframe, text='Custom Column Filter:')\n new_filter_label.pack(pady=4)\n\n my_str = tkinter.StringVar()\n\n new_filter_columns = tkinter.OptionMenu(self.rightmostframe, my_str, *self.columns_list)\n if filter_column != None:\n my_str.set(filter_column)\n new_filter_columns.pack(pady=4)\n\n new_filter_entry = tkinter.Entry(self.rightmostframe)\n if filter_entry != None:\n new_filter_entry.insert(0, filter_entry)\n new_filter_entry.pack(pady=4)\n \n self.filter_entries_list.append((new_filter_entry, my_str))", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def _custom_filter(self, query):\r\n return query", "def _setParam(self, callerId, key, value):\n if key not in self.FilterParameters:\n self.__docWriter.addParam(callerId, key)", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def setColumn(self,item,column,value):\n raise UncodedError", "def set_FilterName(self, value):\n super(GetCallbackDataInputSet, self)._set_input('FilterName', value)", "def setFilter(self, afilter):\n\n if afilter in (self.FilterU, self.FilterG, self.FilterR, self.FilterI, self.FilterZ, self.FilterY):\n self.filter = afilter\n else:\n raise ValueError(\"No '%s' filter.\" % afilter)", "def set_ChangeFilter(self, iid, dv, dt, 
extrema = True):\n self.filters[iid] = ChangeFilter(self, dv, dt, extrema)", "def filter_resolution_grid(self, column_name, filter_item_text):\n self.grid_filter_with_textbox(self.resolution_grid_div_id, column_name, filter_item_text)", "def update_file_column(self,col,filter_safe=0):\n \n # invalid sheet catcher\n if self.sheet == 'blank':\n print('Error: please connect to an excel document first.')\n return\n \n # (x1,y1) are excel coordinates, (x2,y2) are dataframe coordinates\n # +2: numpy starts at 0 (+1), dataframe uses 1st column/row as indices (+1)\n y1 = int(np.where(self.df.columns == col)[0][0] + 2)\n y2 = col\n n = len(self.df.index)\n \n if filter_safe == 0: # vastly faster, but doesn't work if there are filters in the xl\n # newaxis because we need a 2D array to transpose it\n self.sheet.range((2,y1),(int(n),y1)).value = self.df[y2].fillna('#N/A').values[np.newaxis].T\n \n else: # 381x slower, but filling elementwise is filter-safe\n print('\\tWriting filter-safe data, please be patient... ',end=''),\n \n for x1 in range(2,n,1):\n x2 = self.sheet.range((x1,1)).value\n data = self.df.loc[x2][y2]\n \n if np.isnan(data):\n self.sheet.range((x1,y1)).value = '#N/A'\n else:\n self.sheet.range((x1,y1)).value = data\n print('Done')\n \n #save the changes so that read_excel can pull the current metrics data\n self.book.save()\n if type(col) == dtt.datetime:\n print('\\tWeek data updated.')\n elif type(col) == str:\n print('\\tColumn %s updated.' % col)\n else:\n print('\\tData updated.')", "def _initFilterTable(self):\n\n t = self.tableWidget_filter # shorthand notation\n\n ### Header population & properties\n t.setHorizontalHeaderLabels(self.data.filter_col_name_list)\n t.horizontalHeader().setMovable(True)\n\n ### Item population\n nRows = len(self.data.filter_spec)\n t.setRowCount(nRows)\n for (j, spec) in enumerate(self.data.filter_spec):\n for (i, filter_prop) in enumerate(self.data.filter_property_list):\n if filter_prop is not 'exclude':\n if filter_prop in spec[0]:\n item_string = spec[0][filter_prop]\n else:\n item_string = ''\n t.setItem(j,i,\n Qt.QTableWidgetItem(item_string))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsEnabled) # Make it editable\n else:\n t.setItem(j,i,Qt.QTableWidgetItem(''))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsUserCheckable|\n Qt.Qt.ItemIsEnabled) # Make it checkable\n if spec[1]: # exclusion flag\n t.item(j,i).setCheckState(Qt.Qt.Checked)\n else:\n t.item(j,i).setCheckState(Qt.Qt.Unchecked)\n\n\n\n ### Presentation formatting\n t.resizeColumnsToContents()\n for i in range(t.columnCount()):\n if t.columnWidth(i) > self.max_auto_adjust_column_width:\n t.setColumnWidth(i,self.max_auto_adjust_column_width)", "def filter_column(col, row):\n return col == column", "def _filter_model(self) -> None:\n indexes = self.tableView.selectedIndexes()\n if len(indexes) > 0:\n cell = self.tableView.model().itemData(indexes[0])\n if self.tableView.model().hasFilter(indexes[0].column()):\n self.tableView.model().resetFilter(indexes[0].column())\n if self._last_sorted_column != -1:\n # Resore last sort\n self.tableView.model().sort(self._last_sorted_column, self._last_sort_order)\n else:\n self.tableView.model().setFilter(indexes[0].column(), cell)\n idx = self.tableView.model().index(0, indexes[0].column())\n self.tableView.selectionModel().select(idx, QItemSelectionModel.ClearAndSelect)\n 
self.tableView.resizeColumnToContents(indexes[0].column())\n Cli3App.instance().updateMainWindiwSignal.emit()", "def reset_filters():\n logger.info(\"reset filters\")\n global filter_item\n filter_item = -1\n filter_topics_table.view.filters = [IndexFilter()]\n filter_custom_table.view.filters = [IndexFilter()]\n filter_label.text = \"\"", "def _augment_filter(self, header):\n return header" ]
[ "0.6479343", "0.6327096", "0.6258252", "0.62564", "0.60858995", "0.6042315", "0.60086817", "0.59755254", "0.5903751", "0.58324903", "0.5778453", "0.57581186", "0.5745971", "0.57118183", "0.5702547", "0.5678945", "0.5658681", "0.5639756", "0.5639756", "0.56249976", "0.5568201", "0.5559566", "0.5557317", "0.5544242", "0.5529813", "0.5527344", "0.55228966", "0.5513001", "0.54912204", "0.5487422" ]
0.7826371
0
Reset filter value for column
def resetFilter(self, column):
    if self.hasFilter(column):
        column_name = self._dataframe.columns[column]
        del self._filters[column_name]
        self._applyFilters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_filters():\n logger.info(\"reset filters\")\n global filter_item\n filter_item = -1\n filter_topics_table.view.filters = [IndexFilter()]\n filter_custom_table.view.filters = [IndexFilter()]\n filter_label.text = \"\"", "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def setFilter(self, column, value) -> None:\n if not self.hasFilter(column):\n column_name = self._dataframe.columns[column]\n self._filters[column_name] = value\n self._applyFilters()", "def _filter_model(self) -> None:\n indexes = self.tableView.selectedIndexes()\n if len(indexes) > 0:\n cell = self.tableView.model().itemData(indexes[0])\n if self.tableView.model().hasFilter(indexes[0].column()):\n self.tableView.model().resetFilter(indexes[0].column())\n if self._last_sorted_column != -1:\n # Resore last sort\n self.tableView.model().sort(self._last_sorted_column, self._last_sort_order)\n else:\n self.tableView.model().setFilter(indexes[0].column(), cell)\n idx = self.tableView.model().index(0, indexes[0].column())\n self.tableView.selectionModel().select(idx, QItemSelectionModel.ClearAndSelect)\n self.tableView.resizeColumnToContents(indexes[0].column())\n Cli3App.instance().updateMainWindiwSignal.emit()", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def _applyFilters(self) -> None:\n self._dataframe = self._source.loc[:, self._visable_columns]\n for column, value in self._filters.items():\n if value is not None:\n self._dataframe = self._dataframe[self._source[column] == value]\n else:\n self._dataframe = self._dataframe[self._source[column].isnull()]\n\n self.layoutChanged.emit()", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def updateFilterColumn(self, currentIndex):\n self.logsView.updateFilterColumn(currentIndex=currentIndex)", "def ResetAvgFilter(self):\n self.k = 1\n self.prevAvg = 0", "def reset_instances_filter(self):\n page_instances = self.page_instances()\n page_instances.field_filter_instances.value = ''\n page_instances.button_filter_instances.click()", "def do_reset(self, args):\n\t\tself.parent.filter = {}\n\t\tself.apply_filter()\n\t\tself._update_prompts()", "def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p", "def reset(self):\n self.sample['masked'] = [False]*len(self.sample.index)\n self.sample['colour'] = ['undefined']*len(self.sample.index)", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def filter(self, filter):\n self._filter = filter", "def reset(self):\n for col in self._columns:\n col.reset()\n\n self._next_column = 0\n self._columns = []", "def highpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(self._highpass_sos)\n print('Zi shape: ', zi.shape, data.shape)\n self._highpass_state = data[0, :] * np.repeat(zi[:, :, np.newaxis],\n data.shape[1], axis=2)\n logging.info('Resetting the high-pass filter state.')", "def reset(self):\n for item in TextChannelFilterItem.objects(channel_filter=self):\n item.delete()\n self.reset_counters()\n self.retrain()", "def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p", "def clear_columns(self):\n self._columns = []\n return self", "def reset(self):\n self.table[:, :] = 0\n self.counts[:] = 0\n self.names = []\n self.hashesperid.resize(0)\n 
self.dirty = True", "def reset_s(self):\n self.s = np.copy(self.f_uniq) # (current) solution, selected column", "def filter_(self,fltr:torch.tensor):\n self.container = self.container[:,fltr]\n self.count_hist = self.count_hist[fltr]", "def removeAutoSaveFilter(filter):", "def _initFilterTable(self):\n\n t = self.tableWidget_filter # shorthand notation\n\n ### Header population & properties\n t.setHorizontalHeaderLabels(self.data.filter_col_name_list)\n t.horizontalHeader().setMovable(True)\n\n ### Item population\n nRows = len(self.data.filter_spec)\n t.setRowCount(nRows)\n for (j, spec) in enumerate(self.data.filter_spec):\n for (i, filter_prop) in enumerate(self.data.filter_property_list):\n if filter_prop is not 'exclude':\n if filter_prop in spec[0]:\n item_string = spec[0][filter_prop]\n else:\n item_string = ''\n t.setItem(j,i,\n Qt.QTableWidgetItem(item_string))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsEnabled) # Make it editable\n else:\n t.setItem(j,i,Qt.QTableWidgetItem(''))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsUserCheckable|\n Qt.Qt.ItemIsEnabled) # Make it checkable\n if spec[1]: # exclusion flag\n t.item(j,i).setCheckState(Qt.Qt.Checked)\n else:\n t.item(j,i).setCheckState(Qt.Qt.Unchecked)\n\n\n\n ### Presentation formatting\n t.resizeColumnsToContents()\n for i in range(t.columnCount()):\n if t.columnWidth(i) > self.max_auto_adjust_column_width:\n t.setColumnWidth(i,self.max_auto_adjust_column_width)", "def reset_values(self):\n\n self.values = []", "def __call__(self,row):\n filterType = row.pop(0)\n if filterType: #i.e. not none and >0\n result = self.dispatch[filterType](row,self.prev)\n self.prev = result\n return result\n else:\n self.prev = row\n return self.prev", "def lowpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(self._lowpass_sos)\n self._lowpass_state = data[0, :] * np.repeat(zi[:, :, np.newaxis],\n data.shape[1], axis=2)\n logging.info('Resetting the low-pass filter state.')", "def remove_filter_field(self, field):\n if self.filters:\n category_filter = self.filters.get(str(field.category.id), None)\n\n if category_filter:\n field_filter = category_filter.pop(field.key, None)\n\n if field_filter:\n self.save()", "def reset_columns(self):\n\n reset_cols = [i for i in self.__cols if i in self.__df_timings.columns]\n self.__df_timings = self.__df_timings.loc[:, reset_cols]\n return" ]
[ "0.7546535", "0.65499115", "0.62967247", "0.6168715", "0.6093071", "0.59730977", "0.58553", "0.576923", "0.57680595", "0.56759405", "0.5591549", "0.5562137", "0.55558145", "0.55424154", "0.55133826", "0.55060047", "0.5501112", "0.54963195", "0.54875726", "0.54688084", "0.54570127", "0.54539204", "0.5448857", "0.543181", "0.5399913", "0.5364161", "0.5360075", "0.5356363", "0.53531724", "0.53512245" ]
0.71770364
1
Make visible dataframe by applying all filters on source
def _applyFilters(self) -> None:
    self._dataframe = self._source.loc[:, self._visable_columns]
    for column, value in self._filters.items():
        if value is not None:
            self._dataframe = self._dataframe[self._source[column] == value]
        else:
            self._dataframe = self._dataframe[self._source[column].isnull()]

    self.layoutChanged.emit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def __handle_filters(self, df) -> DataFrame:\n if not len(df):\n return df\n starting_df = df.copy()\n running_df = df\n for filter_ in self.filters:\n filter_value = filter_.value\n if filter_value is None:\n continue\n filter_condition = filter_.condition\n if filter_condition == FilterCondition.OR:\n df = starting_df\n else:\n df = running_df\n\n column_name = filter_.columnName\n operation = filter_.operation\n if operation == FilterOperation.TOP:\n df = df.sort_values(by=column_name, ascending=False, na_position='last').head(filter_value)\n elif operation == FilterOperation.BOTTOM:\n df = df.sort_values(by=column_name, ascending=True, na_position='last').head(filter_value)\n elif operation == FilterOperation.ABSOLUTE_TOP:\n df = df.reindex(df[column_name].abs().sort_values(ascending=False, na_position='last').index).head(\n filter_value)\n elif operation == FilterOperation.ABSOLUTE_BOTTOM:\n df = df.reindex(df[column_name].abs().sort_values(ascending=True, na_position='last').index).head(\n filter_value)\n elif operation == FilterOperation.EQUALS:\n if not isinstance(filter_value, list):\n filter_value = [filter_value]\n # Special case to handle different types of floats\n if isinstance(filter_value[0], str):\n df = df.loc[df[column_name].isin(filter_value)]\n else:\n # Add a tolerance for the special case to handle different types of floats\n df = df[np.isclose(df[column_name].values[:, None], filter_value, atol=1e-10).any(axis=1)]\n elif operation == FilterOperation.NOT_EQUALS:\n if not isinstance(filter_value, list):\n filter_value = [filter_value]\n if isinstance(filter_value[0], str):\n df = df.loc[~df[column_name].isin(filter_value)]\n else:\n # Add a tolerance for the special case to handle different types of float\n df = df[~np.isclose(df[column_name].values[:, None], filter_value, atol=1e-10).any(axis=1)]\n elif operation == FilterOperation.GREATER_THAN:\n df = df[df[column_name] > filter_value]\n elif operation == FilterOperation.LESS_THAN:\n df = df[df[column_name] < filter_value]\n elif operation == FilterOperation.LESS_THAN_EQUALS:\n df = df[df[column_name] <= filter_value]\n elif operation == FilterOperation.GREATER_THAN_EQUALS:\n df = df[df[column_name] >= filter_value]\n else:\n raise MqValueError(f'Invalid Filter operation Type: {operation}')\n\n if filter_.condition == FilterCondition.OR:\n # Need to merge the results\n running_df = running_df.merge(df, how='outer')\n else:\n running_df = df\n\n return running_df", "def start_pipeline(df):\n new_df = df.copy()\n new_df = new_df[[\"Title\", \"Genre\", \"Director\", \"Actors\", \"Plot\"]]\n return new_df", "def filter(self, filters):", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def _apply_filters(self, df):\n df = df[(df['Date'] >= 
self.start_date) &\n (df['Date'] <= self.end_date)]\n return df", "def __init__(self, data, columns):\n super().__init__()\n self._filters = {}\n self._columns = [Column(column) for column in columns]\n self._source = pd.DataFrame(data, columns=[column.name for column in self._columns], dtype=str)\n # Change columns datatypes\n for name, type in [(column.name, column.type) for column in self._columns]:\n if type == 'NUMBER':\n self._source[name] = self._source[name].astype('float')\n elif type == 'INTEGER':\n # self._source[name] = self._source[name].round()\n self._source[name] = self._source[name].astype('float')\n self._source[name].fillna(float(0), inplace=True)\n self._source[name] = self._source[name].astype(int)\n elif type in ['DATE', 'DATETIME', 'TIME']:\n self._source[name] = pd.to_datetime(self._source[name])\n elif type == 'BOOL':\n self._source[name] = self._source[name].apply(lambda x: str(x).upper() == 'TRUE').astype('bool')\n\n self._visable_columns = [column.name for column in self._columns if\n column.title is not None and column.visable == True]\n self._dataframe = self._source.loc[:, self._visable_columns]", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def filter(self, target_model):\n # return filter_dict_to_target_model(self._axl_data, target_model)\n super().__setattr__('_axl_data', filter_dict_to_target_model(self._axl_data, target_model))\n return self", "def add_append_filter(source, args, index):\n exclude_columns = args.get('append-exclude-columns%02d' % index, False)\n append_sources = []\n for subindex in range(1, 100):\n append_source = args.get('append-dataset%02d-%02d' % (index, subindex))\n if append_source:\n append_sources.append(append_source)\n row_query = args.get('append-where%02d' % index, None)\n return source.append(\n append_sources=append_sources,\n add_columns=(not exclude_columns),\n queries=row_query\n )", "def add_row_filter(source, args, index):\n queries = []\n for subindex in range(1, 6):\n query = args.get('select-query%02d-%02d' % (index, subindex))\n if query:\n queries.append(query)\n reverse = (args.get('select-reverse%02d' % index) == 'on')\n if reverse:\n return source.without_rows(queries)\n else:\n return source.with_rows(queries)", "def get_filters(self):", "def apply_filter(self, filter_arg):\n filtered_entries = self.visual.apply_filter(filter_arg, self.get_current_entries())\n # idxs = self.selector.select_by_objects(filtered_entries, yield_ones_index=True)\n self.visual.print_entries_enum(filtered_entries, None)\n # self.list(idxs)", "def _screen(self, include=True, **kwargs):\n df = self.copy()\n for k, v in list(kwargs.items()):\n v = [v] if type(v) != list else v\n if include:\n df = df[df[k].str.contains('|'.join(v), flags=re.IGNORECASE).fillna(False)]\n else:\n df = df[df[k].str.contains('|'.join(v), flags=re.IGNORECASE).fillna(False) == False]\n return df", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def _initFilterTable(self):\n\n t = self.tableWidget_filter # shorthand notation\n\n ### Header population & properties\n t.setHorizontalHeaderLabels(self.data.filter_col_name_list)\n t.horizontalHeader().setMovable(True)\n\n ### Item population\n nRows = len(self.data.filter_spec)\n t.setRowCount(nRows)\n for (j, spec) in enumerate(self.data.filter_spec):\n for (i, filter_prop) in enumerate(self.data.filter_property_list):\n if 
filter_prop is not 'exclude':\n if filter_prop in spec[0]:\n item_string = spec[0][filter_prop]\n else:\n item_string = ''\n t.setItem(j,i,\n Qt.QTableWidgetItem(item_string))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsEnabled) # Make it editable\n else:\n t.setItem(j,i,Qt.QTableWidgetItem(''))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsUserCheckable|\n Qt.Qt.ItemIsEnabled) # Make it checkable\n if spec[1]: # exclusion flag\n t.item(j,i).setCheckState(Qt.Qt.Checked)\n else:\n t.item(j,i).setCheckState(Qt.Qt.Unchecked)\n\n\n\n ### Presentation formatting\n t.resizeColumnsToContents()\n for i in range(t.columnCount()):\n if t.columnWidth(i) > self.max_auto_adjust_column_width:\n t.setColumnWidth(i,self.max_auto_adjust_column_width)", "def merge_dfs(userdf, filtered_apidf):\n userdf['SOURCE']='USER'\n filtered_apidf['SOURCE']='API'\n filtered_apidf.rename(columns={'_id': 'bids_name'}, inplace=True)\n\n merged_df = pd.concat([userdf,filtered_apidf], sort=True).fillna(0)\n # merged_df['_INDEX']=merged_df.index\n\n # merged_df_with_index = pd.DataFrame(index = merged_df.index, data= merged_df)\n return merged_df", "def run(self):\n query = self.query\n\n # count before filtering\n # self.cardinality = query.add_columns(self.columns[0].sqla_expr).count()\n\n self._set_column_filter_expressions()\n self._set_global_filter_expression()\n self._set_sort_expressions()\n self._set_yadcf_data(query)\n\n # apply filters\n query = query.filter(\n *[e for e in self.filter_expressions if e is not None])\n self.filtered_query = deepcopy(query)\n\n # self.cardinality_filtered = query.add_columns(\n # self.columns[0].sqla_expr).count()\n\n # apply sorts\n query = query.order_by(\n *[e for e in self.sort_expressions if e is not None])\n\n # add paging options\n length = int(self.params.get('length'))\n if length >= 0:\n query = query.limit(length)\n elif length == -1:\n pass\n else:\n raise(ValueError(\n 'Length should be a positive integer or -1 to disable'))\n query = query.offset(int(self.params.get('start')))\n\n # add columns to query\n query = query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.filtered_query = self.filtered_query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.query = query\n # fetch the result of the queries\n column_names = [col.mData if col.mData else str(i)\n for i, col in enumerate(self.columns)]\n # self.results = [{k: v for k, v in zip(\n # column_names, row)} for row in query.all()]", "def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:", "def filter(self, **kwargs):\n kwargs['query'] += ' FROM {0}'\n return kwargs", "def _initialize_data_filter(self):\n df_params = self._loading_params.copy()\n df_params[\"filter_negate\"] = True\n df_params[\"filter_upper\"] = True\n self._data_filter = LoadProcessedData(**df_params)", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def set_filters(self, filters: List[DataGridFilter]):\n self.filters = filters", "def reduce_data_to_necessary_columns(filtered_df):\n hist_df = filtered_df[\n [\n \"UniqueName\",\n \"Joins\",\n \"Projection_Attributes\",\n \"Selection_Attributes\",\n \"GroupBy\",\n \"OrderBy\",\n \"Strings\",\n \"Tables\",\n ]\n ].set_index(\"UniqueName\")\n return hist_df", "def pwgrwlfilter(self):\n return None", "def reset_filters():\n logger.info(\"reset filters\")\n global filter_item\n filter_item = -1\n 
filter_topics_table.view.filters = [IndexFilter()]\n filter_custom_table.view.filters = [IndexFilter()]\n filter_label.text = \"\"", "def df_measure(function,row_filters,col_filters,**options):\n\tresults=pd.DataFrame()\n\tfor col_key,col_filter in col_filters.items():\n\t\tjoined_options={**options,**col_filter}\n\t\tresults[col_key]=series_measure(function,row_filters,**joined_options)\n\treturn results", "def filter(self, *args, **kwargs):" ]
[ "0.64257777", "0.64170974", "0.60299", "0.590793", "0.5862535", "0.5817036", "0.5791453", "0.57656854", "0.5761146", "0.57199377", "0.566996", "0.5668453", "0.56454384", "0.55735713", "0.5497803", "0.547759", "0.54444075", "0.5442627", "0.54268384", "0.54174805", "0.541208", "0.5375646", "0.53669006", "0.5358131", "0.5328", "0.5298258", "0.52794", "0.5275598", "0.5272202", "0.52715117" ]
0.7271048
0
Get cell style from hidden styles column
def _get_cell_style(self, row: int, column: int):
    style = None
    if 'STYLE' in self._source.columns:
        style = self._source.loc[self.get_source_row(row), 'STYLE']
    column_name = self._dataframe.columns[column]
    if f'STYLE_{column_name}' in self._source.columns:
        style = self._source.loc[self.get_source_row(row), f'STYLE_{column_name}']
    if style is not None:
        if style in Cli3App.instance().styles.keys():
            return Cli3App.instance().styles[style]
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_highlighted_style(feature):\r\n\r\n return {\"weight\": 3, \"color\": \"black\"}", "def design_cells(val):\n # ugly way to get if it is emoji or prob-val\n if \".\" not in str(val[0]):\n # emoji\n return ['font-size:20pt'] * TOP_E\n else:\n # prob\n return ['color: green'] * TOP_E", "def GetColumnColour(self, column):\r\n\r\n return self._header_win.GetColumn(column).GetColour()", "def style(self):\n return self['style']", "def get_row_style(self, console: \"Console\", index: int) -> StyleType:\n style = Style.null()\n if self.row_styles:\n style += console.get_style(self.row_styles[index % len(self.row_styles)])\n row_style = self.rows[index].style\n if row_style is not None:\n style += console.get_style(row_style)\n return style", "def cell_type(self, row, col):\n if col == 0: return 'heading'\n else: return 'data'", "def get_cell_color ( self, object ):\n if self.is_editable( object ):\n return self.cell_color_\n return self.read_only_cell_color_", "def color_column(self):\n return 8", "def __table_column_style(self):\n cell = TableCellStyle()\n cell.set_bottom_border(1)\n self.default_style.add_cell_style('PLC-TableColumn', cell)", "def style(self):\n return self.container['style']", "def get_colspan(self, ):\n return self.attrs.get(self.AttributeNames.COLSPAN, None)", "def get_cell_color ( self, object ):\n if self.is_editable( object ):\n if self._is_selected( object ):\n return self.selected_cell_color_\n return self.cell_color_\n return self.read_only_cell_color_", "def get_style ( self, object ):\n return self.style", "def __cell_style(self):\n cell = TableCellStyle()\n self.default_style.add_cell_style(\"PLC-Cell\", cell)", "def getType(self, style):\n typematch = re.search('TL\\s(.*)',style)\n if typematch :\n cellType = typematch.group(1)\n else :\n cellType = 'Unknown'\n return cellType", "def highlight_cells(c, c_dict):\n colour= c_dict.get(c)\n return 'background-color: %s' % colour", "def test_002_header_style() -> None:\n df = generate_test_data()\n skim(df, header_style=\"italic green\")", "def GetAttr(self, row, col, kind):\n \n #print \"Get Attr\",row,col,kind\n\n provider = self.GetAttrProvider()\n if provider and provider.GetAttr(row, col, kind):\n attr = provider.GetAttr(row, col, kind).Clone()\n else:\n attr = wx.grid.GridCellAttr()\n\n #color marks\n if self.colsel[col] in self.marks['X']:\n attr.SetBackgroundColour(wx.Colour(255, 230, 230))\n elif self.colsel[col] in self.marks['Y1']:\n attr.SetBackgroundColour(wx.Colour(255, 255, 205))\n elif self.colsel[col] in self.marks['Y2']:\n attr.SetBackgroundColour(wx.Colour(255, 255, 155))\n elif self.colsel[col] in self.marks['G']:\n attr.SetBackgroundColour(wx.Colour(155, 255, 155))\n\n #color dynamic columns\n if self.colsel[col] in self.dynamic_cols:\n attr.SetBackgroundColour(wx.Colour(200, 200, 200))\n\n #color last rows\n maxRows = self.GetNumberRows()\n if self.active:\n if maxRows - row == 1: #last row\n attr.SetBackgroundColour(wx.Colour(255, 230, 230))\n elif maxRows - row == 2: #second to last row\n attr.SetBackgroundColour(wx.Colour(255, 255, 205))\n elif maxRows - row == 3:\n if self.record:\n attr.SetBackgroundColour(wx.Colour(200, 255, 200))\n else:\n attr.SetBackgroundColour(wx.Colour(255, 100, 100))\n else:\n if maxRows - row <= 2:\n attr.SetBackgroundColour(wx.Colour(127, 127, 127))\n\n if self.rowmask[row]:\n attr.SetTextColour(wx.Colour(0,0,255))\n \n return attr", "def GetTextColour(self):\r\n \r\n return self._colText", "def shortcolour(c):\n return c if c == \"none\" or c[0] == 'u' else 
repr2col(col2repr(c))", "def style(self):\n return self._style", "def style(self):\n return self._style", "def get_bl_cell(self):\n return self._bl_cell", "def line_style(self):\n return self.container['line_style']", "def piece_colour(self, col, row):\n square = self.get_square(col, row)\n if square == ' ':\n return None\n return 'W' if square.isupper() else 'B'", "def heat_method(self, row):\n if row['Indexer'] == \"Heating\":\n return row['HeatCool']\n return 0", "def cell_colouring(word_table_cell: table.Table.cell, colour: str) -> None:\n\n try:\n if colour == \"R\":\n colour = parse_xml(r'<w:shd {} w:fill=\"cb1f00\"/>'.format(nsdecls(\"w\")))\n elif colour == \"A/R\":\n colour = parse_xml(r'<w:shd {} w:fill=\"f97b31\"/>'.format(nsdecls(\"w\")))\n elif colour == \"A\":\n colour = parse_xml(r'<w:shd {} w:fill=\"fce553\"/>'.format(nsdecls(\"w\")))\n elif colour == \"A/G\":\n colour = parse_xml(r'<w:shd {} w:fill=\"a5b700\"/>'.format(nsdecls(\"w\")))\n elif colour == \"G\":\n colour = parse_xml(r'<w:shd {} w:fill=\"17960c\"/>'.format(nsdecls(\"w\")))\n\n word_table_cell._tc.get_or_add_tcPr().append(colour)\n\n except TypeError:\n pass", "def get_shear_style(data):\n if 'dsensum' in data.dtype.names:\n shear_style='lensfit'\n else:\n shear_style='reduced'\n return shear_style", "def color(self):\n if self._simplecell:\n self.fetch()\n return self._color", "def __cell(table, row, col):\n rows = table.findAll('tr')\n cols = rows[row].findAll('td')\n return cols[col]" ]
[ "0.6037128", "0.602545", "0.5960017", "0.59558016", "0.5942395", "0.5839042", "0.58133143", "0.57690036", "0.56537867", "0.5642963", "0.56286687", "0.5584785", "0.55833846", "0.5573612", "0.5543419", "0.55313474", "0.5503975", "0.55015445", "0.547513", "0.5468061", "0.5450509", "0.5450509", "0.54286534", "0.5388338", "0.53692335", "0.5363356", "0.53466415", "0.5337955", "0.53346825", "0.5326076" ]
0.72440755
0
Filter model for selected cell value
def _filter_model(self) -> None:
    indexes = self.tableView.selectedIndexes()
    if len(indexes) > 0:
        cell = self.tableView.model().itemData(indexes[0])
        if self.tableView.model().hasFilter(indexes[0].column()):
            self.tableView.model().resetFilter(indexes[0].column())
            if self._last_sorted_column != -1:
                # Restore last sort
                self.tableView.model().sort(self._last_sorted_column, self._last_sort_order)
        else:
            self.tableView.model().setFilter(indexes[0].column(), cell)
            idx = self.tableView.model().index(0, indexes[0].column())
            self.tableView.selectionModel().select(idx, QItemSelectionModel.ClearAndSelect)
        self.tableView.resizeColumnToContents(indexes[0].column())
        Cli3App.instance().updateMainWindiwSignal.emit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter(self, col: str, val: Any) -> pd.DataFrame:\n return self._df[self._df[col] == val]", "def filter_table(self):\n\n filter_text = self.dlg.uTextFilter.text()\n self.proxy_model.setFilterCaseSensitivity(Qt.CaseInsensitive)\n self.proxy_model.setFilterKeyColumn(2)\n self.proxy_model.setFilterFixedString(filter_text)", "def filterSelection(self, widget, callback):\n # It seems unselect_iter() fires of a \"changed\" event\n self.selection.handler_block(self.changed_id)\n\n for row in self.selection.get_selected_rows()[1]:\n iter = self.model.get_iter(row)\n event = self.model.get(iter, 9)[0]\n if not callback(event):\n self.view.get_selection().unselect_iter(iter)\n self.selection.unselect_iter(iter)\n\n self.onSelectionChanged(self.selection)\n self.selection.handler_unblock(self.changed_id)", "def get_filter(self):\n if not self.has_changes and hasattr(self, \"filter_result\"):\n return self.filter_result\n negation = False\n list_input = False\n self.filter_result = None\n if self.input_val.value:\n lhs = 'self.data[\"{}\"]'.format(self.select_col.value)\n col_dtype = self.data_dict[\n self.data_dict.Columns == self.select_col.value\n ].Dtypes.values[0]\n col_values = self.data_dict[\n self.data_dict.Columns == self.select_col.value\n ].Values.values[0]\n func = CONDITIONOPERATIONS[self.condition.value]\n if \"isin\" in func:\n list_input = True\n if col_dtype in [DTYPES.category, DTYPES.string]:\n if \"!\" in func:\n func = func[1:]\n negation = True\n if not list_input and col_dtype == DTYPES.string:\n case = True\n if func[0] == \"i\":\n func = func[1:]\n case = False\n func = \"str.{}\".format(func)\n value = '\"{}\"'.format(self.input_val.value)\n if case is False:\n if func.split(\".\")[-1] in [\n COMPARATORS.startswith,\n COMPARATORS.endswith,\n ]:\n func = \"str.lower().\" + func\n value = value.lower()\n else:\n value = value + \", case=False\"\n elif list_input:\n value = self.input_val.value\n if isinstance(value, str):\n value = (\n self.input_val.value.replace(\", \", \",\")\n .replace(\" ,\", \",\")\n .split(\",\")\n )\n else:\n value = str(self.input_val.value)\n rhs = \".{}({})\".format(func, value)\n elif DTYPES.datetime in col_dtype:\n start_date, end_date = self.input_val.value\n if start_date > col_values[0]:\n self.filter_result = (\n \"(self.data['\"\n + self.select_col.value\n + \"']\"\n + \">= '\"\n + str(start_date)\n + \"'\"\n )\n if end_date < col_values[1]:\n if self.filter_result is None:\n self.filter_result = (\n \"(self.data['\"\n + self.select_col.value\n + \"']\"\n + \"<= '\"\n + str(end_date)\n + \"'\"\n )\n else:\n self.filter_result += (\n \") & (\"\n + \"self.data['\"\n + self.select_col.value\n + \"']<= '\"\n + str(end_date)\n + \"')\"\n )\n self.filter_result = \"(\" + self.filter_result\n self.filter_result += \")\"\n self.filter_result = eval(self.filter_result)\n return self.filter_result\n elif list_input and isinstance(self.input_val.value, list):\n rhs = f\".{func}({self.input_val.value})\"\n elif list_input:\n rhs = f\".{func}([{self.input_val.value}])\"\n else:\n if isinstance(self.input_val.value, list):\n input_value = str(self.input_val.value)[1:-1]\n else:\n input_value = str(self.input_val.value)\n rhs = CONDITIONOPERATIONS[self.condition.value] + input_value\n filter_str = (\"-\" if negation else \"\") + lhs + rhs\n self.filter_result = eval(filter_str)\n return self.filter_result", "def filter():\n return get_filter_data(db, MyTable)", "def _filter(self, row):\n if not self._head:\n self._head = self._create_head(row)\n if 
self._args.head:\n return row\n\n if 'cond' not in self._state:\n self._state['cond'] = self._replace_fields(self._args.cond)\n\n r = list(map(self._convert, row))\n if eval(self._state['cond']):\n return row", "def _filter(self, _model, **kwargs):\n return _model.objects.filter(**kwargs)", "def row_filter(self) -> Optional[pulumi.Input['DataCellsFilterRowFilterArgs']]:\n return pulumi.get(self, \"row_filter\")", "def filter(self, observable):", "def _filter_model(data: list, line: int, col: int, filters: list):\n\n return filter(\n lambda proxies: proxies[line][col] in filters,\n data\n )", "def apply_filter(self, filter_arg):\n filtered_entries = self.visual.apply_filter(filter_arg, self.get_current_entries())\n # idxs = self.selector.select_by_objects(filtered_entries, yield_ones_index=True)\n self.visual.print_entries_enum(filtered_entries, None)\n # self.list(idxs)", "def list_data_cells_filter(self) -> List[Dict[str, Any]]:\n return []", "def filter(self, target_model):\n # return filter_dict_to_target_model(self._axl_data, target_model)\n super().__setattr__('_axl_data', filter_dict_to_target_model(self._axl_data, target_model))\n return self", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def filter(self, column: Union[str, BinaryExpression, List[Union[Tuple, BinaryExpression]]], operator: str = None, value: Any = None) -> B[B, E]:\n pass", "def row_filter(self) -> pulumi.Output[Optional['outputs.DataCellsFilterRowFilter']]:\n return pulumi.get(self, \"row_filter\")", "def where(self, label, filter_fn):\n new_label = []\n new_rows = []\n for x in self.column_labels:\n new_label.append(x)\n # filter(is_even, [1,2,3,4])\n \n for x in self.rows:\n if filter_fn(x[self.column_labels.index(label)]):\n new_row = []\n new_row += x\n new_rows.append(new_row)\n \n\n new_Table = T88ble(new_rows, new_label)\n\n return new_Table", "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if self.value():\n return queryset.filter(state_pol=self.value())", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def FilterFXSeries(self):\r\n filtFX=self.data[self.data.columns[0]].tolist()\r\n return filtFX", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or 'other')\n # to decide how to filter the queryset.\n\n if self.value() is None:\n return queryset.all()\n\n return queryset.filter(firm__pk=self.value())", "def filter_tracked(self, queryset, name, value):\n q_batch = Q(batch=None) | Q(batch='')\n q_serial = Q(serial=None) | Q(serial='')\n\n if str2bool(value):\n return 
queryset.exclude(q_batch & q_serial)\n else:\n return queryset.filter(q_batch & q_serial)", "def filter_column(col, row):\n return col == column", "def getFilter(self):\n col = self.filtercol.get()\n val = self.filtercolvalue.get()\n op = self.operator.get()\n booleanop = self.booleanop.get()\n return col, val, op, booleanop", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def filter(self, *args, **kwargs):", "def evaluate_filter(self, x):\n raise NotImplementedError" ]
[ "0.61465836", "0.60357475", "0.5836791", "0.58231956", "0.5813032", "0.5806916", "0.5806267", "0.57512933", "0.570322", "0.5613734", "0.55834484", "0.5580254", "0.5567273", "0.5547823", "0.5541002", "0.5514371", "0.5469984", "0.54644424", "0.5443306", "0.540509", "0.53569776", "0.53527117", "0.53527117", "0.53433484", "0.5340524", "0.5329804", "0.53272235", "0.53158176", "0.53027844", "0.52652687" ]
0.72029215
0
Extract temperature and pressure from thermofile
def extract_TP(thermofile, column_number, TP, addtxt):
    with open(thermofile, 'r') as f:
        [f.readline() for i in range(3)]
        #extract data
        while True:
            line = f.readline()
            if not line:
                break
            else:
                entry=line.split('\n')[0].split('\t')
                TP[entry[0].split('outcar.umd.dat')[0].split('/')[-1]+addtxt] = (int(entry[column_number['T']]),float(entry[column_number['P']]))
    return TP
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_temperature():\n content = _read_raw_temperature()\n\n # get last three characters of first line\n is_valid = content[0][-4:].strip()\n\n # convert to boolean\n is_valid = _validity_to_bool(is_valid)\n\n reading = content[1]\n reading = float(reading.split('=')[-1].strip()) / 1e3\n\n return is_valid, reading, dt.datetime.now()", "def get_temperature(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? (.*?) .*? .*? . .*? .*? . . . .*?'\n temperature = float(re.findall(pattern,summary).pop())\n return temperature", "def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def read_temperature(self):\n tRaw = self._read_multiple_bytes_as_array(self.BME280_TEMP_MSB, 3)\n\n return float(self._compensate_temperature((tRaw[0] << 12) + (tRaw[1] << 4) + (tRaw[2] >> 4)))", "def _read_raw_temperature():\n with open(device_file, 'r') as f:\n content = f.readlines()\n return content", "def temperatures():\n\n return station_9281", "def read_tph(self):\n resultsTPH = [ 0.0, 0.0, 0.0 ]\n\n tRaw = self._read_multiple_bytes_as_array(self.BME280_TEMP_MSB, 3)\n pRaw = self._read_multiple_bytes_as_array(self.BME280_PRESS_MSB, 3)\n hRaw = self._read_multiple_bytes_as_array(self.BME280_HUM_MSB, 2)\n\n resultsTPH[0] = float(self._compensate_temperature((tRaw[0] << 12) + (tRaw[1] << 4) + (tRaw[2] >> 4)))\n resultsTPH[1] = float(self._compensate_pressure((pRaw[0] << 12) + (pRaw[1] << 4) + (pRaw[2] >> 4)))\n resultsTPH[2] = float(self._compensate_humidity(hRaw[0] << 8) + hRaw[1])\n\n return resultsTPH", "def temperature(self):\n done, data = self._request('GP')\n if done:\n return {\n 'ds3231temp': float(data[0])/10,\n 'mcp9808temp': float(data[1])/10,\n 'tmp007temp': float(data[2])/10\n }\n\n raise EvseError", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def parse_temperature(prod, regime, lines, data):\n for linenum, line in enumerate(lines):\n if len(line.strip()) < 18:\n continue\n # Repair a broken (E) product, see akrherz/pyIEM#08\n if line[20:23] == \"(E)\" and line[38] == \" \":\n prod.warnings.append(f\"Invalid line repaired |{line}|\")\n line = line.replace(\"(E)\", \"E \")\n tokens = make_tokens(regime, line)\n key = tokens[0].strip().lower()\n if key.upper() not in [\"MAXIMUM\", \"MINIMUM\", \"AVERAGE\"]:\n continue\n data[f\"temperature_{key}\"] = get_number(tokens[1])\n if tokens[2] is not None:\n data[f\"temperature_{key}_time\"] = tokens[2]\n if tokens[3] is not None:\n data[f\"temperature_{key}_record\"] = get_number(tokens[3])\n if tokens[4] is not None and tokens[4].strip() not in [\"\", \"M\", \"MM\"]:\n n = get_number_year(tokens[4])\n if n is not None:\n data[f\"temperature_{key}_record_years\"] = [n]\n else:\n prod.warnings.append(f\"Found invalid year |{tokens[4]}|\")\n if tokens[5] is not None:\n data[f\"temperature_{key}_normal\"] = get_number(tokens[5])\n # Check next line(s) for more years\n while (linenum + 1) < len(lines) and len(\n lines[linenum + 1].strip()\n ) == 4:\n line2 = lines[linenum + 1].strip()\n n = get_number_year(line2)\n if n is not None:\n data.setdefault(\n f\"temperature_{key}_record_years\",\n 
[],\n ).append(n)\n else:\n prod.warnings.append(f\"Found invalid year |{line2}|\")\n linenum += 1", "def get_temperature(self):\r\n\r\n\t# get current resolution\r\n\r\n\tconf = self.read_config()\r\n\tmask = 0x60 # 0110 0000\r\n\tres = conf & mask # extract resolution from config register\r\n\t# get temperature from register\r\n \r\n self.write('\\x00')\r\n data = self.read(2)\r\n t_raw = struct.unpack('>h', data)\r\n\tt_raw = t_raw[0]\r\n\r\n#\tmsb = 0b11110101\r\n#\tlsb = 0b11100000\r\n#\tdata = struct.pack('BB', msb, lsb)\r\n # t_raw = struct.unpack('>h', data)\r\n#\tt_raw = t_raw[0]\r\n#\tprint t_raw\r\n\t\r\n # return t_raw\r\n\t# t_raw = ((msb << 8) + lsb) # convert to 2 Byte Integer\r\n\r\n\tif (res == 0x00): # 9 bit resolution 0.5 degree\r\n\t print \"res: 0.5\"\r\n\t return (t_raw >> 7) * 0.5\r\n\r\n\tif (res == 0x20): # 10 bit resolution 0.25 degree\r\n\t print \"res: 0.25\"\r\n\t return (t_raw >> 6) * 0.25\r\n\r\n\tif (res == 0x40): # 11 bit resolution 0.125 degree\r\n\t print \"res: 0.125\"\r\n\t return (t_raw >> 5) * 0.125\r\n\r\n\tif (res == 0x60): # l2 bit resolution 0.0625 degree\r\n\t print \"res: 0.0625\"\r\n\t return (t_raw >> 4) * 0.0625", "def _extract_raw_data(self, lines):\r\n\r\n i = self._find_first_data_point(lines)\r\n if self._lines_have_temperature(lines[i]):\r\n self._T = []\r\n\r\n if self._has_drift_points(lines):\r\n while i < len(lines) and lines[i][0] in ['+', '-']:\r\n self._extract_drift_point(lines[i])\r\n i += 2\r\n i += self._extract_next_forc(lines[i:])\r\n i += 1\r\n else:\r\n while i < len(lines) and lines[i][0]in ['+', '-']:\r\n i += self._extract_next_forc(lines[i:])\r\n self._extract_drift_point(lines[i-1])\r\n i += 1\r\n\r\n return", "def get_temp():\n count = 0\n while True:\n # Temp\n output = subprocess.check_output(\n [\"/home/andy/python/bitbucket/pitemp/Adafruit_DHT\", \"2302\", \"4\"])\n count += 1\n print (\"Attempt %s: %s\") % (count, output)\n temp_match = re.search(\"Temp =\\s+([0-9.]+)\", output)\n humid_match = re.search(\"Hum =\\s+([0-9.]+)\", output)\n\n # if the beginning of output contains temp and numbers,\n # we can assume we are getting valid data\n if temp_match:\n temp = float(temp_match.group(1))\n humidity = float(humid_match.group(1))\n break\n\n return (temp, humidity)", "def getCl(filename):\n powSpec = pf.getdata(filename,1)\n temps = powSpec.field('TEMPERATURE')\n ell = np.arange(temps.size)\n return ell,temps", "def get_temp(self):\n lines = self._get_temp_raw()\n\n while not self._is_successful_read(lines):\n time.sleep(0.2)\n lines = self._get_temp_raw()\n \n try: \n temp_file_location = lines[1].find('t=')\n except: \n print(\"ERROR: w1_slave file corrupted. 
No t= found.\")\n \n if temp_file_location is not -1:\n temp_string = lines[1][temp_file_location+2:]\n temp = float(temp_string) / 1000.0\n return temp", "def get_list_temperature(self,typ,file_number):\n if typ == 'emis':\n return self.beam_emis[file_number].temperature\n elif typ == 'atte':\n return self.beam_atte[file_number].temperature\n else:\n raise NameError('No list with this name: {0}'.format(typ))", "def read_temperature(self):\n self._force_read(False)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n\n return float(self._compensate_temperature(tempADC))", "def read(self):\n try:\n pressure, temperature=self.get_pressure()\n return pressure,temperature\n except Exception:\n logging.exception(\"Pressure Sensor Error\")", "def get_temperature(self):\n pass", "def get_temp(html) -> None:\n\tif page_type_dict['general']:\n\t\tt_text = html.find('div', {'class': '_1HBR'}).text\n\t\tt_digit = ''.join([i for i in t_text if i.isdigit()])\n\t\tweather_dict['temperature'] = t_digit\n\telse:\n\t\tre_temp_class = re.compile('.*_2ezK.*') # regex template: str w/ '_2ezK'\n\t\ttemp_class = html.find('div', {'class': re_temp_class}) \n\t\t# we've got smth like: 'Ночью14°Утром19°Днём24°Вечером22°\n\t\tweather_lst = temp_class.text.split('°') # ['Ночью14','Утром19',...]\n\t\tint_weather_lst = [int(number.group()) for number in ( # for all the elems \n\t\t\tre.search(r'\\d+', word) for word in weather_lst) if number] # keep integers\n\t\t# result: [14, 19, 24, 22]\n\t\tweather_dict['temperature'] = int_weather_lst", "def readtemperature(self, cTemp):\r\n\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\twhile (data & 0x01) != 0 :\r\n\t\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\tdata1 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\tdata2 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\t\r\n\t\t# Convert the data to 14-bits\r\n\t\tcTemp = (((data1 * 256.0) + data2) / 4.0)\r\n\t\t\r\n\t\tif cTemp < 0x0140 :\r\n\t\t\tcTemp = 0x0140\r\n\t\telif cTemp > 0x12C0 :\r\n\t\t\tcTemp = 0x12C0\r\n\t\telse :\r\n\t\t\tcTemp = cTemp\r\n\t\t\r\n\t\tcTemp = (cTemp / 32.0) - 50.0\r\n\t\tfTemp = cTemp * 1.8 + 32\r\n\t\t\r\n\t\treturn {'c' : cTemp, 'f' : fTemp}", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def thermo_vals(self, file, path, chunk=100):\n reader = IO(file, path)\n # print(\"Thermodynamic values can be extracted from %s.\" % file)\n lines = reader.lines()\n \n # Mark the data locations.\n marker1 = 'VIBRATIONAL FREQUENCIES'\n marker2 = 'NORMAL MODES'\n marker3 = 'INNER ENERGY'\n for i in range(len(lines)):\n line = lines[i]\n if line == marker1:\n vib_start = i+3\n elif line == marker2:\n vib_end = i-3\n elif line == marker3:\n therm_start = i\n \n # Extract the data values. 
\n vib_lines = lines[vib_start:vib_end]\n U = extr(lines[therm_start+19])[0]\n H = extr(lines[therm_start+39])[0]\n S_el = extr(lines[therm_start+54])[0]\n S_vib = extr(lines[therm_start+55])[0]\n S_trans = extr(lines[therm_start+57])[0]\n linearity = lines[therm_start+65]\n if ' linear' in linearity:\n rot_num = 1\n elif 'nonlinear' in linearity:\n rot_num = 1.5\n else:\n raise\n qrot = extr(lines[therm_start+68])[0]\n \n vibs = [extr(line)[:2] for line in vib_lines]\n img_modes = []\n for vib in vibs:\n if vib[1] < 0:\n img_modes.append(vib)\n \n if len(img_modes) > 0:\n values = {}\n print(\"ERROR: %s contains imaginary modes:\" % file)\n for mode in img_modes:\n print(\"#{0}: {1} cm^-1\".format(mode[0], mode[1]))\n else:\n values = {\n 'U':U, 'H':H, 'S*T (el)':S_el, 'S*T (vib)':S_vib,\n 'S*T (trans)':S_trans, 'qrot':qrot, 'rot #':rot_num}\n\n return values", "def __getRawTemperature(self):\n t1 = self.read_byte_data(self.address, 0x03)\n t2 = self.read_byte_data(self.address, 0x04)\n t3 = self.read_byte_data(self.address, 0x05)\n t = (t1 << 16) | (t2 << 8) | t3\n t = getTwosComplement(t, 24)\n return t", "def getTemperatureMeasurements(self):\n # self.board.readline()\n self.stop = False\n times = []\n temps = [[], [], []]\n \n # A synchronisation string containing the characters tx is sent before each set of measurements,\n # we ensure correct reading of the measurements by waiting for this string\n while str(self.board.readline()).strip('b\\'\\\\rn') != 'tx':\n pass\n \n while not self.stop:\n # A synchronisation string containing the characters tx is sent before each set of measurements\n tx = self.board.readline()\n if str(tx).strip('b\\'\\\\rn') == 'tx':\n rawData1 = self.board.readline()\n rawData2 = self.board.readline()\n rawData3 = self.board.readline()\n rawData4 = self.board.readline()\n \n \n timeStamp = str(rawData1).strip('b\\'\\\\rn')\n temp1 = str(rawData2).strip('b\\'\\\\rn')\n temp2 = str(rawData3).strip('b\\'\\\\rn')\n temp3 = str(rawData4).strip('b\\'\\\\rn')\n try:\n times.append(float(timeStamp) / 1000)\n temps[0].append(float(temp1) / 128)\n temps[1].append(float(temp2) / 128)\n temps[2].append(float(temp3) / 128)\n # print(f'\\rtime: {float(timeStamp) / 1000:.2f} s, Temperature measured on sensor 1: {float(temp1) / 128:.2f} °C,'\n # f'sensor 2: {float(temp2) / 128:.2f} °C, sensor 3: {float(temp3) / 128:.2f} °C', sep='', end='', flush=True)\n except:\n print(rawData1, rawData2, rawData3, rawData4)\n \n \n if self.stop:\n print('\\nMeasurement finished...')\n \n self.data_stack[self.fetch_kinds[0]] = times\n self.data_stack[self.fetch_kinds[1]] = temps[0]\n self.data_stack[self.fetch_kinds[2]] = temps[1]\n self.data_stack[self.fetch_kinds[3]] = temps[2]\n \n if (len(self.data_stack['Sensor 1 Temp']) != len(times) or len(self.data_stack['Sensor 2 Temp']) != len(times) or len(self.data_stack['Sensor 3 Temp']) != len(times)):\n print(\"Warning: There may be some missing values!\")", "def test_temperatures(get_touchmat):\n touchmat = get_touchmat\n\n temperatures = touchmat.temperatures()\n info = touchmat.info()\n check_system_types.check_TemperatureInfoList(temperatures, [info])", "def read_tph(self):\n resultsTPH = [ 0.0, 0.0, 0.0 ]\n\n self._force_read(False)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n presADC = (self._read_register_1ubyte(self.BME680_PRESS_MSB) << 12) | (self._read_register_1ubyte(self.BME680_PRESS_LSB) 
<< 4) | (self._read_register_1ubyte(self.BME680_PRESS_XLSB) >> 4)\n humADC = (self._read_register_1ubyte(self.BME680_HUM_MSB) << 8) | (self._read_register_1ubyte(self.BME680_HUM_LSB))\n\n resultsTPH[0] = float(self._compensate_temperature(tempADC))\n resultsTPH[1] = float(self._compensate_pressure(presADC))\n resultsTPH[2] = float(self._compensate_humidity(humADC))\n\n return resultsTPH", "def get_internal_energy(filename):\n # --------------- helper functions --------------- #\n def parse_data(block):\n \"\"\"\n Parse the line(s) to get the data.\n \"\"\"\n rval = {\n 'Total' : None,\n 'Electronic' : None,\n 'Translational' : None,\n 'Rotational' : None,\n 'Vibrational' : None\n }\n for line in block.splitlines():\n if re.match(r'^\\s*Total', line):\n key = 'Total'\n elif re.match(r'^\\s*Electronic', line):\n key = 'Electronic'\n elif re.match(r'^\\s*Translational', line):\n key = 'Translational'\n elif re.match(r'^\\s*Rotational', line):\n key = 'Rotational'\n elif re.match(r'^\\s*Vibrational', line):\n key = 'Vibrational'\n else:\n key = None\n if key:\n words = line.strip().split()\n try:\n rval[key] = float(words[1])\n except ValueError:\n raise ValueError('Invalid thermodynamic format.')\n return rval\n # ------------- end helper functions ------------- #\n # open the file, if a string\n if isinstance(filename, str):\n ifs = open(filename, 'r')\n else:\n ifs = filename\n # extract the relevent lines\n start = r'^\\s*E\\s+\\(Thermal\\)'\n stop = r'^\\s*Vibrational'\n rre = RegexRangeExtractor(start, stop,\n include_start=True,\n include_stop=True)\n block = rre(ifs)[0]\n # close file\n if ifs is not filename:\n ifs.close()\n # parse data\n #+ single value/file\n rval = parse_data(block)\n return rval", "def get_temps(self):\n try:\n cmos = self.cmos_temp\n except Exception:\n cmos = None\n try:\n pcb = self.pcb_temp\n except Exception:\n pcb = None\n return cmos, pcb" ]
[ "0.6801142", "0.6627863", "0.65125084", "0.6498045", "0.6469067", "0.6388448", "0.6369974", "0.6369449", "0.63679165", "0.6332436", "0.6262144", "0.6257998", "0.6150737", "0.6128258", "0.61186016", "0.61119604", "0.6102738", "0.60951656", "0.609493", "0.60928375", "0.6082937", "0.60700405", "0.60601294", "0.6059842", "0.60493475", "0.60293806", "0.6003358", "0.59951246", "0.5977001", "0.5967906" ]
0.7111684
0
registers a new cache or replaces the existing one if `replace=True` is provided. otherwise, it raises an error on adding a cache which is already registered.
def register_cache(instance, **options): get_component(CachingPackage.COMPONENT_NAME).register_cache(instance, **options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace(self, other_cache):\n # This is used in avant-idle to replace the content of the cache\n # in a process (where no storage normally takes place) by\n # that of another where the actual caching of the source is done.\n self.cache.clear()\n for key in other_cache:\n self.add(key, other_cache[key])", "def testReplaceItem(self):\n\n first = \"Little pig, little pig, let me come in!\"\n second = \"Not by the hair on my chinny-chin-chin!\"\n memcache.set('first', first)\n assert memcache.get('first') == first\n memcache.replace('first', second)\n assert memcache.get('first') == second", "def addToCache(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def add_mock(monkeypatch):\n _mock = Mock()\n monkeypatch.setattr('django.core.cache.cache.add', _mock)\n return _mock", "def add(self, key, value, timeout=None):\n try:\n key = self.prepare_key(key)\n if self._cache.exists(key):\n return False\n return self.set(key, value, timeout)\n except Exception as err:\n return self.warn_or_error(err, False)", "def try_insert(self, cache_key, paths):\r\n pass", "def testSetDefaultExists(self):\n ref = cache.CacheReference(self.cache, 'key')\n self.cache._KeyExists.return_value = True\n ref.SetDefault('/foo')\n self.assertFalse(self.cache._Insert.called)", "def update_cache(self, repo=None, force=False):\n raise NotImplementedError(self.update_cache)", "def enable_cache(self, **kwargs: Dict[str, Any]) -> None:\n pass", "def put_cache(self, data, key, coordinates=None, expires=None, overwrite=True):\n\n try:\n self.definition\n except NodeDefinitionError as e:\n raise NodeException(\"Cache unavailable, %s (key='%s')\" % (e.args[0], key))\n\n if self.cache_ctrl is None:\n return\n\n if not overwrite and self.has_cache(key, coordinates=coordinates):\n raise NodeException(\"Cached data already exists for key '%s' and coordinates %s\" % (key, coordinates))\n\n with thread_manager.cache_lock:\n self.cache_ctrl.put(self, data, key, coordinates=coordinates, expires=expires, update=overwrite)", "def mark_if_cached(self, args):\n pass", "def _update_use(self, key):\n\t\tif (self._replace_pol == Cache.LRU):\n\t\t\tself.cache[key]= self.hashmap[key]\n\t\tif (self._replace_pol == Cache.LRU_S):\n\t\t\tself.cache[key] = self.hashmap[key]", "def test_register_component_with_custom_key_duplicate_with_replace():\n\n default_database_component = application_services.get_component('database.component')\n custom_component1 = ExtraDatabaseComponentMock('database.component',\n component_custom_key=2000)\n custom_component2 = DuplicateExtraDatabaseComponentMock('database.component',\n component_custom_key=2000)\n\n application_services.register_component(custom_component1)\n application_services.register_component(custom_component2, replace=True)\n assert application_services.get_component('database.component') \\\n == default_database_component\n\n assert application_services.get_component('database.component',\n component_custom_key=2000) \\\n == custom_component2\n\n application_services.remove_component(custom_component2.get_id())", "def add(self, search, replace):\n esearch = html.escape(search)\n ereplace = html.escape(replace)\n\n with self.connection.ro as dbconn:\n dbconn.cursor.execute(\n \"SELECT search, replace FROM {} WHERE search=? 
AND replace=?\".format(\n self.tablename),\n (esearch, ereplace))\n present = dbconn.cursor.fetchall()\n if present:\n LOGGER.debug(\n \"Pair '{}'/'{}' already exists in '{}', nothing to do\".format(\n esearch, ereplace, self.tablename))\n else:\n LOGGER.debug(\n \"Pair '{}'/'{}' does not exist in '{}', adding...\".format(\n esearch, ereplace, self.tablename))\n with self.connection.rw as dbconn:\n dbconn.cursor.execute(\n \"INSERT INTO {} VALUES (?, ?)\".format(self.tablename),\n (esearch, ereplace))", "def decorator(cls):\n\n instance = cls(*args, **kwargs)\n caching_services.register_cache(instance, **kwargs)\n\n return cls", "def test_upload_overwrite(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = [\"everyone\"]\n name, filename, content = \"a\", \"a-1.tar.gz\", BytesIO(b\"new\")\n cache.upload(filename, BytesIO(b\"old\"), name)\n cache.upload(filename, content, name)\n\n all_versions = cache.all(name)\n self.assertEqual(len(all_versions), 1)\n data = cache.storage.open(all_versions[0]).read()\n self.assertEqual(data, b\"new\")\n\n stored_pkgs = list(cache.storage.list(cache.new_package))\n self.assertEqual(len(stored_pkgs), 1)", "def replace(self, key, val, expiry_time=0, min_compress_len=0):\n\t\treturn self._set(\"replace\", key, val, expiry_time, min_compress_len)", "def StoreOrUpdateInCache(self, filename, data):\n try:\n if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):\n memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)\n except (ValueError), err:\n logging.warning('Data size too large to cache\\n%s' % err)", "def StoreOrUpdateInCache(self, filename, data):\n try:\n if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):\n memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)\n except (ValueError), err:\n logging.warning('Data size too large to cache\\n%s' % err)", "def np_cache(enable_cache, write_cache=True, force_update=False, compress=True, hash_method='hash'):\n\n valid_hash_funcs = {\n 'hash': _func_hash,\n 'readable': _func_hash_readable\n }\n try:\n hash_func = valid_hash_funcs[hash_method]\n except KeyError:\n msg = \"hash_method argument value must be one of {}\".format(', '.join(valid_hash_funcs.keys()))\n raise ValueError(msg) from None\n\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not enable_cache:\n # Don't cache anything\n return func(*args, **kwargs)\n # create cache file path\n hash_key = '{}.npz'.format(hash_func(func, args, kwargs))\n cache_path = os.path.join(CACHE_DIR, hash_key)\n\n def run_func_update_cache():\n res = func(*args, **kwargs)\n if force_update or write_cache:\n # logging.debug(\"Writing to cache\")\n _save_numpy(cache_path, res, compress)\n return res\n\n if force_update:\n logging.debug(\"Cache: Forcing update on {}\".format(hash_key))\n return run_func_update_cache()\n else:\n try:\n result = _load_numpy(cache_path)\n logging.debug(\"Cache: Found {}\".format(hash_key))\n return result\n except (IOError, FileNotFoundError):\n logging.debug(\"Cache: Not found {}\".format(hash_key))\n return run_func_update_cache()\n except BadZipFile:\n logging.warning(\"Cache: Corrupted file, ignoring {}\".format(hash_key))\n return run_func_update_cache()\n return wrapper\n\n return decorator", "def set_cache(self, val):\n pass", "def register(self, model, values=None, instance_values=None):\n\n if model in self._models:\n raise Exception(\"%s is already registered\" % model)\n\n 
self._models[model] = CacheConfig(values, instance_values)", "def test_cache_add_without_timeout(self):\n self.cache.set('garbage', 'full')\n\n self.assertTrue(self.cache.add('superman', 'clark kent'))\n self.assertTrue(self.cache.add('recipe', {'sugar': 2, 'wine': 5}))\n self.assertFalse(self.cache.add('garbage', 'empty'))\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})\n self.assertEqual(self.cache.get('garbage'), 'full')\n\n # Move time forward 10 years\n cache.datetime.now = lambda: datetime.now() + timedelta(days=10*365)\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})\n self.assertEqual(self.cache.get('garbage'), 'full')\n\n # Try adding items again\n self.assertFalse(self.cache.add('superman', 'not kent'))\n self.assertFalse(self.cache.add('recipe', {'sugar': None, 'wine': 'A bottle'}))\n self.assertFalse(self.cache.add('garbage', 'empty'))\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})\n self.assertEqual(self.cache.get('garbage'), 'full')", "def testSetDefault(self):\n ref = cache.CacheReference(self.cache, 'key')\n self.cache._KeyExists.return_value = False\n ref.SetDefault('/foo')\n self.cache._Insert.assert_called_once_with('key', '/foo')", "def cache(fn):\n\tcache.c = dict()\n\tdef _fn(*args, **kwargs):\n\t\tkey = fn.__name__ + str(args) + str(kwargs)\n\t\ttry:\n\t\t\tret = cache.c[key]\n\t\texcept KeyError, e:\n\t\t\tret = fn(*args, **kwargs)\n\t\t\tcache.c[key] = ret\n\t\treturn ret\n\treturn _fn", "def add_to_cache(self, content: Content):\n cache = self.cache\n cache.add_content_object(content)\n self.cache = cache", "def add_to_cache(multipart_upload):\n if len(cache) < MAX_CACHE_SIZE:\n if multipart_upload.uploadId not in cache:\n cache[multipart_upload.uploadId] = multipart_upload", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def insert(self, cache_key, paths):\r\n missing_files = filter(lambda f: not os.path.exists(f), paths)\r\n try:\r\n if missing_files:\r\n raise ArtifactCache.CacheError('Tried to cache nonexistent files: %s' % missing_files)\r\n self.try_insert(cache_key, paths)\r\n except Exception as e:\r\n self.log.error('Error while writing to artifact cache: %s. ' % e)" ]
[ "0.57119435", "0.55816114", "0.5510161", "0.53981495", "0.53348285", "0.5221639", "0.5185721", "0.51807415", "0.5178592", "0.516138", "0.5150727", "0.5098093", "0.50709575", "0.50582135", "0.5053526", "0.5041543", "0.5037809", "0.5036832", "0.5036832", "0.5020652", "0.50199574", "0.500916", "0.4999056", "0.49950287", "0.49904194", "0.49716738", "0.49591205", "0.49576965", "0.49576965", "0.49465138" ]
0.5955155
0
gets the registered cache with given name. it raises an error if no cache is found for given name.
def get_cache(name): return get_component(CachingPackage.COMPONENT_NAME).get_cache(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cache(name, region, namespace):\n try:\n cache = getattr(_CACHES, name)\n except AttributeError:\n manager = CacheManager(**cache_regions[region])\n cache = manager.get_cache(namespace)\n setattr(_CACHES, name, cache)\n return cache", "def getHostByName(self, name, *args):\n key = (name,) + args\n if key in self._cache:\n # If we failed last time, try again\n if isinstance(self._cache[key], Failure):\n del self._cache[key]\n # Check for a cache hit.\n elif time.time() > self._cache[key][1] + self._timeout:\n # Ensure the item hasn't expired.\n del self._cache[key]\n else:\n # If the item is in cache and not expired, return it immediately.\n return defer.succeed(self._cache[key][0])\n\n # If it wasn't already in the cache, this always returns a deferred.\n return self._cache[key].addCallback(lambda x: x[0])", "def factory_get(self, name):\n try:\n return registry[name]\n except KeyError:\n import traceback\n traceback.print_exc()\n Log.error(\"Cannot find %s in {%s}\" % (name, ', '.join(registry.keys())))", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def load(self, name: str):\n result = self.l2.load(name)\n if result is not None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.load(name, self.l2)\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return result\n logging.debug(f'{name} cache miss')\n return None # Cache Miss", "def get_from_cache(cls, file_name):\n random.shuffle(cls.CACHE_BACKENDS)\n fname = None\n for cb in cls.CACHE_BACKENDS:\n if not cb.health_check():\n continue\n fname = cb.get_from_cache(file_name)\n if fname:\n break\n return fname", "def get_cache(self, key):\n return self.r.get(key)", "def _get_from_immediate_cache(self, name, fn):\n\n with self._immediate_cache_lock:\n if not hasattr(self, '_immediate_cache'):\n self._immediate_cache = [{}]\n cache = self._immediate_cache[0]\n hx = hash(name)\n if hx not in cache:\n cache[hx] = fn()\n\n return cache[hx]", "def get(key):\n return Cache.cache_connector.get(key)", "def _get_cache_instance(self):\n try:\n cache = caches[stats2_settings.CACHE_KEY]\n except InvalidCacheBackendError:\n cache = caches['default']\n return cache", "def cache_get(item: str) -> object:\n\titem = str(item)\n\tcache = cache_find(item)\n\n\t# cache_find() will return none if the cache does not exist\n\t# the returned location is guaranteed to exist, so no point checking again.\n\n\tif cache is not None:\n\t\ttry:\n\t\t\tcached = pickle.load(open(cache, \"rb\"))\n\t\texcept EOFError as ex:\n\t\t\t# Cache file is corrupted, so print an error and act like it does\n\t\t\t# not exist. 
We do not delete the cache file incase the user wants\n\t\t\t# to recover the file.\n\t\t\tuux.show_error(\"Error when loading file from cache: \" + str(ex))\n\t\t\treturn None\n\t\texcept Exception as ex:\n\t\t\traise ex\n\t\tuux.show_debug(\"Cache hit for \" + item)\n\t\treturn cached\n\n\treturn None", "def _cache_get(self, metric_name):\n pass", "def get_cache(self):\n return self._instance._cache[self.name]", "def cache(self, name: str = None) -> B[B, E]:", "def download(self, name: str):\n result = self.l2.load(name)\n if result is not None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.download(name, self.l2.get_path(name))\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return self.l2.load(name)\n logging.debug(f'{name} cache miss')\n return None # Cache Miss", "def getCache(self, key):\n return self._cache.get(key, None)", "def get_output_from_cache(name, filename):\n cache_filename = _get_cache_filename(name, filename)\n if (os.path.exists(cache_filename) and\n os.path.getmtime(filename) < os.path.getmtime(cache_filename)):\n with io.open(cache_filename) as f:\n return f.read()\n\n return None", "def get(self, name):\r\n return self._registry[name]", "def _cache_get(self, metric_name):\n try:\n with self._lock:\n metric = self.__cache.get(metric_name, False)\n except KeyError:\n # When metrics expire, we still get a KeyError.\n metric = False\n if metric is False:\n return None, False\n else:\n return metric, True", "def _find_cache():\n app = _find_app()\n return app.cache", "def get_symbol(self, name): # pylint: disable=no-self-use,unused-argument\n if name in self._symbol_cache:\n return self._symbol_cache[name]\n return None", "def get_json_from_cache(file_name):\n result = None\n path = clean_path(file_name)\n cached_file_name = get_cached_file_name(path)\n if os.path.exists(cached_file_name):\n time = os.path.getmtime(path)\n cached_time = os.path.getmtime(cached_file_name)\n if cached_time > time:\n try:\n source = open(cached_file_name, \"r\")\n try:\n result = json.load(source)\n except ValueError:\n pass\n source.close()\n except OSError:\n # Includes IOError\n pass\n return result", "def get_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n return self.data[\"dataset\"][name]\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def get_cli_cache(name, recursive=False):\n if not is_cli():\n return None\n\n # List cached values candidates\n timestamp = _time()\n candidates = {}\n for filename in _listdir(CACHE_DIR):\n path = _join(CACHE_DIR, filename)\n cached_name, expiry = filename.rsplit('_', 1)\n\n # Remove expired cached files\n if int(expiry) < timestamp:\n try:\n _remove(path)\n continue\n except OSError: # pragma: no cover\n # Should never raise, May be already removed by another accelpy\n # instance\n continue\n\n # Memorize candidates cached files\n candidates[cached_name] = path\n\n if not candidates:\n return\n\n # Get cached value, or return None\n if recursive:\n names = []\n while name and not name.endswith('|'):\n names.append(name)\n name = name[:-1]\n names.append(name)\n\n else:\n names = name,\n\n for hashed_name in (hash_cli_name(name) for name in names):\n try:\n return json_read(candidates[hashed_name])\n except KeyError:\n continue", "def get(self, path):\n\t\treturn self.cache.get(path)", "def get(name, key, default=None, **options):\n\n return get_component(CachingPackage.COMPONENT_NAME).get(name, key,\n default=default, 
**options)", "def load_cache(name, typ=\"pkl\"):\n filename = cache_name(name, typ)\n if typ == \"str\":\n with open(filename, 'r') as fin:\n return fin.read()\n elif typ == \"pkl\":\n with open(filename, 'rb') as fin:\n return pickle.load(fin)\n elif typ == \"h5\":\n import keras\n return keras.models.load_model(filename)\n else:\n raise ValueError(\"Invalid type '{}'.\".format(typ))", "def get_cache(url='memory://'):\n\n parsed = parse.urlparse(url)\n backend = parsed.scheme\n\n query = parsed.query\n # NOTE(flaper87): We need the following hack\n # for python versions < 2.7.5. Previous versions\n # of python parsed query params just for 'known'\n # schemes. This was changed in this patch:\n # http://hg.python.org/cpython/rev/79e6ff3d9afd\n if not query and '?' in parsed.path:\n query = parsed.path.split('?', 1)[-1]\n parameters = parse.parse_qsl(query)\n kwargs = {'options': dict(parameters)}\n\n mgr = driver.DriverManager('neutron.openstack.common.cache.backends', backend,\n invoke_on_load=True,\n invoke_args=[parsed],\n invoke_kwds=kwargs)\n return mgr.driver", "def lookup_by_name(cls, name):\n return cls.__by_name[name]" ]
[ "0.7070603", "0.70663273", "0.6774853", "0.65753937", "0.65753937", "0.6561623", "0.6553643", "0.65430456", "0.6463009", "0.64330125", "0.6423352", "0.6407644", "0.63443184", "0.63150007", "0.63098884", "0.62563205", "0.6237036", "0.6201351", "0.6185012", "0.6158534", "0.6157325", "0.61189646", "0.6117408", "0.61031014", "0.60773176", "0.60353297", "0.60194975", "0.60130113", "0.59942824", "0.5989194" ]
0.8160379
0
clears a cache with given name.
def clear(name): get_component(CachingPackage.COMPONENT_NAME).clear(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_cache(name, region, namespace):\n cache = get_cache(name, region, namespace)\n cache.clear()", "def clear(self, name=None):\n if name is None: # Delete whole cache\n try:\n os.unlink(self.filepath)\n except OSError:\n pass\n return\n elif name in self.caches:\n sql = u'DROP TABLE `{}`'.format(name)\n with self.conn as c:\n c.execute(sql)\n return\n else:\n raise ValueError('store not found : {!r}'.format(name))", "def cache_clear():\n # type: () -> None\n with Cache(CACHE_URI) as c:\n c.clear()", "def _cache_drop(self, metric_name):\n with self._lock:\n del self.__cache[metric_name]", "def clear_cache():\n cache = Cache()\n cache.reset()", "def delete_cache(self, dbname=None, system=None):\n # Use flush_all, which\n # expire all data currently in the memcache servers.\n self.memcache.flush_all()", "def cache_drop(self, metric_name):\n self._cache_drop(metric_name)", "def cache_clear():\n # type: () -> None\n with Cache() as c:\n c.clear()", "def delete_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n self.data[\"dataset\"].pop(name)\n self.update_categories()\n self.write_data_cache(self.data)\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def clear_cache():\n # TODO\n pass", "def clear_cache(self):\n requests.get(url=self.proxy_url+'/clear_cache')", "def remove_mockcache(connection_name):\n try:\n mockcache_dir = get_mockcache_dir(connection_name)\n except ValueError:\n return\n file_list = glob.glob(os.path.join(mockcache_dir, '*'))\n for _file in file_list:\n os.remove(_file)\n os.rmdir(mockcache_dir)", "def clear_cache():\n os.remove(CACHE_FILE)", "def erase_cache_layer(self, layer_key_name):\n\n self.cache.reset_cache_layer(layer_key_name)", "def destroy_cache():\n # TODO\n pass", "def drop_cache(self, filename=None):\n os.remove(self._cache_filename(filename))", "def delete_cache(self, key):\n self.r.delete(key)", "def clear_cache():\n if os.path.exists(get_cachedir()):\n for filename in os.listdir(get_cachedir()):\n if not filename.endswith('.cache'):\n continue\n\n path = os.path.join(get_cachedir(), filename)\n os.unlink(path)", "def clear_cache(self):\n pass", "def clear(self, name):\n pass", "def clear(self, cacheDir):", "def delete_from_cache(self, cache_key, cache_index):\n try:\n del MEM_CACHE[cache_key.lower()][cache_index]\n except KeyError:\n pass", "def delete(self, cache_key):\r\n pass", "def clear_cache(self):\n for fle in self.cache_location.glob(\"*.pickle\"):\n fle.unlink()", "def cache_clear(self):\n\t\tself.__cache = {}", "def clearcache():\n g.pafs = {}\n g.streams = {}\n g.url_memo = collections.OrderedDict()\n dbg(\"%scache cleared%s\", c.p, c.w)\n g.message = \"cache cleared\"", "def clear_cache():\n path = join(\"data\", \"cache\")\n file_list = os.listdir(path)\n file_list.remove(\".gitkeep\") # Exclude .gitkeep\n for filename in file_list:\n os.remove(join(path, filename))", "def clear_cache():\n run(\"rm -rf ~/public_html/var/cache/mage*\")\n run(\"redis-cli FLUSHALL\")", "def clear(self, key):\n cache_key = make_key('queue-' + self.name, key)\n memcache.delete(cache_key)", "def clear(self, k=None, value=None):\n if k is not None:\n kk = self._checkIndex(k)\n for j, cache in enumerate(self.caches):\n if k is None or j == kk:\n if value is None:\n cache.clear()\n while cache.count(value):\n cache.remove(value)" ]
[ "0.7740158", "0.7638491", "0.7009554", "0.67994934", "0.67422235", "0.67090124", "0.6701614", "0.6690846", "0.6683314", "0.6680682", "0.66635156", "0.66617554", "0.664632", "0.6569245", "0.65159833", "0.64994127", "0.6455329", "0.6447904", "0.6433137", "0.64108324", "0.6408189", "0.6297961", "0.6294362", "0.6215967", "0.61995894", "0.6150829", "0.6117879", "0.6082972", "0.6079376", "0.603503" ]
0.7696294
1
gets all available cache names.
def get_cache_names(): return get_component(CachingPackage.COMPONENT_NAME).get_cache_names()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_cache_names(self) -> list:\n conn = await self.random_node()\n return await cache_get_names_async(conn)", "def get_init_all_names(self) -> list[str]:\n names = {self.client.name, self.client.alias_name}\n if self.service_resource:\n names.add(self.service_resource.name)\n names.add(self.service_resource.alias_name)\n for waiter in self.waiters:\n names.add(waiter.name)\n for paginator in self.paginators:\n names.add(paginator.name)\n\n result = list(names)\n result.sort()\n return result", "def list_cached():\n for json_name in cached_files():\n source_name = get_source_file_name(json_name)\n yield (json_name, source_name)", "def get_all(self):\r\n ret = []\r\n for cache_name, stat in self.stats_per_cache.items():\r\n ret.append({\r\n 'cache_name': cache_name,\r\n 'num_hits': len(stat.hit_targets),\r\n 'num_misses': len(stat.miss_targets),\r\n 'hits': stat.hit_targets,\r\n 'misses': stat.miss_targets\r\n })\r\n return ret", "def _get_all_cache_files(self):\n files = set()\n dir_tree = os.walk(self.config.get('cachedir', self.CACHEDIR))\n for dirpath, _, filenames in dir_tree:\n for file_name in filenames:\n if 'cache' in file_name:\n files.add(os.path.join(dirpath, file_name))\n return files", "def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []", "def get_all(self, name):\n\t\tpass", "def get_request_candidates(self):\n return os.listdir(self.cache_dir_)", "def _list_dir(self):\n return [os.path.join(self.cache_dir, fn)\n for fn in os.listdir(self.cache_dir)]", "async def get() -> list:\n if _cache is None:\n await _update()\n return _cache", "def getUncachedGameKeys(self):\n theKeys = HashSet()\n for game in theCacheDirectory.listFiles():\n theKeys.add(game.__name__.replace(\".zip\", \"\"))\n return theKeys", "def get_crl_gnames(self):\n urls = ['uri:' + u for u in self.crl_urls]\n return self.load_gnames(urls)", "def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()", "def list_keys(self, bucket_name, prefix=None):\n url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '?action=keys'\n res = self.infinispan_client.get(url, auth=self.basicAuth)\n data = res.content\n return data", "def get_cached_property_names(self): # real signature unknown; restored from __doc__\n return []", "def all():\n # results = [String.from_dict(redis.hgetall(key)) for key in redis.keys() if key != 'index']\n results = []\n for key in redis_store.keys(String.generate_key('*')):\n data = pickle.loads(redis_store.get(key))\n string = String(data['key']).deserialize(data)\n results.append(string)\n return results", "def getNames(self) -> List[unicode]:\n ...", "def __iter__(self):\n cache_key = \"countries:all:{}\".format(get_language())\n if cache_key in self._cached_lists:\n yield from self._cached_lists[cache_key]\n return\n\n val = cache.get(cache_key)\n if val:\n self._cached_lists[cache_key] = val\n yield from val\n return\n\n val = list(super().__iter__())\n self._cached_lists[cache_key] = val\n cache.set(cache_key, val, 3600 * 24 * 30)\n yield from val", "def list_objects(self, bucket_name, prefix=None):\n url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '?action=keys'\n res = self.infinispan_client.get(url, auth=self.basicAuth)\n data = res.content\n return data", "def namelist(self):\n return []", "def all(self):\n if not self._cache:\n self.load()\n\n return self._cache", "def getMemberNames(self):\r\n # On Windows NT/2k/XP and Unix, if path 
is a Unicode object, the result \r\n # will be a list of Unicode objects. \r\n # Undecodable filenames will still be returned as string objects \r\n # If we don't request unicode, for example Vista may return a '?' \r\n # instead of a special character. The name would then be unusable to\r\n # build a distinct URL that references this resource.\r\n\r\n nameList = []\r\n\r\n for item in self.nibbler.listdir(self.path):\r\n name = to_str(item.name)\r\n nameList.append(name)\r\n\r\n for item in self.provider.cache_fs.get_dir_content(self.path):\r\n if item not in nameList:\r\n nameList.append(to_str(item))\r\n\r\n #this magic does not allow load the whole content for crazy Finder on MacOS\r\n magic_files = ['.ql_disablecache', '.ql_disablethumbnails']\r\n if nameList:\r\n for magic_file in magic_files:\r\n if magic_file not in nameList:\r\n f_obj = FSItem(magic_file, is_dir=False) \r\n self.provider.cache_fs.put(os.path.join(self.path, magic_file), f_obj)\r\n nameList.append(magic_file)\r\n\r\n return nameList", "async def _multi_get(self, keys, encoding=\"utf-8\"):\n return [SimpleMemoryBackend._cache.get(key) for key in keys]", "def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def get_cache(name):\n\n return get_component(CachingPackage.COMPONENT_NAME).get_cache(name)", "def find_cache_files():\n files = []\n\n for root, dirnames, filenames in os.walk(\".\"):\n for filename in fnmatch.filter(filenames, \"*.pyc\"):\n files.append(os.path.join(root, filename))\n\n for root, dirnames, filenames in os.walk(\".\"):\n for filename in fnmatch.filter(filenames, \"__pycache__\"):\n files.append(os.path.join(root, filename))\n\n return files", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()" ]
[ "0.8306477", "0.63981295", "0.63402134", "0.61634046", "0.61542654", "0.61251444", "0.5971529", "0.59659857", "0.5958396", "0.58481795", "0.5840008", "0.58350694", "0.5829402", "0.57440615", "0.573535", "0.57182413", "0.5714329", "0.5707662", "0.567821", "0.5675578", "0.56313896", "0.56153685", "0.5588408", "0.5579081", "0.5573567", "0.5573567", "0.5570537", "0.554767", "0.5520109", "0.5520109" ]
0.8020084
1
gets statistic info of all caches.
def get_all_stats(): return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all(self):\r\n ret = []\r\n for cache_name, stat in self.stats_per_cache.items():\r\n ret.append({\r\n 'cache_name': cache_name,\r\n 'num_hits': len(stat.hit_targets),\r\n 'num_misses': len(stat.miss_targets),\r\n 'hits': stat.hit_targets,\r\n 'misses': stat.miss_targets\r\n })\r\n return ret", "def info_cache():\n return [custom_hit, custom_miss, len(custom_memory), total_custom_memory]", "def stats():\n global CACHE, STATS_MISSES, STATS_HITS, STATS_KEYS_COUNT\n memory_address = \"0x\" + str(\"%X\" % id( CACHE )).zfill(16)\n return {'cache_memory_address': memory_address,\n 'hits': STATS_HITS,\n 'misses': STATS_MISSES ,\n 'keys_count': STATS_KEYS_COUNT,\n }", "def get_cache_stats():\n hostnames = get_memcached_hosts()\n\n if not hostnames:\n return None\n\n all_stats = []\n\n for hostname in hostnames:\n try:\n host, port = hostname.split(':')\n except ValueError:\n # Assume this is a hostname without a port.\n socket_af = socket.AF_INET\n host = hostname\n port = 11211\n\n if host == 'unix':\n socket_af = socket.AF_UNIX\n connect_param = port\n else:\n socket_af = socket.AF_INET\n connect_param = (host, int(port))\n\n s = socket.socket(socket_af, socket.SOCK_STREAM)\n\n try:\n s.connect(connect_param)\n except socket.error:\n logger.error('Unable to connect to \"%s\"' % hostname)\n s.close()\n continue\n\n s.send(b'stats\\r\\n')\n data = s.recv(2048).decode('ascii')\n s.close()\n\n stats = {}\n\n for line in data.splitlines():\n info = line.split(' ')\n\n if info[0] == 'STAT' and len(info) == 3:\n try:\n value = int(info[2])\n except ValueError:\n value = info[2]\n\n stats[info[1]] = value\n\n if stats['cmd_get'] == 0:\n stats['hit_rate'] = 0\n stats['miss_rate'] = 0\n else:\n stats['hit_rate'] = 100 * stats['get_hits'] / stats['cmd_get']\n stats['miss_rate'] = 100 * stats['get_misses'] / stats['cmd_get']\n\n all_stats.append((hostname, stats))\n\n return all_stats", "def stats(self):\n if self.__cache:\n return {\n \"size\": self.__cache.currsize,\n \"maxsize\": self.__cache.maxsize,\n \"hits\": self._hits._value.get(),\n \"miss\": self._misses._value.get(),\n }\n else:\n return super(MemoryCache, self).stats()", "def stats(self):\n return super(NoneCache, self).stats()", "def stats(self):\n ret = super(DiskCache, self).stats()\n ret[\"root\"] = (self.__env.stat(),)\n for name, database in self.__databases.items():\n with self.__env.begin(database, write=False) as txn:\n ret[name] = txn.stat(database)\n\n return ret", "def info_cache(self):\n self.info.info()\n self.dataset.info()\n self.category.info()", "def testStats(self):\n\n stats = memcache.get_stats()\n self.assertEqual(\n set(['hits', 'items', 'bytes', 'oldest_item_age', 'misses',\n 'byte_hits']),\n set(stats.keys()))", "def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)", "def cache_stats(request, template_name=\"admin/cache_stats.html\"):\n cache_stats = get_cache_stats()\n\n return render_to_response(template_name, RequestContext(request, {\n 'cache_hosts': cache_stats,\n 'cache_backend': cache.__module__,\n 'title': _(\"Server Cache\"),\n 'root_path': settings.SITE_ROOT + \"admin/db/\"\n }))", "def get_host_stats(self, refresh=False):", "def generate_statistics():\r\n statistics = cache.get('statistics')\r\n if statistics is None:\r\n statistics = {}\r\n statistics['nr_hashtags'] = ('Number of Hashtags',\r\n get_number_hashtags())\r\n statistics['nr_tokens'] = ('Number of Tokens', get_token_count())\r\n statistics['media_storage_size'] = ('Storage Folder Size (MB)',\r\n 
str(get_folder_size(\r\n cfg['media_storage'])))\r\n\r\n cache.set('statistics', statistics,\r\n cfg['flask_cache_timeout'] * 60)\r\n\r\n return statistics", "def get_info(self):\n hits, misses, cacheSizeBytes, cacheSize = (\n self.hits,\n self.misses,\n self.__get_cache_size(),\n len(self.__recentAccessed),\n )\n filled = cacheSizeBytes / self.__maxSize\n\n return {\n \"hits\": hits,\n \"misses\": misses,\n \"cacheSize\": {\"bytes\": cacheSizeBytes, \"items\": cacheSize},\n \"filled\": filled,\n }", "def stats(self):\r\n\t\tdata = self._get('global/', query=None)\r\n\t\treturn data", "def stats(self):\n pass", "def getMemoizeStats():\n\n STAT_STRING = \\\n \"Number of functions which used memoizing: %(numFuncs)s\\n\" \\\n \"Number of unique function values recorded: %(numValues)s\"\n\n info = STAT_STRING % dict(\n numFuncs = len( _memoizedFunctions ),\n numValues = sum( [ len(list(i.cache.keys())) \\\n for i in _memoizedFunctions ] ),\n )\n\n return info", "def get_cache_info(self):\n\t\tdb_cursor = self.cache.query_source(self.name,\n\t\t\t[\"count(*)\", \"min(COLLECTED_DATE)\", \"max(COLLECTED_DATE)\"])\n\t\t(count, min_date, max_date) = db_cursor.fetchone()\n\t\treturn self.name, str(count), str(min_date), str(max_date)", "def get_stats(self):\n return self.manager.get_stats(self)", "def get_statistics(self):\n return self.results", "def _get_stats(self):\n self.stats = set()\n self._bstats = set()\n self._h_bstats = set()\n self._tstats = set()\n self._ftstats = set()\n for cl in self.data_classes:\n for stat in cl._bstats:\n self.stats.add(stat)\n self._bstats.add(stat)\n for stat in cl._hbstats:\n self.stats.add(stat)\n self._h_bstats.add(stat)\n for stat in cl._tstats:\n self._tstats.add(stat)\n self.stats.add(stat)\n try:\n trips = cl.triples\n f_stats = cl.read_tfstats(trips,eq=False,lande=False)\n for trip in f_stats:\n for stat in f_stats[trip]:\n self._ftstats.add(stat)\n self.stats.add(stat)\n except:\n AttributeError", "def get_stats(name):\n\n return get_component(CachingPackage.COMPONENT_NAME).get_stats(name)", "def stats(self):", "def get_cnstat(self):\n def get_counters(table_id):\n \"\"\"\n Get the counters from specific table.\n \"\"\"\n fields = [\"0\"]*BUCKET_NUM\n\n for pos, cntr_list in counter_bucket_dict.items():\n for counter_name in cntr_list:\n full_table_id = COUNTER_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = str(int(fields[pos]) + int(counter_data))\n\n cntr = NStats._make(fields)\n return cntr\n\n def get_rates(table_id):\n \"\"\"\n Get the rates from specific table.\n \"\"\"\n fields = [\"0\",\"0\",\"0\",\"0\",\"0\",\"0\"]\n for pos, name in enumerate(rates_key_list):\n full_table_id = RATES_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = float(counter_data)\n cntr = RateStats._make(fields)\n return cntr\n\n # Get the info from database\n counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP);\n # Build a dictionary of the stats\n cnstat_dict = OrderedDict()\n cnstat_dict['time'] = datetime.datetime.now()\n ratestat_dict = OrderedDict()\n if counter_port_name_map is None:\n return cnstat_dict, ratestat_dict\n for port in natsorted(counter_port_name_map):\n port_name = port.split(\":\")[0]\n if 
self.multi_asic.skip_display(constants.PORT_OBJ, port_name):\n continue\n cnstat_dict[port] = get_counters(counter_port_name_map[port])\n ratestat_dict[port] = get_rates(counter_port_name_map[port])\n return cnstat_dict, ratestat_dict", "def get_statistics(self):\n statistics = {\n 'entry': 0,\n 'bandwidth': 0,\n 'exit': 0,\n 'pages': 0\n }\n downloads = statistics.copy()\n \n portal_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_portal_state'\n )\n context_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_context_state'\n )\n site = portal_state.portal()\n \n url = self.context.absolute_url().replace(site.absolute_url(), '')\n urls = []\n if url == '':\n url = '/'\n quoted_url = urllib.quote(url)\n \n urls.append(quoted_url)\n urls.append(quoted_url + '/view')\n canonical_url = urllib.quote(context_state.canonical_object_url())\n if canonical_url not in urls:\n urls.append(canonical_url)\n urls.append(canonical_url + '/view')\n\n query = 'SELECT * FROM statistics WHERE url IN %s' % str(tuple(urls))\n results = Session.execute(query).fetchall()\n if results:\n for row in results:\n for key in statistics.keys():\n statistics[key] = statistics[key] + int(row[key])\n\n results_dw = Session.execute(\n 'SELECT * FROM statistics WHERE url=\"%s/at_download%%\"' % quoted_url).fetchall()\n if results_dw:\n for row in rows_stat:\n for key in statistics.keys():\n downloads[key] = downloads[key] + int(row[key])\n statistics['downloads'] = downloads['pages']\n return statistics", "def stats(self):\r\n return {}", "def item_stats(host, port):\n\n stats = None\n try:\n mc = memcache.Client(['%s:%s' % (host, port)])\n stats = mc.get_stats()[0][1]\n except IndexError:\n raise\n finally:\n return stats", "def _retrieveCachedData(self):", "def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []", "def cache():\n if request.method == 'GET':\n cache_info = in_water.cache_info()\n return json.dumps({\n 'hits': cache_info.hits,\n 'misses': cache_info.misses,\n 'maxsize': cache_info.maxsize,\n 'currsize': cache_info.currsize,\n })" ]
[ "0.7667571", "0.72230273", "0.71913105", "0.7065014", "0.6986488", "0.69252217", "0.68825775", "0.6842123", "0.6785798", "0.67494714", "0.66936606", "0.660042", "0.6566474", "0.6457031", "0.6440768", "0.63751704", "0.6336934", "0.62978095", "0.629635", "0.62528795", "0.6250231", "0.6240843", "0.61858195", "0.6181826", "0.6179083", "0.6168252", "0.61597985", "0.61410964", "0.61371297", "0.6123703" ]
0.76419413
1
saves cached items of all persistent caches into database.
def persist_all(**options): return get_component(CachingPackage.COMPONENT_NAME).persist_all(**options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_all(self):\r\n for index in range(self.count()):\r\n self.save(index)", "def save_servers(self):\n\n\t\tfor serv in self.servers:\n\t\t\ta = serv.cache.hash_map\n\t\t\twith open('cache.pickle', 'wb') as handle:\n\t\t\t\tpickle.dump(a, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def save(self):\n if self._cache is not None:\n with open(self.cache_path, 'w') as cache_file:\n json.dump(self._cache, cache_file)", "def persist_to_datastore(self):\n\n # Start putting the experiments asynchronously.\n experiments_to_put = []\n for experiment_name in self.experiments:\n experiment_model = self.get_experiment(experiment_name)\n experiments_to_put.append(experiment_model)\n async_experiments = db.put_async(experiments_to_put)\n\n # Fetch all current counts available in memcache...\n counter_keys = []\n for experiment_name in self.experiments:\n experiment_model = self.get_experiment(experiment_name)\n counter_keys.append(experiment_model.participants_key)\n counter_keys.append(experiment_model.conversions_key)\n\n # ...and when we grab the current counts, reset the currently\n # accumulating counters at the same time.\n count_results = synchronized_counter.SynchronizedCounter.pop_counters(\n counter_keys)\n\n # Now add the latest accumulating counters to each alternative.\n alternatives_to_put = []\n for experiment_name in self.alternatives:\n\n experiment_model = self.get_experiment(experiment_name)\n alternative_models = self.get_alternatives(experiment_name)\n participants = count_results[experiment_model.participants_key]\n conversions = count_results[experiment_model.conversions_key]\n\n for alternative_model in alternative_models:\n\n # When persisting to datastore, we want to update with the most\n # recent accumulated counter from memcache.\n if alternative_model.number < len(participants):\n delta_participants = participants[alternative_model.number]\n alternative_model.participants += delta_participants\n\n if alternative_model.number < len(conversions):\n delta_conversions = conversions[alternative_model.number]\n alternative_model.conversions += delta_conversions\n\n alternatives_to_put.append(alternative_model)\n self.update_alternative(alternative_model)\n\n # When periodically persisting to datastore, first make sure memcache\n # has relatively up-to-date participant/conversion counts for each\n # alternative.\n self.dirty = True\n self.store_if_dirty()\n\n # Once memcache is done, put alternatives.\n async_alternatives = db.put_async(alternatives_to_put)\n\n async_experiments.get_result()\n async_alternatives.get_result()", "async def save_all_storage(self) -> None:\n await asyncio.gather(*(server.save_all_storages() for server in self.servers.values()))\n await self.save_all_global_storages()", "def save_cache(self):\n with open(self.get_cache_filename(), 'wb+') as f:\n out = dict()\n out['timestamp'] = self.get_last_update()\n out['cache'] = self.cache\n f.write(pickle.dumps(out))", "async def save_to_cache(self, item: T):\n path = self._build_cache_path(\n **{a: getattr(item, a) for a in self._unique_attribues}\n )\n if path.is_file():\n raise ValueError(f\"Trying to overwrite cache at {str(path)}\")\n path.parent.mkdir(parents=True, exist_ok=True)\n async with aiofiles.open(str(path), \"w\") as file:\n await file.write(item.to_json())", "def cache_save(item: str, obj: object) -> None:\n\titem = str(item)\n\tcache = \"Cached/\" + item\n\n\tcache_create()\n\n\tpickle.dump(obj, open(cache, \"wb\"))\n\tuux.show_debug(\"Cached object to \" + cache)", "def save(self):\n\n 
for vm in self.vms:\n vm.save()\n\n for obj in self.objects:\n obj.save()\n\n for vol in self.volumes:\n vol.save()", "def save(searches):\n # type: (list) -> None\n with Cache(CACHE_URI) as c:\n c.set(SAVED_SEARCH, json.dumps(searches, ensure_ascii=False))", "def flush_to_disk(self):\n logger.info(\"Flushing %s queries from in-memory cache to disk\", len(self.batch_writes))\n rows = self.memory_connection.execute(f\"\"\"\n SELECT hash_id, query, raw_query, domain, intent FROM queries\n WHERE rowid IN ({\",\".join(self.batch_writes)});\n \"\"\")\n self.disk_connection.executemany(\"\"\"\n INSERT OR IGNORE into queries values (?, ?, ?, ?, ?);\n \"\"\", rows)\n self.disk_connection.commit()\n self.batch_writes = []", "def write(self):\n\n for storage in self.storages.values():\n self.__write(storage)\n storage.clear()", "def save_db(self) -> None:", "def _save(self):\n self.logger.debug(\"Saving to persistence\")\n try:\n data = self.persistence_serialize()\n except NotImplementedError:\n # allow backwards compatibility or persisted_values way\n # generate item to be persisted by gathering all variables\n # to be persisted into a dictionary\n data = {persisted_var: getattr(self, persisted_var)\n for persisted_var in self.persisted_values()}\n\n # save generated dictionary under block's id\n self._persistence.save(data, self.id())", "def save(self):\n if self._mode == 'dict':\n self._mode = 'shelve'\n self._shelve_mode = 'c'\n\n for key, value in self._dict.items():\n ckey = copy.copy(key)\n cvalue = copy.copy(value)\n self.add(ckey, cvalue, 'shelve', check=False)\n\n self._dict.clear()\n\n if self._mode == 'dict':\n self._mode = 'dict'\n self._shelve_mode = 'r'", "def save_items(self):\n raise NotImplementedError()", "def save_all(self, objects):\n self.session.add_all(objects)\n self.session.commit()", "def flush(self):\n for db in self.values():\n db.flush()", "def write_to_cache(self):\n data = {'data': self.data, 'inventory': self.inventory}\n json_data = json.dumps(data, indent=2)\n\n with open(self.cache_filename, 'w') as cache:\n cache.write(json_data)", "def flush_caches(self):\n spotify.Error.maybe_raise(\n lib.sp_session_flush_caches(self._sp_session))", "def dump(self):\n for cache_set in self.cache_sets:\n cache_set.dump()", "def commit(self):\n for db in self.values():\n db.commit()", "def save_cached(self, static):\n self.session.query(Cached).\\\n filter(text(\"extract(year from target_date) = :year\")).\\\n params(year=self.year).\\\n delete(synchronize_session=False)\n for cdate in static.all_days:\n static_day = static.all_days[cdate]\n if static_day.base_block is not None:\n self.session.add(self.new_cache_target(static_day, static_day.base_block, 'base'))\n if static_day.vigil_block is not None:\n self.session.add(self.new_cache_target(static_day, static_day.vigil_block, 'vigil'))\n self.session.commit()", "def save(self):\n\n\t\t# Use internal time if we have one, else use the global\n\t\tiExpire = '__expire' in self.__dStore and self.__dStore['__expire'] or _muiExpire\n\n\t\t# If we have no expire time, set forever\n\t\tif iExpire == 0:\n\t\t\t_moRedis.set(self.__id, JSON.encode(self.__dStore))\n\n\t\t# Else, set to expire\n\t\telse:\n\t\t\t_moRedis.setex(self.__id, _muiExpire, JSON.encode(self.__dStore))", "def save(self):\n\n toStore = {\n key: obj.to_dict()\n for key, obj in FileStorage.__objects.items()\n }\n with open(FileStorage.__file_path, 'wt') as file:\n json.dump(toStore, file)", "def _do_flush(self, cache):\n try:\n while cache and not self._stop_flushing:\n 
key, value = cache.popitem()\n self._shelf[self._encode_key(key)] = value\n if cache:\n cache.clear()\n except BaseException as exception:\n self._flush_exception = exception", "def save_all(self, obj_list):\n\n for obj in obj_list:\n self.save(obj)", "def saveCacheFile(self):\n with open(self.cachePath, 'w', encoding='utf-8') as outfile:\n json.dump(self.cacheData, outfile)", "def save_data(self):\n with open(self.storage_path, 'w') as cache_file:\n json.dump(self.data, cache_file)", "def _store_cache(self):\n assert self._already_generated, \"Must generate before storing to cache\"\n\n if self.variant_unit is not None:\n logger.warning(\"Cannot cache once variant_unit has been set\")\n return\n\n try:\n os.mkdir(os.path.dirname(self._cache_key))\n except FileExistsError:\n # Easier than checking and risking race conditions\n pass\n\n with open(self._cache_key, 'w') as f:\n json.dump(self.rows, f)\n\n logger.debug(\"Stored cache to {}\".format(self._cache_key))" ]
[ "0.6491182", "0.6305364", "0.623572", "0.61513877", "0.61201674", "0.6106031", "0.604628", "0.6033563", "0.60323066", "0.5989577", "0.59685063", "0.5851815", "0.5850472", "0.5846848", "0.5825873", "0.5825709", "0.5813846", "0.5808218", "0.57953066", "0.57812065", "0.5776191", "0.576409", "0.5758883", "0.5741006", "0.5735999", "0.5733217", "0.5711757", "0.57097435", "0.5709055", "0.5707841" ]
0.664058
0
clears all caches that are required. normally, you should never call this method manually. but it is implemented to be used for clearing extended and complex caches after application has been fully loaded. to enforce that valid results are cached based on loaded packages.
def clear_required_caches(): return get_component(CachingPackage.COMPONENT_NAME).clear_required_caches()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_cache():\n # TODO\n pass", "def clear_cache(self):\n pass", "def clear_cache():\n cache = Cache()\n cache.reset()", "def cache_clear(self):\n\t\tself.__cache = {}", "def clear_all(self) -> None:\n with self._caches_lock:\n self._function_caches = {}", "def clean_cache(self):\n return", "def _clear_cache(self):\n\n self._cache = dict()", "def _clear_cache(self):\n self.cache = {}", "def clear(self, cacheDir):", "def reset_cache():\n global _CACHE\n _CACHE.clear()", "def clear_all_cache():\r\n gc.collect()\r\n wrappers = [\r\n a for a in gc.get_objects() if isinstance(a, functools._lru_cache_wrapper)\r\n ]\r\n\r\n for wrapper in wrappers:\r\n wrapper.cache_clear()", "def clear_cache(self):\n self._cache = dict()", "def clear(self):\n try:\n shutil.rmtree(self._cache_path)\n self._init_cache_path()\n except Exception:\n return", "def reset_cache(self):\n self._cache_complete = False\n self._cache = {}\n self._catcache = {}", "def reset_cache(self):\n self.izx.reset_cache()\n self.ezx.reset_cache()", "def _clean_cache(self):\n del self._cache\n self._cache = {}", "def reset_cache(self):\n if self.cache_address is not None:\n for add in self.cache:\n os.remove(add + \".cd\")\n os.remove(add + \".cl\")\n self.cache = [None] * len(self)", "def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]", "def clear_cache(self):\n\n for dataset in self._datasets:\n dataset.clear_cache()", "def cache_clear():\n # type: () -> None\n with Cache() as c:\n c.clear()", "def flushCaches(self):\n self.rehabTreeCache = {} \n self.frailRehabTreeCache = {} \n self.frailTreeCache = {}", "def clear_cache(self):\n self.part_cache.clear()", "def clear_all(self):\n self.clear_redis()\n self.clear_cache()", "def _purge():\r\n _cache.clear()", "def cache_clear():\n # type: () -> None\n with Cache(CACHE_URI) as c:\n c.clear()", "def clear_cache():\n if os.path.exists(get_cachedir()):\n for filename in os.listdir(get_cachedir()):\n if not filename.endswith('.cache'):\n continue\n\n path = os.path.join(get_cachedir(), filename)\n os.unlink(path)", "def invalidateCaches(self):\n\n self._vertexCacheValid = False\n self._genusCacheValid = False\n self._vertexCharacteristicCacheValid = False\n self._coreCacheValid = False", "def clear_cache():\n os.remove(CACHE_FILE)", "def set_emptying_cache():\r\n from pylons import g\r\n from r2.lib.cache import SelfEmptyingCache\r\n g.cache.caches = [SelfEmptyingCache(),] + list(g.cache.caches[1:])", "def clear(self):\n self._cache = dict()" ]
[ "0.7994559", "0.7888056", "0.7651978", "0.7617424", "0.76009864", "0.75158864", "0.7514429", "0.7508485", "0.7451582", "0.7418931", "0.7418292", "0.73935676", "0.7345077", "0.733354", "0.7332456", "0.73254156", "0.73245597", "0.730606", "0.72230273", "0.7210806", "0.7198348", "0.7187966", "0.7182363", "0.71495044", "0.7126831", "0.70975107", "0.7085064", "0.703638", "0.7030687", "0.70289826" ]
0.8194667
0
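The record above pairs a docstring-style query about clearing required caches with a one-line document that delegates the call to a registered caching component. A minimal, self-contained sketch of that delegation pattern is shown below; the names get_component and CachingPackage.COMPONENT_NAME come from the record's document, while the component registry and the cache internals are illustrative assumptions rather than the framework's actual implementation.

    # Sketch of the component-delegation pattern (assumed internals, not the real framework).
    _components = {}

    class CachingPackage:
        COMPONENT_NAME = 'caching.component'

    class CachingComponent:
        def __init__(self):
            # "Required" caches: the ones that are only safe to rebuild after all packages load.
            self._required_caches = {'extended': {}, 'complex': {}}

        def clear_required_caches(self):
            for cache in self._required_caches.values():
                cache.clear()

    def get_component(name):
        return _components[name]

    def clear_required_caches():
        # Module-level facade: callers never touch the component registry directly.
        return get_component(CachingPackage.COMPONENT_NAME).clear_required_caches()

    _components[CachingPackage.COMPONENT_NAME] = CachingComponent()
    clear_required_caches()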
Exchange ghosts values in periodic local array
def _exchange_ghosts_local_d(self, d): s_gh = self.gh_out[d] sl = [slice(None) for _ in xrange(self._dim)] sl_gh = [slice(None) for _ in xrange(self._dim)] sl[d] = slice(1 * s_gh, 2 * s_gh) sl_gh[d] = slice(-1 * s_gh, None) for v_out in self.field_out: v_out.data[0][tuple(sl)] += v_out.data[0][tuple(sl_gh)] sl[d] = slice(-2 * s_gh, -1 * s_gh) sl_gh[d] = slice(0, 1 * s_gh) for v_out in self.field_out: v_out.data[0][tuple(sl)] += v_out.data[0][tuple(sl_gh)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _exchange_ghosts_local(self):\n for d in xrange(self._dim):\n self._exchange_ghosts_local_d(d)", "def indices_and_currents_TSC_2D( charge_electron, positions_x, positions_y, velocity_x, velocity_y,\\\n x_grid, y_grid, ghost_cells, length_domain_x, length_domain_y, dt ):\n \n \n positions_x_new = positions_x + velocity_x * dt\n positions_y_new = positions_y + velocity_y * dt\n\n base_indices_x = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n base_indices_y = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n\n dx = af.sum(x_grid[1] - x_grid[0])\n dy = af.sum(y_grid[1] - y_grid[0])\n\n\n # Computing S0_x and S0_y\n ###########################################################################################\n \n # Determining the grid cells containing the respective particles\n \n x_zone = (((af.abs(positions_x - af.sum(x_grid[0])))/dx).as_type(af.Dtype.u32))\n y_zone = (((af.abs(positions_y - af.sum(y_grid[0])))/dy).as_type(af.Dtype.u32))\n\n \n # Determing the indices of the closest grid node in x direction\n\n temp = af.where(af.abs(positions_x-x_grid[x_zone]) < \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = x_zone[temp]\n\n temp = af.where(af.abs(positions_x - x_grid[x_zone]) >= \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = (x_zone[temp] + 1).as_type(af.Dtype.u32) \n\n\n # Determing the indices of the closest grid node in y direction\n\n temp = af.where(af.abs(positions_y-y_grid[y_zone]) < \\\n af.abs(positions_y-y_grid[y_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_y[temp] = y_zone[temp]\n\n temp = af.where(af.abs(positions_y - y_grid[y_zone])>=af.abs(positions_y-x_grid[y_zone + 1]))\n\n if(temp.elements()>0):\n base_indices_y[temp] = (y_zone[temp] + 1).as_type(af.Dtype.u32) \n\n # Concatenating the index list for near by grid nodes in x direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n base_indices_minus_two = (base_indices_x - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_x - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_x + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_x + 2).as_type(af.Dtype.u32) \n\n\n\n index_list_x = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_x),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n\n\n # Concatenating the index list for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n \n base_indices_minus_two = (base_indices_y - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_y - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_y + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_y + 2).as_type(af.Dtype.u32) \n\n\n index_list_y = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_y),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n # Concatenating the positions_x for determining weights for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n positions_x_5x = af.join( 0,\\\n af.join(0, positions_x, positions_x, positions_x),\\\n af.join(0, positions_x, positions_x),\\\n )\n\n positions_y_5x = af.join( 0,\\\n af.join(0, positions_y, positions_y, positions_y),\\\n af.join(0, positions_y, positions_y),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n\n distance_nodes_x = 
x_grid[af.flat(index_list_x)]\n\n distance_nodes_y = y_grid[af.flat(index_list_y)]\n\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx))**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy)**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy))**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_y.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle\n \n S0_x = af.tile(W_x, 1, 1, 5)\n S0_y = af.tile(W_y, 1, 1, 5)\n\n\n S0_y = af.reorder(S0_y, 0, 2, 1)\n\n\n\n #Computing S1_x and S1_y\n ###########################################################################################\n\n positions_x_5x_new = af.join( 0,\\\n af.join(0, positions_x_new, positions_x_new, positions_x_new),\\\n af.join(0, positions_x_new, positions_x_new),\\\n )\n\n positions_y_5x_new = af.join( 0,\\\n af.join(0, positions_y_new, positions_y_new, positions_y_new),\\\n af.join(0, positions_y_new, positions_y_new),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x_new) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x_new[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x_new) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x_new) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] \\\n - positions_x_5x_new[temp])/dx\\\n )\\\n )**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x_new) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] \\\n - positions_y_5x_new[temp]\\\n )/dy\\\n )**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x_new) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x_new) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] \\\n - positions_y_5x_new[temp])/dy\\\n )\\\n )**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_x.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle \n \n S1_x = af.tile(W_x, 1, 1, 5)\n S1_y = af.tile(W_y, 1, 1, 5)\n\n S1_y = af.reorder(S1_y, 0, 2, 1)\n\n\n 
###########################################################################################\n\n # Determining the final weight matrix for currents in 3D matrix form factor\n\n\n W_x = (S1_x - S0_x) * (S0_y + (0.5 *(S1_y - S0_y)) )\n\n\n W_y = (S1_y - S0_y) * (S0_x + (0.5 *(S1_x - S0_x)) )\n\n\n ###########################################################################################\n\n\n # Assigning Jx and Jy according to Esirkepov's scheme\n\n Jx = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n Jy = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n\n\n Jx[:, 0, :] = -1 * charge_electron * (dx/dt) * W_x[:, 0, :].copy()\n Jx[:, 1, :] = Jx[:, 0, :] + -1 * charge_electron * (dx/dt) * W_x[:, 1, :].copy()\n Jx[:, 2, :] = Jx[:, 1, :] + -1 * charge_electron * (dx/dt) * W_x[:, 2, :].copy()\n Jx[:, 3, :] = Jx[:, 2, :] + -1 * charge_electron * (dx/dt) * W_x[:, 3, :].copy()\n Jx[:, 4, :] = Jx[:, 3, :] + -1 * charge_electron * (dx/dt) * W_x[:, 4, :].copy()\n \n # Computing current density using currents\n \n Jx = (1/(dx * dy)) * Jx\n\n\n Jy[:, :, 0] = -1 * charge_electron * (dy/dt) * W_y[:, :, 0].copy()\n Jy[:, :, 1] = Jy[:, :, 0] + -1 * charge_electron * (dy/dt) * W_y[:, :, 1].copy()\n Jy[:, :, 2] = Jy[:, :, 1] + -1 * charge_electron * (dy/dt) * W_y[:, :, 2].copy()\n Jy[:, :, 3] = Jy[:, :, 2] + -1 * charge_electron * (dy/dt) * W_y[:, :, 3].copy()\n Jy[:, :, 4] = Jy[:, :, 3] + -1 * charge_electron * (dy/dt) * W_y[:, :, 4].copy()\n \n # Computing current density using currents\n\n Jy = (1/(dx * dy)) * Jy\n\n # Preparing the final index and current vectors\n ###########################################################################################\n \n \n # Determining the x indices for charge deposition\n index_list_x_Jx = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jx = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n\n\n currents_Jx = af.flat(Jx)\n\n # Determining the x indices for charge deposition\n index_list_x_Jy = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jy = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n \n # Flattenning the Currents array\n currents_Jy = af.flat(Jy)\n\n af.eval(index_list_x_Jx, index_list_y_Jx)\n af.eval(index_list_x_Jy, index_list_y_Jy)\n af.eval(currents_Jx, currents_Jy)\n\n\n return index_list_x_Jx, index_list_y_Jx, currents_Jx,\\\n index_list_x_Jy, index_list_y_Jy, currents_Jy", "def avalanche_slab(self, from_y, from_x, to_y, to_x):\n self.grid[from_y, from_x] -= 1\n self.grid[to_y, to_x] += 1\n \n self.avcount[self.it_num] += 1", "def Green_func(self):\n if self.bc == True:\n size = self.grid_size\n else:\n size = 2*self.grid_size\n self.Green = np.zeros([size, size])\n for x in range(len(self.Green[0])):\n for y in range(len(self.Green[1])):\n radius = np.sqrt(x**2 + y**2) \n if radius < self.soften: \n radius = self.soften\n self.Green[x, y]=1/(4 * np.pi * radius)\n if self.grid_size%2 == 0: \n self.Green[: size//2, size//2 : ] = np.flip(self.Green[: size//2, : size//2], axis = 1) # an intermittent step - the original grid has only been flipped once (2 x the original size)\n self.Green[ size//2 : , :] = np.flip(self.Green[: size//2, :], axis = 0)\n else: \n print(\"Exiting - Grid size is currently odd. 
Pleaset set to an even value.\")", "def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p", "def update_H(self):\n self.grid.H[:, -1, :, :] = self.grid.H[:, 0, :, :]", "def update_H(self):\n self.grid.H[:, :, -1, :] = self.grid.H[:, :, 0, :]", "def update_variable_array(array,annuli,times,t,r,value):\n annulus=radius_to_annulus(r,annuli)\n annulus_start=np.sum(times[0:annulus])\n array[annulus_start+t]=value\n return ()", "def update(self):\n for i in range(self.min_y, self.max_y + 1):\n for j in range(self.min_x, self.max_x + 1):\n try:\n DIMENSIONAL_ARRAY[i-1][j-1] = self.lis[i-self.min_y][j-self.min_x]\n except IndexError:\n pass", "def interpolate_next(self):\n\n # Get valid ensembles\n valid_ens = self.valid_data[0, :]\n\n # Process ensembles\n n_ens = len(valid_ens)\n\n for n in np.arange(0, n_ens-1)[::-1]:\n if not valid_ens[n]:\n self.u_processed_mps[n] = self.u_processed_mps[n+1]\n self.v_processed_mps[n] = self.v_processed_mps[n+1]", "def _exchange_ghosts_mpi(self):\n for d in xrange(self._dim):\n if d in self._cutdir_list:\n self._exchange_ghosts_mpi_d(d)\n else:\n self._exchange_ghosts_local_d(d)", "def updateArrays(self):\n for channelNumber in range(0, 8):\n self.channels[channelNumber][self.currentPosition]=self._voltage_get(channelNumber)#update next element in each array\n self.currentPosition+=1\n if self.currentPosition>=self.numberOfPoints:#reset position to beginning when we hit max number of points (like rolling oscilloscope)\n self.currentPosition=0\n self.cursorXS = self.getCurrentPositionArray()\n #could also set the next points to NaN's to make a gap!", "def release_atoms(self):\r\n\t\thole_size = self.box_size/2\r\n\t\thole_left = self.box_size/2 - hole_size/2\r\n\t\thole_right = self.box_size/2 + hole_size/2\r\n\r\n\t\tx_vals = (self.pos.x > hole_left) & (self.pos.x < hole_right)\r\n\t\ty_vals = (self.pos.y > hole_left) & (self.pos.y < hole_right)\r\n\t\tindices = (self.pos.z < 0) & x_vals & y_vals\r\n\r\n\t\tescaped_count = np.sum(indices)\r\n\t\tlost_momentum = self.atom_mass*np.sum(self.vel.z)\r\n\r\n\t\t# this would look bettes as self.vel.values[:, indices] = ... 
, but that is actualy noticeably slower\r\n\t\tself.pos.x[indices], self.pos.y[indices], self.pos.z[indices] = *generator.uniform(hole_left, hole_right, size=(2, escaped_count)), np.full(escaped_count, self.box_size)\r\n\t\tif self.change_velocities:\r\n\t\t\t# changing the velocity makes the temperature decrease over time\r\n\t\t\tself.vel.x[indices], self.vel.y[indices], self.vel.z[indices] = generator.uniform(0, self.box_size, size=(3, escaped_count))\r\n\r\n\t\treturn escaped_count, lost_momentum", "def update_bom(self):\n bom2 = copy.copy(self.bom)\n ias1 = self.ias[self.chgs == 1]\n vs2 = copy.copy(self.vs)\n for i in ias1:\n iasc = self.ias[ np.logical_and(self.chgs==-1, self.bom[i]>0) ]\n nac = len(iasc)\n if nac > 0:\n #assert nac == 1\n j = iasc[0]\n bij = self.bom[i,j] - 1\n bom2[i,j] = bij\n bom2[j,i] = bij\n vs2[i] = vs2[i]+1; vs2[j] = vs2[j]+1\n self.bom = bom2\n self.vs = vs2", "def update_H(self):\n self.grid.H[-1, :, :, :] = self.grid.H[0, :, :, :]", "def update_E(self):\n self.grid.E[:, :, 0, :] = self.grid.E[:, :, -1, :]", "def update_E(self):\n self.grid.E[:, 0, :, :] = self.grid.E[:, -1, :, :]", "def newCurrent(BX,BY,xi,yi,expArr,t):\r\n #Go from frequency to time domain\r\n BxTime=np.real(BX*expArr[t])\r\n ByTime=np.real(BY*expArr[t])\r\n\r\n #Find the xVals and yVals arrays\r\n xVals=xi[0]\r\n yVals=np.transpose(yi)[0]\r\n \r\n #Find dx and dy\r\n dx=xVals[1]-xVals[0]\r\n dy=yVals[1]-yVals[0]\r\n\r\n #Find the required derivatives\r\n dBXdyGrid,dBXdxGrid=np.gradient(BxTime,dx,dy)\r\n dBYdyGrid,dBYdxGrid=np.gradient(ByTime,dx,dy)\r\n\r\n #Find Jz\r\n JzGrid=dBYdxGrid-dBXdyGrid\r\n \r\n #Find divB\r\n # JzGrid=dBXdxGrid+dBYdyGrid\r\n\r\n return JzGrid", "def _exchange_ghosts_mpi_d(self, d):\n s_gh = self.gh_out[d]\n sl_l = [slice(None) for _ in xrange(self._dim)]\n sl_gh_l = [slice(None) for _ in xrange(self._dim)]\n sl_r = [slice(None) for _ in xrange(self._dim)]\n sl_gh_r = [slice(None) for _ in xrange(self._dim)]\n sl_l[d] = slice(1 * s_gh, 2 * s_gh)\n sl_gh_r[d] = slice(-1 * s_gh, None)\n sl_r[d] = slice(-2 * s_gh, -1 * s_gh)\n sl_gh_l[d] = slice(0, 1 * s_gh)\n for v_out in self.field_out:\n first_cut_dir = v_out.topology.cutdir.tolist().index(True)\n self._gh_to_l[d][...] = v_out.data[0][tuple(sl_gh_l)]\n self._gh_to_r[d][...] = v_out.data[0][tuple(sl_gh_r)]\n r_rk = v_out.topology.neighbours[1, d - first_cut_dir]\n l_rk = v_out.topology.neighbours[0, d - first_cut_dir]\n recv_r = self._comm.Irecv(\n [self._gh_from_r[d], self._gh_from_r[d].size,\n HYSOP_MPI_REAL],\n source=r_rk, tag=1234 + r_rk + 19 * d)\n recv_l = self._comm.Irecv(\n [self._gh_from_l[d], self._gh_from_l[d].size,\n HYSOP_MPI_REAL],\n source=l_rk, tag=4321 + l_rk + 17 * d)\n send_l = self._comm.Issend(\n [self._gh_to_l[d], self._gh_to_l[d].size, HYSOP_MPI_REAL],\n dest=l_rk, tag=1234 + self._comm_rank + 19 * d)\n send_r = self._comm.Issend(\n [self._gh_to_r[d], self._gh_to_r[d].size, HYSOP_MPI_REAL],\n dest=r_rk, tag=4321 + self._comm_rank + 17 * d)\n send_r.wait()\n recv_l.wait()\n v_out.data[0][tuple(sl_l)] += self._gh_from_l[d]\n send_l.wait()\n recv_r.wait()\n v_out.data[0][tuple(sl_r)] += self._gh_from_r[d]", "def update_E(self):\n self.grid.E[0, :, :, :] = self.grid.E[-1, :, :, :]", "def _triangulate_periodic(self,x):\n\n #1. Tile cell positions 9-fold to perform the periodic triangulation\n # Calculates y from x. y is (9nc x 2) matrix, where the first (nc x 2) are the \"true\" cell positions,\n # and the rest are translations\n y = make_y(x,self.L*self.grid_xy)\n\n\n #2. 
Perform the triangulation on y\n # The **triangle** package (tr) returns a dictionary, containing the triangulation.\n # This triangulation is extracted and saved as tri\n t = tr.triangulate({\"vertices\": y})\n tri = t[\"triangles\"]\n\n # Del = Delaunay(y)\n # tri = Del.simplices\n n_c = x.shape[0]\n\n #3. Find triangles with **at least one** cell within the \"true\" frame (i.e. with **at least one** \"normal cell\")\n # (Ignore entries with -1, a quirk of the **triangle** package, which denotes boundary triangles\n # Generate a mask -- one_in -- that considers such triangles\n # Save the new triangulation by applying the mask -- new_tri\n tri = tri[(tri != -1).all(axis=1)]\n one_in = (tri<n_c).any(axis=1)\n new_tri = tri[one_in]\n\n #4. Remove repeats in new_tri\n # new_tri contains repeats of the same cells, i.e. in cases where triangles straddle a boundary\n # Use remove_repeats function to remove these. Repeats are flagged up as entries with the same trio of\n # cell ids, which are transformed by the mod function to account for periodicity. See function for more details\n n_tri = self.remove_repeats(new_tri,n_c)\n\n # tri_same = (self.tris == n_tri).all()\n\n #6. Store outputs\n self.n_v = n_tri.shape[0]\n self.tris = n_tri\n self.Cents = x[self.tris]\n self.vs = self.get_vertex_periodic()\n\n #7. Manually calculate the neighbours. See doc_string for conventions.\n n_neigh = get_neighbours(n_tri)\n self.v_neighbours = n_neigh\n self.neighbours = self.vs[n_neigh]", "def resetParticles(self, gameState, ghost=None):\n # Particle with all ghosts in start state\n if not getattr(self, 'particles', []):\n p = tuple(gameState.getInitialAgentPosition(g) for g in\n self.ghostIndices)\n self.particles = [p] * self.numGhosts\n else:\n for p in self.particles:\n positions = list(p)\n positions[self.ghostIndices.index(ghost)] = \\\n gameState.getInitialAgentPosition(ghost)\n p = tuple(positions)", "def verletIntegration(self):\n for atom in range(0, self.numAtoms):\n \n # Update velocities\n self.atoms[atom].vx += (self.atoms[atom].fx/self.m)*self.dt\n self.atoms[atom].vy += (self.atoms[atom].fy/self.m)*self.dt\n self.atoms[atom].vz += (self.atoms[atom].fz/self.m)*self.dt\n \n \n # Update positions\n newX = self.atoms[atom].x + self.atoms[atom].vx*self.dt\n newY = self.atoms[atom].y + self.atoms[atom].vy*self.dt\n newZ = self.atoms[atom].z + self.atoms[atom].vz*self.dt\n\n # Update current positions (applying PBC)\n if newX < 0:\n self.atoms[atom].x = newX + self.lbox\n elif newX > self.lbox:\n self.atoms[atom].x = newX - self.lbox\n else:\n self.atoms[atom].x = newX\n \n if newY < 0:\n self.atoms[atom].y = newY + self.lbox\n elif newY > self.lbox:\n self.atoms[atom].y = newY - self.lbox\n else:\n self.atoms[atom].y = newY\n \n if newZ < 0:\n self.atoms[atom].z = newZ + self.lbox\n elif newZ > self.lbox:\n self.atoms[atom].z = newZ - self.lbox\n else:\n self.atoms[atom].z = newZ", "def yank(self):\r\n self.block.bucket_array.yank_cell(self)", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def edge_pressure(self):\n self.P[:,0] = self.P[:,1]\n self.P[:,-1] = self.P[:,-2]\n self.P[0,:] = self.P[1,:]\n self.P[-1,:] = self.P[-2,:]", "def major_loop(self):\r\n\r\n upper_curve = self.shape[0]-1\r\n upper_curve_length = np.sum(self.h[upper_curve] >= self.hr[upper_curve, 0])\r\n h = np.empty(2*(self.shape[0]+upper_curve_length-1)-1)*0\r\n hr = 
np.empty(2*(self.shape[0]+upper_curve_length-1)-1)*0\r\n m = np.empty(2*(self.shape[0]+upper_curve_length-1)-1)*0\r\n\r\n for i in range(upper_curve_length-1):\r\n pt_index = self.shape[1]-1-i\r\n h[i] = self.h[upper_curve, pt_index]\r\n hr[i] = self.hr[upper_curve, pt_index]\r\n m[i] = self.m[upper_curve, pt_index]\r\n for i in range(self.shape[0]):\r\n forc_index = self.shape[0]-1-i\r\n major_loop_index = upper_curve_length-1+i\r\n h[major_loop_index] = self.hr[forc_index, 0]\r\n hr[major_loop_index] = self.hr[forc_index, 0]\r\n m[major_loop_index] = self.m[forc_index, self.h[forc_index] >= self.hr[forc_index, 0]][0]\r\n\r\n h[self.shape[0]+upper_curve_length-2:] = self.h[0, self.h[0] >= self.hr[0, 0]]\r\n hr[self.shape[0]+upper_curve_length-2:] = self.hr[0, self.h[0] >= self.hr[0, 0]]\r\n m[self.shape[0]+upper_curve_length-2:] = self.m[0, self.h[0] >= self.hr[0, 0]]\r\n\r\n return h, hr, m", "def temp_update(self):\n a_w = self.k / self.dx\n a_e = self.k / self.dx\n a_n = self.k / self.dy\n a_s = self.k / self.dy\n a_p = a_w + a_e + a_n + a_s + self.rho * self.cp * self.dx / self.dt\n for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):\n self.T[i,\n j] = (a_w * self.T[i - 1, j] + a_e * self.T[i + 1, j] +\n a_s * self.T[i, j - 1] + a_n * self.T[i, j + 1]) / a_p", "def HC_update(p_values, alpha):\n p_values = np.sort(p_values) # Make sure p-values are sorted in ascending order\n n = len(p_values) # Number of data points\n ivalues = np.arange(1, n + 1)\n #p_values = p_values[0:int(round(n/2))] # Cut-off half of the values\n HC_vec = np.sqrt(n)*(ivalues/(n+1) - p_values)/np.sqrt(p_values - p_values**2) # Calculate scores for all datapoints\n HC_vec_reduced = HC_vec[0:int(alpha*(len(HC_vec)-1))]\n max_idx = np.argmax(HC_vec_reduced)\n return HC_vec_reduced[max_idx], max_idx, HC_vec_reduced", "def __hinterpolate(self):\n \n # Temp. 
Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint" ]
[ "0.6893269", "0.54866093", "0.5398292", "0.5353562", "0.5318594", "0.5316869", "0.5310509", "0.53035486", "0.5277521", "0.520041", "0.5179516", "0.5174113", "0.5162675", "0.5162337", "0.5161567", "0.51536834", "0.51487446", "0.51144516", "0.50992984", "0.5064667", "0.5062222", "0.5048594", "0.5028387", "0.5019165", "0.50183487", "0.50116533", "0.50079083", "0.50047207", "0.49946132", "0.4979351" ]
0.6425586
1
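The document in the record above folds ghost-layer values back into the interior of a periodic local array, one band at each end of the chosen axis, for every output field. A minimal NumPy sketch of the same slicing pattern applied to a single array follows; the function name and the 1-D usage at the end are illustrative assumptions, while the slice arithmetic mirrors the record's document.

    import numpy as np

    def accumulate_periodic_ghosts(data, d, gh):
        # Periodic ghost exchange done locally: add the right ghost layer onto the
        # first interior band and the left ghost layer onto the last interior band
        # along axis d, where gh is the ghost-layer width.
        sl = [slice(None)] * data.ndim
        sl_gh = [slice(None)] * data.ndim

        sl[d] = slice(1 * gh, 2 * gh)      # first interior band
        sl_gh[d] = slice(-1 * gh, None)    # right ghost layer
        data[tuple(sl)] += data[tuple(sl_gh)]

        sl[d] = slice(-2 * gh, -1 * gh)    # last interior band
        sl_gh[d] = slice(0, 1 * gh)        # left ghost layer
        data[tuple(sl)] += data[tuple(sl_gh)]
        return data

    field = np.arange(8.0)                 # one ghost cell on each side (gh=1)
    accumulate_periodic_ghosts(field, d=0, gh=1)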
Performs ghosts exchange locally in each direction
def _exchange_ghosts_local(self): for d in xrange(self._dim): self._exchange_ghosts_local_d(d)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _exchange_ghosts_mpi(self):\n for d in xrange(self._dim):\n if d in self._cutdir_list:\n self._exchange_ghosts_mpi_d(d)\n else:\n self._exchange_ghosts_local_d(d)", "def move_ghosts(self):\n temp_ghosts = []\n for ghost in self.ghosts:\n if (self.player.x - ghost.x > 0 and\n board.board[ghost.y][ghost.x+1 if ghost.x < 34 else ghost.x] == '.' or\n board.board[ghost.y][ghost.x+1 if ghost.x < 34 else ghost.x] == 'P'\n ):\n board.board[ghost.y][ghost.x] = '.'\n if board.board[ghost.y][ghost.x+1] == 'P':\n print 'Lost the game'\n self.player.die = 1\n exit()\n board.board[ghost.y][ghost.x+1] = 'G'\n ghost.x += 1\n elif (self.player.x - ghost.x < 0 and\n board.board[ghost.y][ghost.x-1 if ghost.x > 0 else ghost.x] == '.'\n or board.board[ghost.y][ghost.x-1 if ghost.x > 0 else ghost.x] == 'P'\n ):\n board.board[ghost.y][ghost.x] = '.'\n if board.board[ghost.y][ghost.x-1] == 'P':\n print 'Lost the game'\n self.player.die = 1\n exit()\n board.board[ghost.y][ghost.x-1] = 'G'\n ghost.x -= 1\n elif (self.player.y - ghost.y > 0\n and board.board[ghost.y+1 if ghost.y < 14 else ghost.y][ghost.x] == '.'\n or board.board[ghost.y+1 if ghost.y < 14 else ghost.y][ghost.x] == 'P'\n ):\n board.board[ghost.y][ghost.x] = '.'\n if board.board[ghost.y+1][ghost.x] == 'P':\n print 'Lost the game'\n self.player.die = 1\n exit()\n board.board[ghost.y+1][ghost.x] = 'G'\n ghost.y += 1\n elif (board.board[ghost.y-1 if ghost.y > 0 else ghost.y][ghost.x] == '.'\n or board.board[ghost.y-1 if ghost.y > 0 else ghost.y][ghost.x] == 'P'\n ):\n board.board[ghost.y][ghost.x] = '.'\n if board.board[ghost.y-1][ghost.x] == 'P':\n print 'Lost the game'\n self.player.die = 1\n exit()\n board.board[ghost.y-1][ghost.x] = 'G'\n ghost.y -= 1\n temp_ghosts.append(ghost)\n self.ghosts = temp_ghosts", "def _exchange_ghosts_local_d(self, d):\n s_gh = self.gh_out[d]\n sl = [slice(None) for _ in xrange(self._dim)]\n sl_gh = [slice(None) for _ in xrange(self._dim)]\n sl[d] = slice(1 * s_gh, 2 * s_gh)\n sl_gh[d] = slice(-1 * s_gh, None)\n for v_out in self.field_out:\n v_out.data[0][tuple(sl)] += v_out.data[0][tuple(sl_gh)]\n sl[d] = slice(-2 * s_gh, -1 * s_gh)\n sl_gh[d] = slice(0, 1 * s_gh)\n for v_out in self.field_out:\n v_out.data[0][tuple(sl)] += v_out.data[0][tuple(sl_gh)]", "def _exchange_ghosts_mpi_d(self, d):\n s_gh = self.gh_out[d]\n sl_l = [slice(None) for _ in xrange(self._dim)]\n sl_gh_l = [slice(None) for _ in xrange(self._dim)]\n sl_r = [slice(None) for _ in xrange(self._dim)]\n sl_gh_r = [slice(None) for _ in xrange(self._dim)]\n sl_l[d] = slice(1 * s_gh, 2 * s_gh)\n sl_gh_r[d] = slice(-1 * s_gh, None)\n sl_r[d] = slice(-2 * s_gh, -1 * s_gh)\n sl_gh_l[d] = slice(0, 1 * s_gh)\n for v_out in self.field_out:\n first_cut_dir = v_out.topology.cutdir.tolist().index(True)\n self._gh_to_l[d][...] = v_out.data[0][tuple(sl_gh_l)]\n self._gh_to_r[d][...] 
= v_out.data[0][tuple(sl_gh_r)]\n r_rk = v_out.topology.neighbours[1, d - first_cut_dir]\n l_rk = v_out.topology.neighbours[0, d - first_cut_dir]\n recv_r = self._comm.Irecv(\n [self._gh_from_r[d], self._gh_from_r[d].size,\n HYSOP_MPI_REAL],\n source=r_rk, tag=1234 + r_rk + 19 * d)\n recv_l = self._comm.Irecv(\n [self._gh_from_l[d], self._gh_from_l[d].size,\n HYSOP_MPI_REAL],\n source=l_rk, tag=4321 + l_rk + 17 * d)\n send_l = self._comm.Issend(\n [self._gh_to_l[d], self._gh_to_l[d].size, HYSOP_MPI_REAL],\n dest=l_rk, tag=1234 + self._comm_rank + 19 * d)\n send_r = self._comm.Issend(\n [self._gh_to_r[d], self._gh_to_r[d].size, HYSOP_MPI_REAL],\n dest=r_rk, tag=4321 + self._comm_rank + 17 * d)\n send_r.wait()\n recv_l.wait()\n v_out.data[0][tuple(sl_l)] += self._gh_from_l[d]\n send_l.wait()\n recv_r.wait()\n v_out.data[0][tuple(sl_r)] += self._gh_from_r[d]", "def switch_hosts(self, t0, seed=None):\n assert len(self.extant_h) > 1, \"Error: attempted to switch between one host\"\n if seed:\n random.seed(seed)\n pick_p = random.choice(self.extant_p) # select an extant pathogen lineage at random\n pick_h = pick_p.host\n while pick_h == pick_p.host:\n pick_h = random.choice(self.extant_h)\n\n # add a node of degree size 2 to annotate host switch event in tree\n pick_p.dist = t0 - pick_p.height\n next_p = Tree(name=pick_p.name+'_m%s-%sm' % (pick_p.host.name, pick_h.name), dist=0)\n next_p.add_features(host=pick_h, height=t0)\n pick_p.up = next_p\n next_p.children = [pick_p]\n\n self.extant_p.remove(pick_p)\n self.extant_p.append(next_p)\n self.not_extant_p.append(pick_p)", "def trackGhosts(self, gameState):\n\n # Get some values that we will use later\n myState = gameState.getAgentState(self.index)\n myPos = myState.getPosition()\n noisyDistances = gameState.getAgentDistances()\n eatenBabies = self.getEatenBabies(gameState)\n\n # Track each opponent\n opponentFound = [False] * 4\n for idx in self.getOpponents(gameState):\n pos = gameState.getAgentState(idx).getPosition()\n\n # If we are close to opponents (we see them), update beliefs to one point\n if pos is not None:\n self.setBeliefs(pos, idx)\n opponentFound[idx] = True\n\n # If the teammate has eaten a ghost, update belief to initial position\n elif self.updateEatenOpponents1(gameState, idx):\n opponentFound[idx] = True\n print \"Our teammate has eaten an opponent, yeah!\"\n\n # If not, update beliefs taking into account opponents possible movements\n else:\n # elapseTime (update beliefs of opponent considering they have taken an action)\n self.elapseTime(idx)\n\n # If opponent has changed from ghost to pacman or viceversa (and haven't died), we know their x coordinate\n if self.isPacman[idx] != gameState.getAgentState(idx).isPacman:\n if self.isPacman[idx]: # Was pacman, now is ghost\n for pos in self.beliefs[idx].keys():\n if pos[0] != self.ghostLand:\n self.beliefs[idx].pop(pos, None)\n else: # Was ghost, now is pacman\n for pos in self.beliefs[idx].keys():\n if pos[0] != self.pacmanLand:\n self.beliefs[idx].pop(pos, None)\n self.beliefs[idx].normalize()\n\n # Get positions of me and my teammate\n pos0 = gameState.getAgentState(self.getTeam(gameState)[0]).getPosition()\n pos1 = gameState.getAgentState(self.getTeam(gameState)[1]).getPosition()\n # Remove impossible positions\n for p in self.beliefs[idx].keys():\n # We should see the opponents from there, if we don't they are not there\n if (pos0 is not None and util.manhattanDistance(p, pos0) <= 5) or (pos1 is not None and util.manhattanDistance(p, pos1) <= 5):\n self.beliefs[idx].pop(p, 
None)\n # There is still a food dot there, therefore the opponent is not there\n elif self.getFoodYouAreDefending(gameState)[p[0]][p[1]]:\n self.beliefs[idx].pop(p, None)\n # Our belief says the opponent could be a ghost when it is a pacman\n elif self.isPacman[idx] and p[0] * self.going_left < self.pacmanLand * self.going_left - 1:\n self.beliefs[idx].pop(p, None)\n # Our belief says the opponent could be a pacman when it is a ghost\n elif not self.isPacman[idx] and p[0] * self.going_left > self.ghostLand * self.going_left + 1:\n self.beliefs[idx].pop(p, None)\n\n # Calculate opponents that could have eaten the missing food\n eaters = [[], []]\n for i, pos in enumerate(eatenBabies):\n eater = []\n for idx in self.getOpponents(gameState):\n if opponentFound[idx]:\n continue\n if pos in self.beliefs[idx].keys() and self.beliefs[idx][pos] > 0:\n eater.append(idx)\n eaters[i] = eater\n if i > 1:\n break\n\n for idx in self.getOpponents(gameState):\n if not opponentFound[idx]:\n # If we are not close to opponents (we don't see them), check if only one ghost can have eaten the food\n newBelief = False\n # This dirty code just changes the ghost beliefs\n if len(eaters[0]) == 1:\n newBelief = True\n if len(eaters[1]) == 0:\n if eaters[0][0] == idx:\n self.setBeliefs(eatenBabies[0], idx)\n else:\n newBelief = False\n else: #1 || 2\n if eaters[0][0] == idx:\n self.setBeliefs(eatenBabies[0], idx)\n else:\n self.setBeliefs(eatenBabies[1], idx)\n elif len(eaters[1]) == 1:\n newBelief = True\n if len(eaters[0]) == 2:\n if eaters[1][0] == idx:\n self.setBeliefs(eatenBabies[1], idx)\n else:\n self.setBeliefs(eatenBabies[0], idx)\n else: # 0\n if eaters[1][0] == idx:\n self.setBeliefs(eatenBabies[1], idx)\n else:\n newBelief = False\n\n if not newBelief:\n # If we have not figured out the exact position, use noisy distance that we have\n self.observe(noisyDistances[idx], gameState, myPos, idx)\n\n # This is to see all the possible positions where the opponents may be, all probabilities are turned to one\n beliefs = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for idx, bel in enumerate(self.beliefs):\n for p in bel:\n if bel[p] > 0:\n beliefs[idx][p] = 1\n # beliefs[idx][p] = self.beliefs[idx][p]\n\n self.displayDistributionsOverPositions(beliefs)", "def game_loop(args):\n\n pygame.init()\n pygame.font.init()\n world = None\n tot_target_reached = 0\n num_min_waypoints = 21\n counter=0\n\n try:\n client = carla.Client(args.host, args.port)\n client.set_timeout(4.0)\n\n display = pygame.display.set_mode(\n (args.width, args.height),\n pygame.HWSURFACE | pygame.DOUBLEBUF)\n\n hud = HUD(args.width, args.height)\n world = World(client.load_world('Town01'), hud, args)\n # Changing The Map\n #world = World(client.load_world('Town03'), hud, args)\n # Town04 ,Town06 is highway | Town07 is country |Town03 default\n controller = KeyboardControl(world)\n\n if args.agent == \"Roaming\":\n agent = RoamingAgent(world.player)\n elif args.agent == \"Basic\":\n agent = BasicAgent(world.player)\n spawn_point = world.map.get_spawn_points()[0]\n agent.set_destination((spawn_point.location.x,\n spawn_point.location.y,\n spawn_point.location.z))\n else:\n agent = BehaviorAgent(world.player, behavior=args.behavior)\n\n spawn_points = world.map.get_spawn_points()\n random.shuffle(spawn_points)\n\n if spawn_points[0].location != agent.vehicle.get_location():\n destination = spawn_points[0].location\n else:\n destination = spawn_points[1].location\n\n agent.set_destination(agent.vehicle.get_location(), 
destination, clean=True)\n\n clock = pygame.time.Clock()\n\n while True:\n clock.tick_busy_loop(60)\n if controller.parse_events(client, world, clock):\n return\n\n # As soon as the server is ready continue!\n if not world.world.wait_for_tick(10.0):\n continue\n\n if args.agent == \"Roaming\" or args.agent == \"Basic\":\n if controller.parse_events(client, world, clock):\n return\n\n # as soon as the server is ready continue!\n world.world.wait_for_tick(10.0)\n\n world.tick(clock)\n world.render(display)\n pygame.display.flip()\n control = agent.run_step(world.player)\n control.manual_gear_shift = False\n world.player.apply_control(control)\n else:\n agent.update_information(world)\n\n world.tick(clock)\n world.render(display)\n pygame.display.flip()\n\n # Set new destination when target has been reached\n if len(agent.get_local_planner()._waypoints_queue) < num_min_waypoints and args.loop:\n agent.reroute(spawn_points)\n tot_target_reached += 1\n world.hud.notification(\"The target has been reached \" +\n str(tot_target_reached) + \" times.\", seconds=4.0)\n\n elif len(agent.get_local_planner()._waypoints_queue) == 0 and not args.loop:\n print(\"Target reached, mission accomplished...\")\n break\n\n speed_limit = world.player.get_speed_limit()\n agent.get_local_planner().set_speed(speed_limit)\n\n control = agent.run_step()\n world.player.apply_control(control)\n\n # #################################################\n # # it's my code\n # pt1_sum_ri = (0, 0)\n # pt2_sum_ri = (0, 0)\n # pt1_avg_ri = (0, 0)\n # count_posi_num_ri = 0\n #\n # pt1_sum_le = (0, 0)\n # pt2_sum_le = (0, 0)\n # pt1_avg_le = (0, 0)\n #\n # count_posi_num_le = 0\n #\n #\n # global Camera_image\n # RGB_Camera_im = cv2.cvtColor(Camera_image, cv2.COLOR_BGR2RGB)\n #\n # # Test lane dectection ,object detecion based on SSD, Yolo and Semantic Segmentation\n # #lines,size_im= lane_detectionv3(RGB_Camera_im)\n # #lines,size_im=object_detection_SSD(RGB_Camera_im)\n # #lines, size_im = object_detection_Yolo(RGB_Camera_im)\n # #lines, size_im = object_detection_mask(RGB_Camera_im)\n # #lines, size_im = lane_detectionv2(RGB_Camera_im)\n #\n # if lines is None: #in case HoughLinesP fails to return a set of lines\n # #make sure that this is the right shape [[ ]] and ***not*** []\n # lines = [[0,0,0,0]]\n # else:\n #\n # cv2.imshow('frame_size_im', size_im)\n # cv2.waitKey(1)\n # #cv2.imshow(\"test_im\", test_im) # original size image\n # #cv2.waitKey(1)\n\n #####################################################3\n # test= WorldRepresentation(world.world, world.player, args)\n # counter += 1\n # if ((counter % 10) == 0):\n # print(test.dynamic_objects())\n ##########################################################3\n\n\n finally:\n if world is not None:\n world.destroy()\n\n pygame.quit()", "def _move_ghost(self, ghost):\n pos = ghost['pos']\n new_pos = np.zeros(shape=(2,), dtype=np.float32)\n pillman = self.world_state['pillman']\n available = []\n for i in range(2, self.nactions + 1):\n update_2d_pos(self.map, pos, i, new_pos)\n if pos[0] != new_pos[0] or pos[1] != new_pos[1]:\n available.append(i)\n n_available = len(available)\n if n_available == 1:\n ghost['dir'] = available[0]\n elif n_available == 2:\n if ghost['dir'] not in available:\n if self.reverse_dir[ghost['dir'] - 2] == available[0]:\n ghost['dir'] = available[1]\n else:\n ghost['dir'] = available[0]\n else:\n rev_dir = self.reverse_dir[ghost['dir'] - 2]\n for i in range(n_available):\n if available[i] == rev_dir:\n available.pop(i)\n n_available -= 1\n break\n prods 
= np.zeros(n_available, dtype=np.float32)\n x = np.array(\n [pillman['pos'][0] - pos[0], pillman['pos'][1] - pos[1]], dtype=np.float32)\n norm = np.linalg.norm(x)\n if norm > 0:\n x *= 1. / norm\n for i in range(n_available):\n prods[i] = np.dot(x, self.dir_vec[available[i] - 2])\n if self.world_state['power'] == 0:\n if self.stochasticity > np.random.uniform():\n j = np.random.randint(n_available)\n else:\n # move towards pillman:\n j = np.argmax(prods)\n else:\n # run away from pillman:\n j = np.argmin(prods)\n ghost['dir'] = available[j]\n update_2d_pos(self.map, pos, ghost['dir'], pos)", "def move_to_stage_1(self, target, any_hostiles):\n # type: (RoomPosition, bool) -> None\n ordered_members = self.members_movement_order()\n\n self.log(\"Members {} moving - stage 1.\", _.pluck(ordered_members, 'name'))\n\n options = self.new_movement_opts()\n\n home = ordered_members[0].home\n origin = self.find_origin()\n\n serialized_obj = home.hive.honey.get_serialized_path_obj(origin, target, options)\n ordered_rooms_in_path = honey.get_room_list_from_serialized_obj(serialized_obj)\n\n room_path_lengths = []\n for room_name in ordered_rooms_in_path:\n room_path_lengths.push(len(serialized_obj[room_name]) - 1)\n\n members_path_positions = []\n any_member_off_path = False\n\n furthest_back_hurt_index = None\n\n for index in range(0, len(ordered_members)):\n drone = ordered_members[index]\n\n if drone.creep.hits < drone.creep.hitsMax:\n furthest_back_hurt_index = index\n\n room_index = ordered_rooms_in_path.indexOf(drone.pos.roomName)\n if not room_index:\n # if drone != ordered_members[0]:\n any_member_off_path = True\n members_path_positions.push(None)\n continue\n room_path = serialized_obj[drone.pos.roomName]\n\n path_index, moving_direction, reverse_dir = drone.creep.findIndexAndDirectionInPath(room_path)\n\n if path_index < 0:\n self.log(\"..: position ({},{}) is not within {} ({}, {}, {})\",\n drone.pos.x, drone.pos.y, room_path, path_index, moving_direction, reverse_dir)\n any_member_off_path = True\n members_path_positions.push(None)\n continue\n\n members_path_positions.push({\n 'room': room_index,\n 'path': path_index,\n 'dir': moving_direction,\n 'rev': reverse_dir,\n })\n\n if any_member_off_path:\n for i in range(len(ordered_members) - 1, -1, -1):\n member = ordered_members[i]\n\n moving_now = False\n if members_path_positions[i] is None:\n # Since the member is definitely off the path\n self.log(\"Member {} ({}) off path - individually following military path ({} -> {})..\",\n member.name, member.pos, origin, target)\n\n else:\n if member.pos.x <= 2 or member.pos.x >= 48 or member.pos.y <= 2 or member.pos.y >= 48 \\\n or _.some(member.room.look_for_in_area_around(LOOK_STRUCTURES, member.pos, 1),\n lambda s: s.destination):\n moving_now = True\n else:\n # members near members that are off path should also move, to make room available.\n for i2 in range(0, len(ordered_members)):\n other_member = ordered_members[i2]\n if members_path_positions[i2] is None \\\n and movement.chebyshev_distance_room_pos(other_member.pos, member.pos) \\\n <= len(ordered_members) + 1:\n moving_now = True\n break\n\n if moving_now:\n direction = members_path_positions[i].dir\n # key code turned from findIndexAndDirectionInPath when we're at an exit and we should\n # just say put.\n if direction != -30:\n result = member.creep.move(direction)\n member.creep.__direction_moved = direction\n if result != OK and result != ERR_TIRED:\n member.log(\"Error moving by squad path ({}.move({})): {}\",\n member.creep, 
direction, result)\n member.follow_military_path(origin, target, options)\n else:\n more_to_move_without_near_edge = Infinity\n # iterate backwards over every member so we can break the loop easily if any further back members are\n # too far behind.\n # ordered_members[0] is the head of the group\n any_fatigued = False\n for i in range(len(ordered_members) - 1, -1, -1):\n drone = ordered_members[i]\n\n if drone.creep.fatigue:\n any_fatigued = True\n\n # will sometimes be undefined, but that's ok since it's only used if furthest_back_hurt_index > 1\n prev_drone = ordered_members[i + 1]\n move_obj = members_path_positions[i]\n\n if drone.memory.off_path_for:\n del drone.memory.next_ppos\n del drone.memory.off_path_for\n del drone.memory.lost_path_at\n\n if more_to_move_without_near_edge <= 0 and not movement.is_edge_position(drone.pos):\n continue\n else:\n more_to_move_without_near_edge -= 1\n\n # self.log(\"[{}] regular stage1 movement in dir {}\", drone.name, move_obj.dir)\n\n # key code turned from findIndexAndDirectionInPath when we're at an exit and we should\n # just say put.\n if not move_obj and i == 0:\n drone.follow_military_path(origin, target, options)\n else:\n if furthest_back_hurt_index > i:\n drone.log(\"moving backwards to help out.\")\n if not drone.pos.isNearTo(prev_drone.pos) and any_fatigued:\n if move_obj.rev != -30:\n result = drone.creep.move(move_obj.rev)\n drone.creep.__direction_moved = move_obj.rev\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\",\n drone.creep, move_obj.rev, result)\n continue\n\n if move_obj.dir != -30:\n result = drone.creep.move(move_obj.dir)\n drone.creep.__direction_moved = move_obj.dir\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\", drone.creep, move_obj.dir, result)\n\n if i != 0:\n next_member_obj = members_path_positions[i - 1]\n\n room_diff = next_member_obj['room'] - move_obj['room']\n if room_diff < 0:\n self.log(\"[{}] we're ahead - moving backwards ({})\", drone.name, move_obj.rev)\n if move_obj.rev != -30:\n result = drone.creep.move(move_obj.rev)\n drone.creep.__direction_moved = move_obj.rev\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\",\n drone.creep, move_obj.rev, result)\n continue\n elif room_diff == 0:\n abs_path_diff = next_member_obj['path'] - move_obj['path']\n\n if abs_path_diff < 0:\n self.log(\"[{}] we're ahead - moving backwards ({}).\", drone.name, move_obj.rev)\n if move_obj.rev != -30:\n result = drone.creep.move(move_obj.rev)\n drone.creep.__direction_moved = move_obj.rev\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\",\n drone.creep, move_obj.rev, result)\n continue\n elif room_diff == 1:\n # use the room path length to see how far we are to the edge of the room, to get an accurate\n # diff\n abs_path_diff = (next_member_obj['path'] - 4) \\\n + (room_path_lengths[move_obj['room']] - move_obj['path'])\n\n if abs_path_diff < 0:\n # room_path_lengths is an estimation, and may be off.\n abs_path_diff = next_member_obj['path']\n else:\n # just a message that we're quite far behind.\n abs_path_diff = 100\n\n self.log(\"[{}] room diff: {}, path diff: {}, pos: {}\",\n drone.name, room_diff, abs_path_diff, drone.pos)\n if abs_path_diff > 10 or (any_hostiles and abs_path_diff > 1):\n more_to_move_without_near_edge = 0\n continue\n elif abs_path_diff <= 1:\n more_to_move_without_near_edge += 1\n # 
TODO: move backwards to re-unite when there are hostiles.", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def server_move(heaps):\n new_heaps = heaps\n if heaps[0] >= heaps[1]:\n if heaps[0] >= heaps[2]:\n new_heaps[0] -= 1\n else:\n new_heaps[2] -= 1\n else:\n if heaps[1] >= heaps[2]:\n new_heaps[1] -= 1\n else:\n new_heaps[2] -= 1\n return new_heaps", "def worldEntrance(self, host = 0, port = 0):\n\n self.last_nearest = [ -1 , -1, -1]\n self.best_entity = [-1, -1, -1, -1, [-1, -1]]\n self.last_turn = [-1, -1, -1, [-1, -1]]\n self.turning = 0\n self.ent_to_connect = []\n self.connected = 0\n\n # message to send\n message = encodeMsg( [\"FINDNEAREST\", self.host, str(self.network_port), str(self.position[0]), str(self.position[1])] )\n\n if host:\n # the function is invoked with a special host to contact\n self.socket.sendto(message, (host, int(port)))\n\n else:\n\n try:\n f = file('entities.met', 'r')\n except:\n sys.stderr.write(\"No file for connection...\")\n globalvars.ALIVE = 0\n killAllThread()\n\n # read file\n list = f.readlines()\n \n f.close()\n \n # put randomly 10 entities\n for i in range(10):\n \n # retrieve entity\n entity = random.choice(list)\n host, stringPort = entity.split()\n port = int(stringPort)\n self.socket.sendto(message, (host, port))", "def deploy(self):\n step = 10\n for i in range(0, self.x, step): \n for j in range(0, self.y, step):\n self._place_nodes(i,j, step, max_nodes = 3)", "def can_eat_ghost(self, ghosts : list, current_level:int):\r\n\r\n pac_rect = pg.rect.Rect((self.pos[0], self.pos[1]), (self.grid_size, self.grid_size)) #< Hitbox from Pac-Man\r\n START_EAT = False\r\n ghost_cnt = 0\r\n to_last = length_ghost_list = len(ghosts)\r\n\r\n # Tim has to be in the end of the list\r\n if length_ghost_list > 4: \r\n to_last = -1 \r\n\r\n # Do following Checks for each ghost except the clone of Tim\r\n for ghost in ghosts[:to_last]:\r\n ghost_rect = pg.rect.Rect((ghost.pos[0], ghost.pos[1]), (self.grid_size, self.grid_size)) #< Hitbox from a ghost\r\n colli = pac_rect.colliderect(ghost_rect) #< Look if Pac-Man and the ghost have collided\r\n\r\n # When current level > 20 ghosts can't be eaten anymore\r\n if current_level >= 21 and self.eat_ghost:\r\n for ghost_2 in ghosts:\r\n ghost_2.cm_flag = True\r\n self.eat_ghost = False\r\n\r\n # When an energizer got eaten by Pac-Man while the ghosts still wear in frightened, make them vulnerbale for the same time again\r\n if self.energizer_flag and ghost.first == 1:\r\n ghost.first = 0\r\n ghost.frightend_frame_counter = 0\r\n\r\n # When Pac-Man ate an energizer and the ghosts are in Scatter, Chase or frightened put them in frightened mode\r\n if self.eat_ghost and (ghost.state == 's' or ghost.state == 'c' or ghost.state == 'f'):\r\n if ghost.first == 0:\r\n ghost.change_mode('f')\r\n ghost.first += 1\r\n\r\n # Check if ghost is in frightened\r\n if ghost.state == 'f':\r\n ghost_cnt += 1\r\n\r\n # When the ghost collides with Pac-Man, play the corresponding sounds, add the points, change the ghost mode and add a little extra time to the frightened mode\r\n if colli:\r\n self.play_eatghost()\r\n self.play_retreating()\r\n self.point_counter += self.count_eaten_ghost\r\n self.count_eaten_ghost += self.count_eaten_ghost\r\n ghost.change_mode('e')\r\n START_EAT = True\r\n 
self.frightened_frame_counter -= HALF_SEC_IN_FRAMES\r\n\r\n # When the ghost collides with Pac-Man while in Scatter or Chase, he will lose a life \r\n elif colli and (ghost.state == 'c' or ghost.state == 's') and self.eaten_counter == 0: #and False: #<And False to debugg\r\n self.play_death()\r\n self.pause_siren()\r\n self.eaten_counter += 1\r\n self.life_counter -= 1\r\n self.first_eaten = True\r\n\r\n # When no ghost from the list was in frightened mode stop the frightened mode music\r\n if ghost_cnt == 0:\r\n self.stop_powerpellet()\r\n return START_EAT", "def move_all_animals(self):\n\n y_lim, x_lim = np.shape(self.map)\n for y in range(y_lim):\n for x in range(x_lim):\n loc = y, x\n self.map[loc].migration(self.get_neighbour((y, x)))", "def update(self, player, world, deltaTime):\r\n if self.ghostPathIndex > len(self.path) - 1 or time.time() > self.lastTracked + 15 or self.firstTickScared == True:\r\n self.ghostPathIndex = 0\r\n self.firstTickScared = False\r\n\r\n plyGridX = int((player.boundingBox.pos.getX()) // world.nodeGrid.xScale) - 1\r\n plyGridY = int((player.boundingBox.pos.getY()) // world.nodeGrid.yScale) + 1\r\n ghostGridX = int((self.boundingBox.pos.getX()) // world.nodeGrid.xScale) - 1\r\n ghostGridY = int((self.boundingBox.pos.getY()) // world.nodeGrid.yScale) + 1\r\n\r\n # Reset alive if its made it\r\n if self.alive == False:\r\n self.alive = True\r\n self.scared = False\r\n\r\n if self.scared == True:\r\n # Make ghost to a random point\r\n randNode = world.nodeGrid.randomNode()\r\n self.path = world.nodeGrid.pathFind(world.nodeGrid.nodeList[ghostGridX][ghostGridY], randNode)\r\n else:\r\n try:\r\n self.path = world.nodeGrid.pathFind(world.nodeGrid.nodeList[ghostGridX][ghostGridY], world.nodeGrid.nodeList[plyGridX][plyGridY])\r\n except:\r\n print(\"error\")\r\n\r\n self.lastTracked = time.time()\r\n\r\n if self.ghostPathIndex < len(self.path) or self.lastTracked == 0:\r\n notAlive = not self.alive\r\n self.moveGhost(world, Point(self.path[self.ghostPathIndex].realPosX, self.path[self.ghostPathIndex].realPosY), (835 - (240 * self.scared) + (440 * notAlive)) * deltaTime)\r\n\r\n if BoundingBox.pointWithin(self.boundingBox, BoundingBox(Point(self.path[self.ghostPathIndex].realPosX, self.path[self.ghostPathIndex].realPosY), Point(world.nodeGrid.xScale, world.nodeGrid.yScale))):\r\n self.ghostPathIndex += 1", "def naiveGlobalRouting(self):\n for e_list in self.s2e.values():\n for e in e_list:\n slot_path = []\n src_slot = self.v2s[e.src]\n dst_slot = self.v2s[e.dst]\n slot_path.append(src_slot)\n\n curr = src_slot\n len_x = src_slot.getLenX()\n len_y = src_slot.getLenY()\n\n # first go in X direction\n x_diff = curr.getPositionX() - dst_slot.getPositionX()\n if x_diff:\n dir = 'LEFT' if x_diff > 0 else 'RIGHT'\n for i in range(int(abs(x_diff/len_x))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n\n y_diff = curr.getPositionY() - dst_slot.getPositionY()\n if y_diff:\n dir = 'DOWN' if y_diff > 0 else 'UP'\n for i in range(int(abs(y_diff/len_y))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n \n assert curr == dst_slot\n \n slot_path = slot_path[1:-1] # exclude the src and the dst\n logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))\n self.e_name2path[e.name] = slot_path", "async def unwindGrid(fps):\n\n rg = homeGrid() # get a grid\n # overwrite the positions to the positions 
that the robots\n # are reporting\n for r in rg.robotDict.values():\n await fps.positioners[r.id].update_position()\n alpha, beta = fps.positioners[r.id].position\n r.setAlphaBeta(alpha, beta)\n\n forwardPath, reversePath = generatePath(rg)\n\n await fps.send_trajectory(reversePath)", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def test_vm_migration_across_hosts(self):\n\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n task = self._migrate_vm(content, server_id, dest_host)\n self._wait_for_task(task, content)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def handle_timer(self):\n\n for dest in self.hosts_to_unused_ports:\n self.hosts_to_unused_ports[dest] = [host for host in self.hosts_to_unused_ports[dest] if 
api.current_time() != host.time_to_live] \n self.hosts_to_ports[dest] = self.find_minium_latency_unused_ports(self.hosts_to_unused_ports[dest])\n\n #Send the reachable routes (must be less than infinity)\n for dest in self.hosts_to_ports:\n if self.hosts_to_ports[dest].latency < INFINITY: \n distance_vector = self.hosts_to_ports[dest] \n host_latency = distance_vector.latency\n\n distance_vector = self.hosts_to_ports[dest]\n\n # Send normal route packet\n packet = basics.RoutePacket(dest, host_latency)\n self.send(packet, distance_vector.port)\n\n # Send poison packet if POISON_MODE is true\n if self.POISON_MODE == True:\n poison_packet = basics.RoutePacket(dest, INFINITY)\n self.send(poison_packet, distance_vector.port)", "def _do_localisation(self):\n # Initiate global localization, wherein all particles are dispersed randomly through the free space in the map.\n self.global_localisation()\n\n #wait for pointcloud being destributet over the map\n count = 0\n while count < 50:\n self.rate.sleep()\n count = count + 1\n\n move_straight_count = 0\n while self.area_ellips > self.epsilon:\n range_front = []\n range_front[:20] = self.lidar_data[-20:]\n range_front[20:] = self.lidar_data[:20]\n\n obstacle_in_front = self._is_obstacle_in_front()\n if obstacle_in_front:\n # rotate to the right\n self._move(0, -0.75)\n else:\n if move_straight_count % 100 == 0:\n self._rotate_x_degrees(60, 360, True) \n\n # move straight forward\n move_straight_count = move_straight_count + 1\n self._move(0.25, 0)\n\n self._move(0, 0)\n return True", "def init_hostiles(self, number):\n for i in range(number):\n self.spaceships.append(self.hostile)\n SoundManager.add_sfx(\n self.hostile.states['exploded']['sfx'],\n self.hostile\n )", "def coalesce_hosts(self, host_node):\n c_hosts = host_node.children\n assert len(c_hosts) == 2, \"Error: function assumes binary tree\"\n h1, h2 = c_hosts\n\n # label the ancestral host node\n host_node.name = h1.name + '_' + h2.name\n\n # extract affected pathogen lineages in each descendant (H1, H2) of the host node\n p1 = filter(lambda x: x.host == h1, self.extant_p)\n p2 = filter(lambda x: x.host == h2, self.extant_p)\n\n if len(p1)>0 and len(p2) > 0 and random.uniform(0,1) < self.p_cospec:\n # cospeciation - pathogen lineages carried in H1 and H2 coalesce in host node\n # TODO: What if there are multiple pathogen lineages in H1 and/or H2?\n # Possibilities: (1) select one random pair of pathogen lineages to coalesce (only 1 cospeciation)\n # (2) every pair of pathogen lineages in H1/H2 has probability of cospeciation\n # This makes it possible for 3 or more path. 
lineages to coalesce at once\n # Current implementation (below) assumes (1).\n\n pick1 = random.sample(p1, 1)[0] # returns a list\n pick2 = random.sample(p2, 1)[0]\n pick1.host = host_node # relocate these pathogen lineages to ancestral host\n pick2.host = host_node\n to_coalesce = [pick1, pick2]\n self.coalesce_paths(to_coalesce, t0=host_node.height)\n\n # carry over all lineages to the ancestral host\n for node in p1+p2:\n node.host = host_node\n\n # update host lists\n self.extant_h.remove(h1)\n self.not_extant_h.append(h1)\n self.extant_h.remove(h2)\n self.not_extant_h.append(h2)\n self.extant_h.append(host_node)", "def iter_hosts():\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host", "def test_gre_loop(self):\n\n #\n # Create an L3 GRE tunnel.\n # - set it admin up\n # - assign an IP Addres\n #\n gre_if = VppGreInterface(self, self.pg0.local_ip4, \"1.1.1.2\")\n gre_if.add_vpp_config()\n gre_if.admin_up()\n gre_if.config_ip4()\n\n #\n # add a route to the tunnel's destination that points\n # through the tunnel, hence forming a loop in the forwarding\n # graph\n #\n route_dst = VppIpRoute(\n self, \"1.1.1.2\", 32, [VppRoutePath(\"0.0.0.0\", gre_if.sw_if_index)]\n )\n route_dst.add_vpp_config()\n\n #\n # packets to the tunnels destination should be dropped\n #\n tx = self.create_stream_ip4(self.pg0, \"1.1.1.1\", \"1.1.1.2\")\n self.send_and_assert_no_replies(self.pg2, tx)\n\n self.logger.info(self.vapi.ppcli(\"sh adj 7\"))\n\n #\n # break the loop\n #\n route_dst.modify([VppRoutePath(self.pg1.remote_ip4, self.pg1.sw_if_index)])\n route_dst.add_vpp_config()\n\n rx = self.send_and_expect(self.pg0, tx, self.pg1)\n\n #\n # a good route throught the tunnel to check it restacked\n #\n route_via_tun_2 = VppIpRoute(\n self, \"2.2.2.2\", 32, [VppRoutePath(\"0.0.0.0\", gre_if.sw_if_index)]\n )\n route_via_tun_2.add_vpp_config()\n\n tx = self.create_stream_ip4(self.pg0, \"2.2.2.3\", \"2.2.2.2\")\n rx = self.send_and_expect(self.pg0, tx, self.pg1)\n self.verify_tunneled_4o4(self.pg1, rx, tx, self.pg0.local_ip4, \"1.1.1.2\")\n\n #\n # cleanup\n #\n route_via_tun_2.remove_vpp_config()\n gre_if.remove_vpp_config()", "def island_migration(self):\n for y in self.island_map:\n for cell in y:\n cell.migration()\n\n for y in self.island_map:\n for cell in y:\n for animal in cell.population:\n animal.has_moved = False", "def run(self):\n for cell in self.grid.each_cell():\n neighbors = []\n if cell.north:\n neighbors.append(cell.north)\n if cell.east:\n neighbors.append(cell.east)\n if neighbors:\n neighbor = random.choice(neighbors)\n if neighbor:\n cell.link(neighbor)\n return self.grid", "def teleport(self, agent_host, move_up):\n\n move_by = 4\n if move_up:\n tel_y= self.curr_y+move_by\n else:\n tel_y= self.curr_y-move_by\n tp_command = \"tp {} {} {}\".format(self.curr_x,tel_y,self.curr_z)\n #print(\"X,Y,Z----: {},{},{}\".format(self.curr_x,tel_y,self.curr_z))\n return tp_command\n '''agent_host.sendCommand(tp_command)\n good_frame = False\n start = timer()\n while not good_frame:\n world_state = agent_host.getWorldState()\n if not world_state.is_mission_running:\n print \"Mission ended prematurely - error.\"\n exit(1)\n if not good_frame and world_state.number_of_video_frames_since_last_state > 0:\n frame_x = world_state.video_frames[-1].xPos\n frame_z = world_state.video_frames[-1].zPos\n if math.fabs(frame_x - teleport_x) < 0.001 and math.fabs(frame_z - teleport_z) < 
0.001:\n good_frame = True\n end_frame = timer()'''" ]
[ "0.69767755", "0.65840703", "0.6523343", "0.6032678", "0.5722102", "0.56207675", "0.5510207", "0.54849404", "0.5424678", "0.5414342", "0.5414342", "0.5345978", "0.5329319", "0.532052", "0.5301776", "0.5295316", "0.52799314", "0.5223032", "0.5206781", "0.5198322", "0.5179556", "0.51412094", "0.5124417", "0.5119338", "0.5118045", "0.5074472", "0.5045602", "0.50424075", "0.5025199", "0.5021071" ]
0.7840964
0
Performs ghost exchange either locally or with MPI communications in each direction
def _exchange_ghosts_mpi(self): for d in xrange(self._dim): if d in self._cutdir_list: self._exchange_ghosts_mpi_d(d) else: self._exchange_ghosts_local_d(d)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _exchange_ghosts_mpi_d(self, d):\n s_gh = self.gh_out[d]\n sl_l = [slice(None) for _ in xrange(self._dim)]\n sl_gh_l = [slice(None) for _ in xrange(self._dim)]\n sl_r = [slice(None) for _ in xrange(self._dim)]\n sl_gh_r = [slice(None) for _ in xrange(self._dim)]\n sl_l[d] = slice(1 * s_gh, 2 * s_gh)\n sl_gh_r[d] = slice(-1 * s_gh, None)\n sl_r[d] = slice(-2 * s_gh, -1 * s_gh)\n sl_gh_l[d] = slice(0, 1 * s_gh)\n for v_out in self.field_out:\n first_cut_dir = v_out.topology.cutdir.tolist().index(True)\n self._gh_to_l[d][...] = v_out.data[0][tuple(sl_gh_l)]\n self._gh_to_r[d][...] = v_out.data[0][tuple(sl_gh_r)]\n r_rk = v_out.topology.neighbours[1, d - first_cut_dir]\n l_rk = v_out.topology.neighbours[0, d - first_cut_dir]\n recv_r = self._comm.Irecv(\n [self._gh_from_r[d], self._gh_from_r[d].size,\n HYSOP_MPI_REAL],\n source=r_rk, tag=1234 + r_rk + 19 * d)\n recv_l = self._comm.Irecv(\n [self._gh_from_l[d], self._gh_from_l[d].size,\n HYSOP_MPI_REAL],\n source=l_rk, tag=4321 + l_rk + 17 * d)\n send_l = self._comm.Issend(\n [self._gh_to_l[d], self._gh_to_l[d].size, HYSOP_MPI_REAL],\n dest=l_rk, tag=1234 + self._comm_rank + 19 * d)\n send_r = self._comm.Issend(\n [self._gh_to_r[d], self._gh_to_r[d].size, HYSOP_MPI_REAL],\n dest=r_rk, tag=4321 + self._comm_rank + 17 * d)\n send_r.wait()\n recv_l.wait()\n v_out.data[0][tuple(sl_l)] += self._gh_from_l[d]\n send_l.wait()\n recv_r.wait()\n v_out.data[0][tuple(sl_r)] += self._gh_from_r[d]", "def _exchange_ghosts_local(self):\n for d in xrange(self._dim):\n self._exchange_ghosts_local_d(d)", "def object_communicator():\n comm = MPI.COMM_WORLD", "def send_to_other_clients(self, msg):\r\n # get_nearby_roles\r\n role_id_2_ins = self.server.aoi_mgr.get_nearby_roles(self.role)\r\n\r\n for role_id in role_id_2_ins.keys():\r\n if role_id == self.client_pc_id:\r\n continue\r\n small_server = self.server.pc_id_2_small_server[role_id]\r\n small_server.send_to_client(msg)", "def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n 
self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time", "def implement_protocol(bridge_network, LAN_network):\n\n\tspawned = [] # stores messages that have been sent in each iteration\n\treceived = [] # stores received messages in each iteration\n\n\tcurr_time = 0\n\n\t# initial message spawned by each bridge.\n\tfor bridge in bridge_network:\n\t\tmsg = Message(bridge.bridge_id, 0, bridge, -1, None)\n\t\tspawned.append(msg)\n\n\t# first iteration.\n\t\"\"\"\n\tPops each message in the spawned list(could've been implemented\n\tas a queue) and simulates the sending using the SendMessage() function\n\t(defined in helpers.py).\n\t\"\"\"\n\twhile spawned:\n\t\tm = spawned.pop(0) \n\t\treceived_by_set = SendMessage(m, bridge_network, LAN_network)\n\n\t\tfor message in received_by_set:\n\t\t\treceived.append(message)\n\n\t\tcurr_time += 1\n\n\t# subsequent iterations\n\twhile True:\n\n\t\t# clear out spawned\n\t\tspawned = []\n\n\t\twhile received:\n\n\t\t\t# TODO: add received trace.\n\n\t\t\tm = received.pop(0)\n\t\t\tto_be_published = UpdateConfig(m, bridge_network)\n\n\t\t\tif to_be_published.root != -1:\n\t\t\t\tspawned.append(to_be_published)\n\n\t\tif not spawned:\n\t\t\tbreak\n\n\t\twhile spawned:\n\t\t\tm = spawned.pop(0)\n\n\t\t\treceived_by_set = SendMessage(m, bridge_network, LAN_network)\n\n\t\t\tfor message in received_by_set:\n\t\t\t\treceived.append(message)\n\n\t\tcurr_time += 1", "def _forward(self, messages):\n assert isinstance(messages, (tuple, list))\n assert len(messages) > 0\n assert all(isinstance(message, Message.Implementation) for message in messages)\n assert all(message.community == messages[0].community for message in messages)\n assert all(message.meta == messages[0].meta for message in messages)\n\n result = False\n meta = messages[0].meta\n if isinstance(meta.destination, CommunityDestination):\n # CommunityDestination.node_count is allowed to be zero\n if meta.destination.node_count > 0:\n result = all(self._send(list(islice(meta.community.dispersy_yield_random_candidates(), meta.destination.node_count)), [message]) for message in messages)\n\n elif isinstance(meta.destination, CandidateDestination):\n # CandidateDestination.candidates may be empty\n result = all(self._send(message.destination.candidates, [message]) for message in messages)\n\n elif isinstance(meta.destination, MemberDestination):\n # MemberDestination.candidates may be empty\n result = all(self._send([candidate\n for candidate\n in self._candidates.itervalues()\n if any(candidate.is_associated(message.community, member)\n for member\n in message.destination.members)],\n [message])\n for message\n in messages)\n\n else:\n raise NotImplementedError(meta.destination)\n \n if __debug__ and not result:\n candidates = list(islice(meta.community.dispersy_yield_random_candidates(), meta.destination.node_count))\n dprint(\"_forward failed, did not send %d %s messages destinationtype %s nr candidates %d\"%(len(messages), 
meta.name, type(meta.destination), len(candidates)), level=\"warning\")\n return result", "def rpc_sendback(rpc_flag):\n credential = pika.PlainCredentials('guest', 'guest')\n rpc_connection = pika.BlockingConnection(pika.ConnectionParameters(\n host='localhost', port=5672, virtual_host='/', credentials=credential))\n rpc_channel = rpc_connection.channel()\n rpc_channel.queue_declare(queue=str(rpc_flag))\n #send message to the command center using basic_publish\n if rpc_flag == \"c02\":\n rpc_channel.basic_publish(exchange='', routing_key=str(\n rpc_flag), body='Drone has reached the delivery address')\n elif rpc_flag == \"c03\":\n rpc_channel.basic_publish(exchange='', routing_key=str(rpc_flag),\n body='Drone has unloaded the item')\n elif rpc_flag == \"c04\":\n rpc_channel.basic_publish(exchange='', routing_key=str(rpc_flag),\n body='Drone has reached the parking spot and available for next instruction')", "def comm_times_single(ns, send_host, recv_host):\n\n return run_on_hosts((send_host, recv_host),\n '''python %sape/timings/communication/mpi_run_single.py \"%s\" %s %s'''%(\n ape_dir, str(ns), send_host, recv_host))", "def _exchange_ghosts_local_d(self, d):\n s_gh = self.gh_out[d]\n sl = [slice(None) for _ in xrange(self._dim)]\n sl_gh = [slice(None) for _ in xrange(self._dim)]\n sl[d] = slice(1 * s_gh, 2 * s_gh)\n sl_gh[d] = slice(-1 * s_gh, None)\n for v_out in self.field_out:\n v_out.data[0][tuple(sl)] += v_out.data[0][tuple(sl_gh)]\n sl[d] = slice(-2 * s_gh, -1 * s_gh)\n sl_gh[d] = slice(0, 1 * s_gh)\n for v_out in self.field_out:\n v_out.data[0][tuple(sl)] += v_out.data[0][tuple(sl_gh)]", "def nremote(self):", "def bcast(self, msg):\n for k, v in self.peers.iteritems():\n proto = v[2]\n proto.send_obj(msg)", "def mpi_send(data, dest: int, tag: int) -> None:\n MPI.COMM_WORLD.send(data, dest=dest, tag=tag)", "def send(self, *args):\n self.clean_outbox()\n msg, tag = self.process_outgoing(args)\n req = self.mpi_comm.isend(msg, dest=self.remote_rank, tag=tag)\n self._outbox.append(req)", "def send(msg, dest=None):", "def remotes():", "def send_to_other_non_target_clients(self, msg):\r\n # get_nearby_roles\r\n role_id_2_ins = self.server.aoi_mgr.get_nearby_roles(self.role)\r\n\r\n for role_id in role_id_2_ins.keys():\r\n if role_id == self.client_pc_id or role_id == self.role.target_role.pc_id:\r\n continue\r\n small_server = self.server.pc_id_2_small_server[role_id]\r\n small_server.send_to_client(msg)", "def distribute_messages(i, j):\n i_neighbors_except_j = [k for k in edges[i] if k != j]\n j_neighbors_except_i = [k for k in edges[j] if k != i]\n \n send_message(i, j, i_neighbors_except_j)\n for k in j_neighbors_except_i:\n distribute_messages(j, k)", "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)", "def controls():\n\n context = zmq.Context()\n\n print(\"Transmitting commands to process.\")\n socket = context.socket(zmq.REQ)\n rc = socket.connect(\"ipc:///tmp/mail_queue_ipc\")\n #print(rc)\n\n\n for request in range(2):\n print(\"Sending request %s\" % request)\n socket.send(b\"insert\")\n\n message = socket.recv()\n print(\"Recieved reply %s [ %s ]\" % (request, message))\n time.sleep(1)", "def send_p():\n while 1:\n if PACKET_QUEUE:\n mpkt = PACKET_QUEUE.pop()\n sendp(mpkt, iface=IFACE, loop=0) # forward spoofed packet to the victim", "def test_star_routing_connectivity(self):\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n for source in 
range(len(self.multiplexers)):\n for destination in range(len(self.multiplexers)):\n if destination == source:\n continue\n envelope = Envelope(\n to=self.addresses[destination],\n sender=self.addresses[source],\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexers[source].put(envelope)\n delivered_envelope = self.multiplexers[destination].get(\n block=True, timeout=10\n )\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def offline_server_garbler_phase(env, storage, num_relus):\n\n # key generation \n now = env.now\n yield env.timeout(utils.off_client_compute_keygen) # client generates key\n yield env.timeout(utils.off_client_write_key / bandwidth) # client sends key to server\n # simulate linear layers\n for i in range(len(utils.off_client_compute_he_encrypt)): # for i in range(linear layers)....\n yield env.timeout(utils.off_client_compute_he_encrypt[i]) # client encrypts random share for layer i\n yield env.timeout(utils.off_client_write_linear[i] / bandwidth) # client sends encrypted share to server\n yield env.timeout(utils.off_server_compute_he_eval[i]) # server performs linear HE op to obtain output\n yield env.timeout(utils.off_server_write_linear[i] / bandwidth) # server sends encrypted output to client\n yield env.timeout(utils.off_client_compute_he_decrypt[i]) # client decrypts output\n\n # simulate ReLU layers\n yield env.timeout(utils.off_server_compute_garble) # server garbles ReLU\n yield env.timeout(utils.off_server_compute_encode) # server encodes labels\n yield env.timeout(utils.off_server_write_garbled_c / bandwidth) # server sends garbled circuit to client\n \n # oblivious transfer protocol (protocol 4 of https://eprint.iacr.org/2016/602)\n yield env.timeout(utils.off_client_write_base_ot / bandwidth) # client sends labels (k_0, k_1)..... BASE OT\n yield env.timeout(utils.off_client_write_ext_ot_setup / bandwidth) # client sends u_i to server ..... EXT OT\n yield env.timeout(utils.off_server_write_ext_ot_send / bandwidth) # server sends (y_0, y_1) to client.. 
EXT OT\n yield storage.put(num_relus)", "def test_mgre(self):\n\n for itf in self.pg_interfaces[3:]:\n #\n # one underlay nh for each overlay/tunnel peer\n #\n itf.generate_remote_hosts(4)\n itf.configure_ipv4_neighbors()\n\n #\n # Create an L3 GRE tunnel.\n # - set it admin up\n # - assign an IP Addres\n # - Add a route via the tunnel\n #\n gre_if = VppGreInterface(\n self,\n itf.local_ip4,\n \"0.0.0.0\",\n mode=(VppEnum.vl_api_tunnel_mode_t.TUNNEL_API_MODE_MP),\n )\n gre_if.add_vpp_config()\n gre_if.admin_up()\n gre_if.config_ip4()\n gre_if.generate_remote_hosts(4)\n\n self.logger.info(self.vapi.cli(\"sh adj\"))\n self.logger.info(self.vapi.cli(\"sh ip fib\"))\n\n #\n # ensure we don't match to the tunnel if the source address\n # is all zeros\n #\n tx = self.create_tunnel_stream_4o4(\n self.pg0,\n \"0.0.0.0\",\n itf.local_ip4,\n self.pg0.local_ip4,\n self.pg0.remote_ip4,\n )\n self.send_and_assert_no_replies(self.pg0, tx)\n\n #\n # for-each peer\n #\n for ii in range(1, 4):\n route_addr = \"4.4.4.%d\" % ii\n tx_e = self.create_stream_ip4(self.pg0, \"5.5.5.5\", route_addr)\n\n #\n # route traffic via the peer\n #\n route_via_tun = VppIpRoute(\n self,\n route_addr,\n 32,\n [VppRoutePath(gre_if._remote_hosts[ii].ip4, gre_if.sw_if_index)],\n )\n route_via_tun.add_vpp_config()\n\n # all packets dropped at this point\n rx = self.send_and_assert_no_replies(self.pg0, tx_e)\n\n gre_if.admin_down()\n gre_if.admin_up()\n rx = self.send_and_assert_no_replies(self.pg0, tx_e)\n\n #\n # Add a TEIB entry resolves the peer\n #\n teib = VppTeib(\n self,\n gre_if,\n gre_if._remote_hosts[ii].ip4,\n itf._remote_hosts[ii].ip4,\n )\n teib.add_vpp_config()\n\n #\n # Send a packet stream that is routed into the tunnel\n # - packets are GRE encapped\n #\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_4o4(\n self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n )\n\n tx_i = self.create_tunnel_stream_4o4(\n self.pg0,\n itf._remote_hosts[ii].ip4,\n itf.local_ip4,\n self.pg0.local_ip4,\n self.pg0.remote_ip4,\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n #\n # delete and re-add the TEIB\n #\n teib.remove_vpp_config()\n self.send_and_assert_no_replies(self.pg0, tx_e)\n self.send_and_assert_no_replies(self.pg0, tx_i)\n\n teib.add_vpp_config()\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_4o4(\n self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n #\n # bounce the interface state and try packets again\n #\n gre_if.admin_down()\n gre_if.admin_up()\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_4o4(\n self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n gre_if.admin_down()\n gre_if.unconfig_ip4()", "def _send(x, dst=0):\n x = torch.tensor(x)\n x = to_device(x)\n dist.send(x, dst)\n del x \n torch.cuda.empty_cache()", "def sendTopology(self, agent, collector):\n\n info(\"**** [G2]: sending topology\\n\")\n net = self.net\n topo = {'nodes':{}, 'links':{}}\n for s in net.switches:\n topo['nodes'][s.name] = {'agent':agent, 'ports':{}}\n path = '/sys/devices/virtual/net/'\n for child in os.listdir(path):\n parts = re.match('(^.+)-(.+)', child)\n if parts == None: continue\n if parts.group(1) in topo['nodes']:\n ifindex = 
open(path+child+'/ifindex').read().split('\\n',1)[0]\n topo['nodes'][parts.group(1)]['ports'][child] = {'ifindex': ifindex}\n i = 0\n for s1 in net.switches:\n j = 0\n for s2 in net.switches:\n if j > i:\n intfs = s1.connectionsTo(s2)\n for intf in intfs:\n s1ifIdx = topo['nodes'][s1.name]['ports'][intf[0].name]['ifindex']\n s2ifIdx = topo['nodes'][s2.name]['ports'][intf[1].name]['ifindex']\n linkName = '%s-%s' % (s1.name, s2.name)\n topo['links'][linkName] = {'node1': s1.name, 'port1': intf[0].name, 'node2': s2.name, 'port2': intf[1].name}\n j += 1\n i += 1\n put('http://%s:8008/topology/json' % collector, json=topo)", "async def test_distributed_paillier_with_communication(\n distributed_schemes: Tuple[DistributedPaillier, ...],\n plaintext: Union[float, int],\n) -> None:\n enc = {0: distributed_schemes[0].encrypt(plaintext)}\n distributed_schemes[0].pool.async_broadcast(enc[0], \"encryption\")\n assert not enc[0].fresh\n for iplayer in range(1, len(distributed_schemes)):\n enc[iplayer] = await distributed_schemes[iplayer].pool.recv(\n \"local0\", \"encryption\"\n )\n\n dec = await asyncio.gather(\n *[\n distributed_schemes[i].decrypt(enc[i])\n for i in range(len(distributed_schemes))\n ]\n )\n assert all(d == plaintext for d in dec)", "def notify_remote_orders(connection, orders):\n \n # get sockets\n socket_in = connection[0]\n socket_out = connection[1]\n\n # deal with null orders (empty string)\n if orders == '':\n orders = 'null'\n \n # send orders\n try:\n socket_out.sendall(orders)\n except:\n raise IOError, 'remote player cannot be reached'", "def send(self):\n while True:\n for neighbor_name in self.neighbors:\n if not self.neighbors[neighbor_name].is_killed:\n if self.neighbors[neighbor_name].update_ready:\n self.send_update(self.neighbors[neighbor_name])\n if self.neighbors[neighbor_name].linkup_ready:\n self.send_linkup(self.neighbors[neighbor_name])\n if self.neighbors[neighbor_name].linkdown_ready:\n self.send_linkdown(self.neighbors[neighbor_name])", "def send(self, msg):\r\n\r\n # don't need to handle barrier messages\r\n if not hasattr(msg, 'command'):\r\n return\r\n\r\n subcmd = OvsSender.subcmds[msg.command]\r\n \r\n\r\n # TODO: this is different for remote switches (ie, on physical network)\r\n dest = msg.switch.name\r\n\r\n params = []\r\n if msg.match.nw_src is not None:\r\n params.append(\"nw_src={0}\".format(msg.match.nw_src))\r\n if msg.match.nw_dst is not None:\r\n params.append(\"nw_dst={0}\".format(msg.match.nw_dst))\r\n if msg.match.dl_src is not None:\r\n params.append(\"dl_src={0}\".format(msg.match.dl_src))\r\n if msg.match.dl_dst is not None:\r\n params.append(\"dl_dst={0}\".format(msg.match.dl_dst))\r\n if msg.match.dl_type is not None:\r\n params.append(\"dl_type={0}\".format(msg.match.dl_type))\r\n\r\n params.append(\"priority={0}\".format(msg.priority))\r\n actions = [\"flood\" if a == OFPP_FLOOD else str(a) for a in msg.actions]\r\n\r\n if msg.command == OFPFC_ADD:\r\n params.append(\"action=output:\" + \",\".join(actions))\r\n\r\n paramstr = \",\".join(params)\r\n cmd = \"{0} {1} {2} {3}\".format(OvsSender.command,\r\n subcmd,\r\n dest,\r\n paramstr)\r\n ret = os.system(cmd)\r\n return ret" ]
[ "0.69065416", "0.6561432", "0.57953256", "0.5653083", "0.55906194", "0.55473036", "0.5502584", "0.5496955", "0.5474431", "0.54014534", "0.53840894", "0.5377938", "0.53468025", "0.5310125", "0.5282678", "0.52745557", "0.5266758", "0.5263322", "0.52578515", "0.52422863", "0.52318954", "0.5185584", "0.5179138", "0.5162724", "0.5144504", "0.5139125", "0.5125328", "0.5113971", "0.50959074", "0.50889534" ]
0.76470697
0
Return example of a configured AnimationManager instance
def get_animManager(): NUM_LINES = 50 NUM_STEPS = 1000 STEP_MAX = 0.1 fig = plt.figure('3D Random walk example') ax = fig.gca(projection='3d') ax.set_axis_off() # Setting the axes properties d = 1 ax.set_xlim3d([0.0 - d, 1.0 + d]) ax.set_ylim3d([0.0 - d, 1.0 + d]) ax.set_zlim3d([0.0 - d, 1.0 + d]) # generating random data and 3-D lines data = [Gen_RandLine(NUM_STEPS, STEP_MAX, dims=3) for index in range(NUM_LINES)] lines = [ax.plot(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1])[0] for dat in data] # pass figure to animation manager mng = AnimationManager(ax, fAnim=update_lines, fargs=(data, lines), numFramesModif=NUM_STEPS) # set some initial parameters mng.dlg.spinBox_period_modif.setValue(30) return mng
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSampler(self, *args):\n return _osgAnimation.Channel_getSampler(self, *args)", "def get_animator(self):\n \n return graphical.Animator(self)", "def __call__(self, *args):\n return _osgAnimation.AnimationManagerBase___call__(self, *args)", "def get_sampler(self):\n return self.sampler", "def _animation_init(self):\n\n self.animation_ax.set_xlim(self.plant.workspace_range[0][0],\n self.plant.workspace_range[0][1])\n self.animation_ax.set_ylim(self.plant.workspace_range[1][0],\n self.plant.workspace_range[1][1])\n self.animation_ax.set_xlabel(\"x position [m]\")\n self.animation_ax.set_ylabel(\"y position [m]\")\n for ap in self.animation_plots[:-1]:\n ap.set_data([], [])\n self.animation_plots[-1].set_text(\"t = 0.000\")\n\n self.tau_arrowarcs = []\n self.tau_arrowheads = []\n for link in range(self.plant.n_links):\n arc, head = get_arrow(radius=0.001,\n centX=0,\n centY=0,\n angle_=110,\n theta2_=320,\n color_=\"red\")\n self.tau_arrowarcs.append(arc)\n self.tau_arrowheads.append(head)\n self.animation_ax.add_patch(arc)\n self.animation_ax.add_patch(head)\n\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads", "def registerAnimation(self, *args):\n return _osgAnimation.AnimationManagerBase_registerAnimation(self, *args)", "def get_animated(self):\n return self.animated", "def get_samplerate(self):\n\t\treturn _PM_UPDATE_RATE / self.output_decimation", "def animate(self, *args, **kwargs):\n i = ImageAnimatorWCS(self.data, wcs=self.axes_wcs, *args, **kwargs)\n return i", "def findAnimation(self, *args):\n return _osgAnimation.BasicAnimationManager_findAnimation(self, *args)", "def get_ammos(self):\n return self.__ammos", "def _init_anim(self):\n pass", "def _magsamples(self):\n if self._derived_properties[\"magsamples\"] is None:\n if self.lbda is None:\n raise AttributeError(\"lbda not set.\")\n self.derive_magsamples()\n \n return self._derived_properties[\"magsamples\"]", "def update(self, *args):\n return _osgAnimation.BasicAnimationManager_update(self, *args)", "def update(self, *args):\n return _osgAnimation.AnimationManagerBase_update(self, *args)", "def get_estimate(self, mag=False):\n if mag is False:\n return super(PhotoSamplers,self).get_estimate()\n \n return self._magsamples.get_estimate()", "def _define_amplitude(self):\n self.amplitude = LivingAnimation(\n label=\"Amplitude\",\n initial_value=0,\n value_range={'min': 0, 'max': MAX_AMPLITUDE},\n duration_range={'min': MIN_VERT_SPEED, 'max': MAX_VERT_SPEED}\n )", "def samplerate(self):\n return self.sound.samplerate", "def libraryName(self):\n return _osgAnimation.BasicAnimationManager_libraryName(self)", "def playAnimation(self, *args):\n return _osgAnimation.BasicAnimationManager_playAnimation(self, *args)", "def getDuration(self):\n return _osgAnimation.Animation_getDuration(self)", "def sample_rate(self):\n if self.has_data():\n try:\n return round(\n 1.0\n / np.float64(\n (\n np.median(\n np.diff(self.dataset.coords[\"time\"].to_index())\n / np.timedelta64(1, \"s\")\n )\n )\n ),\n 0,\n )\n except AttributeError:\n self.logger.warning(\n \"Something weird happend with xarray time indexing\"\n )\n\n raise ValueError(\n \"Something weird happend with xarray time indexing\"\n )\n return self.run_metadata.sample_rate", "def samples(self):\n return np.full(self.duration, self._value)", "def getCalibration(self):\n self.a0 = float(self.getParameter(index=1))\n self.a1 = float(self.getParameter(index=2))\n self.a2 = float(self.getParameter(index=3))\n self.a3 = float(self.getParameter(index=4))\n 
status = self.getStatus()\n self.wavelength = [ self.a0 + self.a1*x + self.a2*x*x + self.a3*x*x*x \n for x in range(status.pixels)]\n if self.discardTrailingSamples > 0:\n self.wavelength = self.wavelength[:-self.discardTrailingSamples]\n if self.discardLeadingSamples > 0:\n self.wavelength = self.wavelength[self.discardLeadingSamples:]", "def plot_averaged_amplitude(self):\r\n\r\n tremap = np.arange(0, self.period, 1./self.framerate)\r\n\r\n ipy_offset = -2\r\n\r\n # find center of image\r\n\r\n self.ipx = int(self.imageData.shape[1]/2.)\r\n\r\n self.ipy = int(self.imageData.shape[2]/2.)\r\n\r\n posx = range(1, self.imageData.shape[1]-1, 2)\r\n\r\n colormap = mpl.cm.gist_ncar\r\n\r\n colx = [colormap(i) for i in np.linspace(0, 0.9, (len(posx)))]\r\n\r\n for i, px in enumerate(posx):\r\n\r\n remapped1 = self.measure_amplitude(self.ipy+ipy_offset, px)\r\n\r\n mpl.plot(tremap, remapped1/self.meanimagevalue, 'o-', markerfacecolor=colx[i], markersize=2.0)\r\n\r\n mpl.title('Averaged Amplitude Center')\r\n\r\n return (ipy_offset, posx, colx)", "def current_mean(self):\r\n values = self._timings\r\n return np.mean(values)", "def _get_interpolation(self) :\n \n return self._interpolation", "def _get_animated_artists(self):\n return tuple(a for ax_ in self.ax.get_figure().get_axes()\n for a in ax_.get_children()\n if a.get_animated() and a not in self.artists)", "def sample_rate(self):\n return self._sample_rate", "def get_recorded_audio(self):\n return self.frames" ]
[ "0.5929234", "0.58319426", "0.5712612", "0.5358319", "0.5345582", "0.5287932", "0.51266444", "0.50897235", "0.50664383", "0.50573474", "0.49518934", "0.49007067", "0.48587754", "0.48277414", "0.48074946", "0.4796288", "0.47952378", "0.47919708", "0.47805485", "0.4770601", "0.4750935", "0.4722837", "0.46999067", "0.4673331", "0.46406594", "0.4631924", "0.4572155", "0.45488346", "0.4546125", "0.4540263" ]
0.5915016
1
Creates an instance of the abstract memory-based persistence component.
def __init__(self): super(MemoryPersistence, self).__init__(descriptor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __new__(cls, *args, **kwargs):\n obj = super(Memory, cls).__new__(cls, *args, **kwargs)\n obj.__dict__ = cls.data\n\n return obj", "def get_instance():\n if PersistenceManager._instance is None:\n PersistenceManager._instance = PersistenceManager()\n return PersistenceManager._instance", "def Get(self):\n\n if not hasattr(self, \"_instance\"):\n self._instance = PersistenceManager()\n\n return self._instance", "def _create_db(self):\n self.db = easydms.dbcore.Database(\":memory:\")\n self.db.create_db()", "def storage_factory():\n return storage(transaction.manager, **kwargs)", "def __init__(self, memory_path):\n self.memory_path = memory_path\n self.facts = self._load()\n memory_directory = os.path.dirname(memory_path)\n os.makedirs(memory_directory, exist_ok=True)", "def create_memory_agent(reactor, pumper, server_protocol):\n # Note, we currently don't actually do any \"resource traversing\"\n # and basically accept any path at all to our websocket resource\n if server_protocol is None:\n server_protocol = WebSocketServerProtocol\n return _TwistedWebMemoryAgent(reactor, pumper, server_protocol)", "def to_in_memory(self) -> DiGraphGPKGView:\n # TODO: make into 'copy' method instead, taking path as a parameter?\n db_id = uuid.uuid4()\n path = f\"file:unweaver-{db_id}?mode=memory&cache=shared\"\n new_network = self.network.copy(path)\n return self.__class__(network=new_network)", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def instantiate(cls, data_store, identifier):\n pass", "def create_shared_memory_manager(\n address: typing.Tuple[str, int], authkey: typing.Optional[bytes]\n) -> SharedMemoryManager:\n smm = SharedMemoryManager(address=address, authkey=authkey)\n return smm", "def __create_in_memory_db_table(name, *columns, **kwargs):\n import datetime\n from sqlalchemy import Column, DateTime, CheckConstraint\n from sqlalchemy.pool import StaticPool\n from sqlalchemy.schema import Table\n from sqlalchemy.orm import registry\n from rucio.db.sqla.models import ModelBase\n from rucio.db.sqla.session import get_maker, create_engine\n\n engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)\n\n # Create a class which inherits from ModelBase. 
This will allow us to use the rucio-specific methods like .save()\n DeclarativeObj = type('DeclarativeObj{}'.format(name), (ModelBase,), {})\n # Create a new declarative base and map the previously created object into the base\n mapper_registry = registry()\n InMemoryBase = mapper_registry.generate_base(name='InMemoryBase{}'.format(name))\n table_args = tuple(columns) + tuple(kwargs.get('table_args', ())) + (\n Column(\"created_at\", DateTime, default=datetime.datetime.utcnow),\n Column(\"updated_at\", DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow),\n CheckConstraint('CREATED_AT IS NOT NULL', name=name.upper() + '_CREATED_NN'),\n CheckConstraint('UPDATED_AT IS NOT NULL', name=name.upper() + '_UPDATED_NN'),\n )\n table = Table(\n name,\n InMemoryBase.metadata,\n *table_args\n )\n mapper_registry.map_imperatively(DeclarativeObj, table)\n # Performa actual creation of the in-memory table\n InMemoryBase.metadata.create_all(engine)\n\n # Register the new table with the associated engine into the sqlalchemy sessionmaker\n # In theory, this code must be protected by rucio.db.scla.session._LOCK, but this code will be executed\n # during test case initialization, so there is no risk here to have concurrent calls from within the\n # same process\n senssionmaker = get_maker()\n senssionmaker.kw.setdefault('binds', {}).update({DeclarativeObj: engine})\n return DeclarativeObj", "def as_mpi_memory(cls, obj) -> MPI.memory:\n return MPI.memory.fromaddress(obj.data_ptr(), 0)", "def __new__(cls, manager, device_config, log_file_name, log_directory):\n # slowly migrate away from using 'hub_port_name' but maintain backwards compatibility\n if \"console_port_name\" not in device_config[\"persistent\"]:\n device_config[\"persistent\"][\"console_port_name\"] = \\\n device_config[\"persistent\"][\"hub_port_name\"]\n\n identifier = device_config[\"persistent\"][\"console_port_name\"]\n if identifier not in cls._instances:\n obj = super(Cambrionix, cls).__new__(cls)\n cls._instances[identifier] = obj\n\n return cls._instances[identifier]", "def new_manager() -> SyncManager:\n return Manager()", "def __init__(self) -> None:\n self.Database = Database()", "def __init__(self):\n self._datastore = dict()", "def new_datastore(self, **kwargs) -> DataStore:\n return storage.DataStore(\n self.system_params, self.param_name, self.param_vals, **kwargs\n )", "def create_storage(conf):\n _name = conf.get(\"name\", \"\")\n _cls = importer(conf['class'])\n _kwargs = conf['kwargs']\n _io = importer(_kwargs['io_class'])\n return _cls(_kwargs[\"storage_config\"], name=_name, io_class=_io)", "def initialize_sqlite_memory():\n global engine\n if engine is None:\n engine = create_engine(\"sqlite://\")\n Session.configure(bind=engine)\n _populate()", "def sess():\n engine = create_engine(\"sqlite://\", echo=True)\n with engine.begin() as conn:\n conn.execute(\"ATTACH DATABASE ':memory:' AS placebo\")\n Base.metadata.create_all(engine)\n sess = sessionmaker(bind=engine)()\n return sess", "def new_datastore(self, **kwargs):\n return storage.DataStore(self.system_params, self.param_name, self.param_vals, **kwargs)", "def storage(self) -> storage.Storage:\n raise ValueError('Not implemented.')", "def get_instance():\n \"\"\"Add more judgement for selecting more database backend\"\"\"\n return IMPL", "def _create_vm(self):\n self._create_instance_in_the_db()\n self.type_data = db.instance_type_get_by_name(None, 'm1.large')\n self.conn.spawn(self.context, self.instance, self.network_info)\n 
self._check_vm_record()", "def persistent_stores(self):\n stores = (PersistentStore(name='storage_capacity_service_db',\n initializer='init_stores:init_storage_capacity_service_db',\n spatial=True\n ),\n )\n\n return stores", "def __init__(self):\r\n self.__memory = []", "def set_persistence(self, p):\n self.persistence = p", "def __init__(self, database_manager=DataBaseManager()):\n self.database_manager = database_manager", "def db(self):\n return DbManager(self)" ]
[ "0.6396316", "0.612985", "0.6109966", "0.60697746", "0.5990019", "0.5682012", "0.5597681", "0.55813473", "0.5535128", "0.5517559", "0.54749537", "0.54711515", "0.5453528", "0.543589", "0.54355305", "0.5423502", "0.5406449", "0.54031616", "0.5364649", "0.5349354", "0.53341067", "0.53252995", "0.53250766", "0.5302557", "0.52931696", "0.5263669", "0.52566266", "0.52364343", "0.5225789", "0.522326" ]
0.7326523
0
In the constructor we have self.beneficios_maquinas, a dictionary that indicates how much we will earn with each machine
def __init__(self): self.beneficios_maquinas = {"tragaperras":500 , "b_jack" : 900 , "poker" : 1000 , "baccarat": 600 , "dados":500 , "ruleta":900 , "bingo" :750 ,"carreras":700} self.catalogo = {"tragaperras":50000 , "b_jack" : 90000 , "poker" : 100000 , "baccarat": 60000 , "dados": 50000 , "ruleta":90000 , "bingo" :75000 ,"carreras":70000, "PAREDES-2" : 80000 , "PAREDES-3": 250000 , "PAREDES-4":1000000,"SUELO-2" : 80000 , "SUELO-3": 250000 , "SUELO-4":1000000,"REF-2" : 80000 , "REF-3": 250000 , "REF-4":1000000}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, sistema, nombre, espacios_de_atencion, niveles=None, capacity=float('inf'), init=0):\r\n\r\n super(MedioDeAlmacenamiento, self).__init__(sistema, capacity, init)\r\n self.nombre = nombre\r\n self.espacios_de_atencion = espacios_de_atencion\r\n self.espacios_en_uso = 0\r\n self.cola = []\r\n\r\n if niveles is not None:\r\n self.niveles = niveles\r\n if sum(self.niveles.values()) > 0:\r\n self.get(sum(self.niveles.values()))\r\n self.espacio = self.capacity - self.level", "def entrega_lista_maquina (self,maquina):\n\n if maquina == \"tragaperras\":\n return self.tragaperras\n\n elif maquina == \"b_jack\":\n return self.b_jack\n\n elif maquina == \"poker\":\n return self.poker\n\n elif maquina == \"baccarat\":\n return self.baccarat\n\n elif maquina == \"dados\":\n return self.dados\n\n if maquina == \"ruleta\":\n return self.ruleta\n\n if maquina == \"bingo\":\n return self.bingo\n\n if maquina == \"carreras\":\n return self.carreras", "def mortalidade(self):\n self.covidbr['mortalidade'] = self.covidbr['obitosAcumulado'] / \\\n (self.covidbr['populacaoTCU2019'] / (10**5))", "def __calcular_disparo_ganador(self, disparos):\n ganador = None\n participante = \"\"\n puntaje_mas_chico = 0\n for disparo in disparos:\n if puntaje_mas_chico <= disparo['puntaje_total']:\n puntaje_mas_chico = disparo['puntaje_total']\n participante = disparo\n ganador = dict(participante)\n return ganador", "def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp", "def __init__(self, espec_izquierda, espec_derecha):\n self._izquierda = espec_izquierda\n self._derecha = espec_derecha\n return", "def __init__(self,cont_acc):\n self.Mecanisme = cont_acc[0]\n self.Intentionnalite = cont_acc[1]\n self.Activite_trauma = cont_acc[2]\n self.Detail_activite = cont_acc[3]\n self.Lieu = cont_acc[4]\n self.Type_lieu = cont_acc[5]\n self.Detail_Type_lieu = cont_acc[6]\n self.Ejection_vehicule = cont_acc[7]\n self.Passager_decede_mm_vehicule = cont_acc[8]\n self.AGV = cont_acc[9]\n self.Incarceration = cont_acc[10]\n self.Duree_incarceration = cont_acc[11]\n self.Cin_NSP = cont_acc[12]\n self.Ecrase_projete = cont_acc[13]\n self.Chute = cont_acc[14]\n self.Blast = cont_acc[15]\n self.Isch_du_membre = cont_acc[16]\n self.Amputation = cont_acc[17]\n self.Bassin = cont_acc[18]", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def __init__(self, nome):\n\n super().__init__(nome)\n self.preferencias = []\n self.qtd_preferencias_atendidas = 0", "def get_tarefa_mais_barata(tarefas):\n\tdict_custo_total = {}\n\ttarefa_barata = 
{}\n\tfor tarefa in tarefas:\n \t\ttarefa_id = tarefa['identificador']\n\t\tif not (dict_custo_total.has_key(tarefa_id)):\n\t\t\tdict_custo_total[tarefa_id] = {'tarefa': tarefa, 'custo': 0}\n\t\t\tfor outra_tarefa in tarefas:\n\t\t\t\tif (outra_tarefa['identificador'] != tarefa_id):\n\t\t\t\t\tdict_custo_total[tarefa_id]['custo'] += calcula_custo(tarefa['tempo_de_execucao'], outra_tarefa['custo_por_hora'])\n\n\t\tif (tarefa_barata == {} or (tarefa_barata['custo'] > dict_custo_total[tarefa_id]['custo'])):\n\t\t\ttarefa_barata = dict_custo_total[tarefa_id]\n\n\treturn tarefa_barata['tarefa']", "def resultadosMensuales(self):\r\n self.checkingConnection()\r\n self.model = QSqlQueryModel()\r\n self.model.setQuery('''\r\n SELECT months.name, ingresos, compras, gastos,\r\n (ingresos - compras - gastos) AS Saldo FROM (\r\n\t\t\tSELECT month,\r\n ingresos, compras, gastos FROM ((SELECT Clients.month AS month,\r\n SUM(Clients.value) AS ingresos FROM Clients GROUP BY Clients.month)\r\n JOIN (SELECT Compras.month_id AS month2, SUM(Compras.value) AS compras\r\n FROM Compras GROUP BY Compras.month_id) JOIN (SELECT Gastos.month_id AS month3,\r\n SUM(Gastos.value) AS gastos FROM Gastos GROUP BY Gastos.month_id)\r\n ON month = month2 AND month2 = month3)\r\n\t\t\t) JOIN months ON month=months.id ''', self.db)\r\n # Set the empty lists\r\n self.months = []\r\n self.ingresos = []\r\n self.compras = []\r\n self.gastos = []\r\n self.total = []\r\n # Save the Query values in each list\r\n for i in range(self.model.rowCount()):\r\n # record is the row and value the column\r\n self.months.append(self.model.record(i).value(\"name\"))\r\n self.ingresos.append(self.model.record(i).value(\"ingresos\"))\r\n self.compras.append(self.model.record(i).value(\"compras\"))\r\n self.gastos.append(self.model.record(i).value(\"gastos\"))\r\n self.total.append(self.model.record(i).value(\"Saldo\"))\r\n\r\n self.setModel(self.model)\r\n # Creating the Bar Graph\r\n self.grafica(self.months)", "def getAbonos(self, fechaInicio, fechaFin, usuarioColaborador=\"\"):\n\tif usuarioColaborador == \"\" and fechaInicio == \"\" and fechaFin == \"\":\n\t return self.conexion.ejecutarSQL(\"select a.id, a.fecha, a.hora, a.valor, a.id_venta, a.usuario_Colaborador, a.id_TipoPago, tP.tipo\\\n from abonos a, tipoPagos tP where a.id_tipoPago=tP.id\")\n elif usuarioColaborador == \"\":\n return self.conexion.ejecutarSQL(\"select a.id, a.fecha, a.hora, a.valor, a.id_venta, a.usuario_Colaborador, a.id_TipoPago, tP.tipo\\\n from abonos a, tipoPagos tP where a.id_tipoPago=tP.id \\\n and a.fecha between '%s' and '%s'\" %(fechaInicio,fechaFin))\n else:\n return self.conexion.ejecutarSQL(\"select a.id, a.fecha, a.hora, a.valor, a.id_venta, a.usuario_Colaborador, a.id_TipoPago, tP.tipo\\\n from abonos a, tipoPagos tP where a.id_tipoPago=tP.id \\\n and a.fecha between '%s' and '%s' \\\n and usuario_colaborador='%s'\" %(fechaInicio,fechaFin,usuarioColaborador))", "def fama (self , diccionario):\n\n decoracion_list = []\n for key , value in diccionario.items():\n a=[]\n a.append(key)\n a.append(value)\n decoracion_list.append (a)\n\n paredes_list = decoracion_list [0:3]\n suelo_list = decoracion_list [3:6]\n reforma_list = decoracion_list [6:]\n\n paredes = 1\n suelo = 1\n reforma = 1\n\n for i in range (len(paredes_list)):\n if paredes_list [i][1] == 1 :\n paredes = i+2 \n\n for i in range (len(suelo_list)):\n if suelo_list [i][1] == 1 :\n suelo = i+2\n\n for i in range (len(reforma_list)):\n if reforma_list [i][1] == 1 :\n reforma = i+2\n\n 
modificador_fama = 0\n\n if paredes >= 4 and suelo >= 4 and reforma >= 4 :\n modificador_fama = 45\n\n elif paredes >= 3 and suelo >= 3 and reforma >= 3 :\n modificador_fama = 33 \n\n elif paredes >= 2 and suelo >= 2 and reforma >= 2 :\n modificador_fama = 12\n\n fama = (10*paredes)+(10*suelo)+(10*reforma) + modificador_fama + kasino.modificador_fama\n\n \"\"\" FORMULA FAMA : Con esta formula se calcula la fama, que dependera de la decoracion e influira en los visitantes \n Se puede usar modificador_fama para calibrar el juego o añadir niveles de dificulad \"\"\"\n \n return fama , paredes , suelo , reforma", "def getStatVentesMois(self, in_data):\n\n try:\n date_debut = in_data['date_debut']\n dt_debut = dateutil.parser.parse(date_debut)\n date_fin = in_data['date_fin']\n dt_fin = dateutil.parser.parse(date_fin)\n except:\n out_data = {\n 'success': False\n }\n return out_data\n\n local_dt_debut = dt_debut.astimezone (pytz.timezone('Europe/Paris'))\n debut = datetime(local_dt_debut.year, local_dt_debut.month, local_dt_debut.day)\n local_dt_fin = dt_fin.astimezone (pytz.timezone('Europe/Paris'))\n fin = datetime(local_dt_fin.year, local_dt_fin.month, local_dt_fin.day) + timedelta(days=1)\n\n commandes=[]\n ventes=[]\n day = 0\n stop = False\n ca = 0\n nb_commandes = 0\n nb_souscriptions = 0\n while not stop :\n time_debut = debut + timedelta(days=day)\n timestamp = calendar.timegm(time_debut.timetuple()) * 1000\n time_fin = time_debut + timedelta(days=1)\n c_list = Commande.objects.filter(etat='PAY',date__gte=time_debut,date__lt=time_fin).distinct()\n # ch_list = CommandeHistory.objects.filter(etat='PAY',date__gte=time_debut, date__lt=time_fin)\n total_euros = 0\n total_souscriptions = 0\n total_commandes = 0\n\n for commande in c_list:\n total_euros += commande.montant\n for souscription in commande.souscription_set.all():\n total_souscriptions += souscription.quantite\n total_commandes += 1\n\n ca+=total_euros\n nb_souscriptions+=total_souscriptions\n nb_commandes+=total_commandes\n commandes.append([timestamp,total_commandes])\n ventes.append([timestamp,total_euros])\n day += 1\n if (debut + timedelta(days=day))>=fin:\n stop=True\n\n serie_list = [\n {\n 'label': \"commandes\",\n 'data': commandes,\n 'yaxis': 1\n },\n {\n 'label': \"€\",\n 'data': ventes,\n 'yaxis': 2\n }\n ]\n\n options = {\n \"series\": {\n \"lines\": {\n \"show\": True,\n \"fill\": True\n },\n \"points\": { \"show\": True }\n },\n 'axisLabels': {\n 'show': True\n },\n \"xaxis\": {\n \"mode\": \"time\",\n \"timeformat\": \"%e %b\",\n \"monthNames\": [\"jan\", \"fev\", \"mar\", \"avr\", \"mai\", \"juin\", \"juil\", \"aout\", \"sept\", \"oct\", \"nov\", \"dec\"]\n },\n \"yaxes\": [\n {\n 'axisLabel': 'commandes',\n \"tickColor\":[\"#fff\"],\n \"tickDecimals\": 0,\n \"min\":0\n },\n {\n 'axisLabel': \"CA\",\n \"position\": \"right\",\n \"tickColor\":[\"#fff\"],\n \"tickDecimals\": 0,\n \"min\":0\n }\n ],\n \"grid\": {\n \"hoverable\": True,\n \"borderWidth\": 1\n },\n \"colors\": [\"rgb(138,75,117)\", \"rgb(71,160,62)\"],\n \"tooltip\":True,\n \"tooltipOpts\": {\n \"content\": \"%x : %y %s\"\n },\n \"legend\": {\n \"show\": True,\n \"labelFormatter\": None, # null or (fn: string, series object -> string)\n #\"labelBoxBorderColor\": color,\n #noColumns: number\n #'position': \"ne\" or \"nw\" or \"se\" or \"sw\"\n #margin: number of pixels or [x margin, y margin]\n #backgroundColor: null or color\n #backgroundOpacity: number between 0 and 1\n #container: null or jQuery object/DOM element/jQuery expression\n #sorted: 
null/false, true, \"ascending\", \"descending\", \"reverse\", or a comparator\n }\n };\n\n\n out_data = {\n 'success': True,\n 'souscriptions': serie_list,\n 'options': options,\n 'ca':ca,\n 'nb_commandes':nb_commandes,\n 'nb_souscriptions':nb_souscriptions\n }\n return out_data", "def getCambiosQafectanCaja(self, fechaInicio, fechaFin, usuarioColaborador=\"\"):\n\tif usuarioColaborador == \"\" and fechaInicio == \"\" and fechaFin == \"\":\n\t return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\"\"\")\n elif usuarioColaborador == \"\":\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\"\"\" %(fechaInicio,fechaFin))\n else:\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\n and c.usuario_Colaborador = '%s'\"\"\" %(fechaInicio,fechaFin,usuarioColaborador))", "def __init__(self):\r\n super(Sistema, self).__init__()\r\n\r\n self.datos = []\r\n\r\n self.productos = [\"Harina de Soya - Hi Pro/Pellet de Soya\", \"Harina de Soya - Full Fat\", \"Torta de Soya\",\r\n \"Torta de Girasol\", \"Aceite de Soya\", \"Grano de Soya\",\r\n \"Azucar\", \"Fierro\", \"Contenedor 20\", \"Contenedor 40\"]\r\n\r\n self.colas_espera_transbordo = {\r\n \"Harina de Soya - Hi Pro/Pellet de Soya\":\r\n {\"Descarga\": [], \"Carga\": []},\r\n \"Harina de Soya - Full Fat\":\r\n {\"Descarga\": [], \"Carga\": []},\r\n \"Torta de Soya\":\r\n {\"Descarga\": [], \"Carga\": []},\r\n \"Torta de Girasol\":\r\n {\"Descarga\": [], \"Carga\": []},\r\n \"Grano de Soya\":\r\n {\"Descarga\": [], \"Carga\": []},\r\n \"Azucar\":\r\n {\"Descarga\": [], \"Carga\": []},\r\n \"Fierro\":\r\n {\"Descarga\": [], \"Carga\": []}\r\n }\r\n\r\n self.camiones_en_sistema = []\r\n self.capacidad_sistema = 20\r\n\r\n # Definición de Recursos de Atención\r\n horarios = {\r\n \"horario 1\": {\"L-V\": {\"Ingreso\": 7.5, \"I. Descanso\": 13.0, \"F. Descanso\": 14.0, \"Salida\": 16.5},\r\n \"SAB\": {\"Ingreso\": 8.5, \"Salida\": 12.5}},\r\n \"horario 2\": {\"L-V\": {\"Ingreso\": 8.0, \"I. Descanso\": 12.0, \"F. 
Descanso\": 13.0, \"Salida\": 17.0},\r\n \"SAB\": {\"Ingreso\": 8.5, \"Salida\": 12.5}}\r\n }\r\n\r\n self.recursos_atencion = {\r\n \"Ventanilla Recepcion\":\r\n Recurso(self, \"Ventanilla Recepcion\",\r\n horarios[\"horario 1\"],\r\n capacity=1),\r\n \"Ventanilla Despacho\":\r\n Recurso(self, \"Ventanilla Despacho\",\r\n horarios[\"horario 1\"],\r\n capacity=1),\r\n \"Balanza 2\":\r\n Recurso(self, \"Balanza 2\",\r\n horarios[\"horario 1\"],\r\n capacity=1),\r\n \"Estacion Volcadora\":\r\n Recurso(self, \"Estacion Volcadora\",\r\n horarios[\"horario 2\"],\r\n capacity=1),\r\n \"Estacion Tolva/Balanza 3\":\r\n Recurso(self, \"Estacion Tolva/Balanza 3\",\r\n horarios[\"horario 2\"],\r\n capacity=1),\r\n \"Pala Mecanica\":\r\n Recurso(self, \"Pala Mecanica\",\r\n horarios[\"horario 2\"],\r\n capacity=1),\r\n \"Cuadrilla de Estibaje\":\r\n Recurso(self, \"Cuadrillas de Estibaje\",\r\n horarios[\"horario 2\"],\r\n capacity=3),\r\n \"Cabina de Recepcion - T1\":\r\n Recurso(self, \"Cabina de Recepcion - T1\",\r\n horarios[\"horario 2\"],\r\n capacity=1),\r\n \"Cabina de Despacho - T1\":\r\n Recurso(self, \"Cabina de Despacho - T1\",\r\n horarios[\"horario 2\"],\r\n capacity=1),\r\n \"Cabina de Recepcion - T2\":\r\n Recurso(self, \"Cabina de Recepcion - T2\",\r\n horarios[\"horario 2\"],\r\n capacity=1),\r\n \"Cabina de Despacho - T2\":\r\n Recurso(self, \"Cabina de Despacho - T2\",\r\n horarios[\"horario 2\"],\r\n capacity=1),\r\n \"Grua\":\r\n Recurso(self, \"Grua\",\r\n horarios[\"horario 2\"],\r\n capacity=1)}\r\n\r\n # Definición de medios de almacenamiento\r\n niv_tolva = {\r\n \"Harina de Soya - Hi Pro/Pellet de Soya\": 0\r\n }\r\n niv_almacen_1 = {\r\n \"Harina de Soya - Hi Pro/Pellet de Soya\": 500\r\n }\r\n niv_almacen_2 = {\r\n \"Harina de Soya - Full Fat\": 100,\r\n \"Torta de Soya\": 100,\r\n \"Torta de Girasol\": 100,\r\n \"Azucar\": 100\r\n }\r\n niv_almacen_ext = {\r\n \"Grano de Soya\": 0\r\n }\r\n niv_tanque_1 = {\r\n \"Aceite de Soya\": 0\r\n }\r\n niv_tanque_2 = {\r\n \"Aceite de Soya\": 0\r\n }\r\n niv_patio_cont = {\r\n \"Contenedor 20\": 0,\r\n \"Contenedor 40\": 0\r\n }\r\n self.medios_almacenamiento = {\r\n \"Tolva\":\r\n MedioDeAlmacenamiento(self, \"Tolva\", 2, niv_tolva, 400),\r\n \"Almacen 1\":\r\n MedioDeAlmacenamiento(self, \"Almacen 1\", 3, niv_almacen_1, 2500),\r\n \"Almacen 2\":\r\n MedioDeAlmacenamiento(self, \"Almacen 2\", 1, niv_almacen_2, 1500),\r\n \"Almacen Ext\":\r\n MedioDeAlmacenamiento(self, \"Almacen Ext\", 3, niv_almacen_ext, 1500),\r\n \"Tanque 1\":\r\n MedioDeAlmacenamiento(self, \"Tanque 1\", 2, niv_tanque_1, 400),\r\n \"Tanque 2\":\r\n MedioDeAlmacenamiento(self, \"Tanque 2\", 2, niv_tanque_2, 500),\r\n \"Patio de Contenedores\":\r\n MedioDeAlmacenamiento(self, \"Patio de Contenedores\", 1, niv_patio_cont, 2500)}\r\n\r\n # Definicion de Operaciones # TODO Ingresar datos reales\r\n operaciones_manipuleo = {\r\n \"Descarga con volcadora\":\r\n OperacionManipuleo(\"Descarga con volcadora\",\r\n self.recursos_atencion[\"Estacion Volcadora\"],\r\n \"uniforme\", [14, 20],\r\n self.datos),\r\n \"Descarga a pulso - Sacos\":\r\n OperacionManipuleo(\"Descarga a pulso - Sacos\",\r\n self.recursos_atencion[\"Cuadrilla de Estibaje\"],\r\n \"uniforme\", [30, 45],\r\n self.datos),\r\n \"Descarga a pulso - Granos\":\r\n OperacionManipuleo(\"Descarga a pulso - Granos\",\r\n self.recursos_atencion[\"Cuadrilla de Estibaje\"],\r\n \"uniforme\", [40, 60],\r\n self.datos),\r\n \"Descarga con bombas electricas - T1\":\r\n OperacionManipuleo(\"Descarga con bombas 
electricas\",\r\n self.recursos_atencion[\"Cabina de Recepcion - T1\"],\r\n \"uniforme\", [40, 50],\r\n self.datos),\r\n \"Descarga con bombas electricas - T2\":\r\n OperacionManipuleo(\"Descarga con bombas electricas\",\r\n self.recursos_atencion[\"Cabina de Recepcion - T2\"],\r\n \"uniforme\", [40, 50],\r\n self.datos),\r\n \"Descarga con grua\":\r\n OperacionManipuleo(\"Descarga con grua\",\r\n self.recursos_atencion[\"Grua\"],\r\n \"uniforme\", [15, 20],\r\n self.datos),\r\n \"Carga con tolva\":\r\n OperacionManipuleo(\"Carga con tolva\",\r\n self.recursos_atencion[\"Estacion Tolva/Balanza 3\"],\r\n \"uniforme\", [14, 20],\r\n self.datos),\r\n \"Carga con pala mecanica\":\r\n OperacionManipuleo(\"Carga con pala mecanica\",\r\n self.recursos_atencion[\"Pala Mecanica\"],\r\n \"uniforme\", [18, 30],\r\n self.datos),\r\n \"Carga a pulso - Sacos\":\r\n OperacionManipuleo(\"Carga a pulso - Sacos\",\r\n self.recursos_atencion[\"Cuadrilla de Estibaje\"],\r\n \"uniforme\", [45, 70],\r\n self.datos),\r\n \"Carga a pulso - Granos\":\r\n OperacionManipuleo(\"Carga a pulso - Granos\",\r\n self.recursos_atencion[\"Cuadrilla de Estibaje\"],\r\n \"uniforme\", [60, 90],\r\n self.datos),\r\n \"Carga con bombas electricas - T1\":\r\n OperacionManipuleo(\"Carga con bombas electricas\",\r\n self.recursos_atencion[\"Cabina de Despacho - T1\"],\r\n \"uniforme\", [45, 60],\r\n self.datos),\r\n \"Carga con bombas electricas - T2\":\r\n OperacionManipuleo(\"Carga con bombas electricas\",\r\n self.recursos_atencion[\"Cabina de Despacho - T2\"],\r\n \"uniforme\", [45, 60],\r\n self.datos),\r\n \"Carga con grua\":\r\n OperacionManipuleo(\"Carga con grua\",\r\n self.recursos_atencion[\"Grua\"],\r\n \"uniforme\", [15, 22],\r\n self.datos),\r\n \"Transbordo en sistema mecanizado (D)\":\r\n OperacionManipuleo(\"Transbordo en sistema mecanizado (D)\",\r\n self.recursos_atencion[\"Estacion Volcadora\"],\r\n \"uniforme\", [14, 25],\r\n self.datos),\r\n \"Transbordo en sistema mecanizado (C)\":\r\n OperacionManipuleo(\"Transbordo en sistema mecanizado (C)\",\r\n self.recursos_atencion[\"Estacion Tolva/Balanza 3\"],\r\n \"uniforme\", [14, 25],\r\n self.datos),\r\n \"Transbordo a pulso - Sacos\":\r\n OperacionManipuleo(\"Transbordo a pulso - Sacos\",\r\n self.recursos_atencion[\"Cuadrilla de Estibaje\"],\r\n \"uniforme\", [40, 60],\r\n self.datos),\r\n \"Transbordo a pulso - Granos\":\r\n OperacionManipuleo(\"Transbordo a pulso - Sacos\",\r\n self.recursos_atencion[\"Cuadrilla de Estibaje\"],\r\n \"uniforme\", [45, 65],\r\n self.datos),\r\n \"Transbordo con grua\":\r\n OperacionManipuleo(\"Transbordo con grua\",\r\n self.recursos_atencion[\"Grua\"],\r\n \"uniforme\", [15, 20],\r\n self.datos)}\r\n\r\n operaciones_complementarias = {\r\n \"Atencion recepcion 1\":\r\n Operacion(\"Atencion recepcion 1\",\r\n self.recursos_atencion[\"Ventanilla Recepcion\"],\r\n \"uniforme\", [2, 10],\r\n self.datos),\r\n \"Atencion despacho 1\":\r\n Operacion(\"Atencion despacho 1\",\r\n self.recursos_atencion[\"Ventanilla Despacho\"],\r\n \"uniforme\", [2, 10],\r\n self.datos),\r\n \"Primer pesaje\":\r\n Operacion(\"Primer pesaje\",\r\n self.recursos_atencion[\"Balanza 2\"],\r\n \"uniforme\", [3, 6],\r\n self.datos),\r\n \"Segundo pesaje\":\r\n Operacion(\"Segundo pesaje\",\r\n self.recursos_atencion[\"Balanza 2\"],\r\n \"uniforme\", [3, 6],\r\n self.datos),\r\n \"Primer pesaje - B3\":\r\n Operacion(\"Primer pesaje - B3\",\r\n self.recursos_atencion[\"Estacion Tolva/Balanza 3\"],\r\n \"uniforme\", [3, 6],\r\n self.datos),\r\n \"Segundo pesaje 
- B3\":\r\n Operacion(\"Segundo pesaje -B3\",\r\n self.recursos_atencion[\"Estacion Tolva/Balanza 3\"],\r\n \"uniforme\", [3, 6],\r\n self.datos),\r\n \"Atencion recepcion 2\":\r\n Operacion(\"Atencion recepcion 2\",\r\n self.recursos_atencion[\"Ventanilla Recepcion\"],\r\n \"uniforme\", [4, 8],\r\n self.datos),\r\n \"Atencion despacho 2\":\r\n Operacion(\"Atencion despacho 2\",\r\n self.recursos_atencion[\"Ventanilla Despacho\"],\r\n \"uniforme\", [2, 5],\r\n self.datos)}\r\n\r\n # Diccionario general de operaciones\r\n self.operaciones = {\r\n \"Operaciones manipuleo\":\r\n operaciones_manipuleo,\r\n \"Operaciones complementarias\":\r\n operaciones_complementarias}", "def __init__(self, nombre, cantidad, precio):\n\n # Atributos privados por convensión\n self._an = 15 # Ancho de columna nombre\n self._ac = 8 # Ancho de columna cantidad\n self._ap = 10 # Ancho de columna precio\n self._ast = 10 # Ancho de columna subtotal\n\n # Se inicializan los atributos de la instancia\n self.nombre = nombre\n self.cantidad = cantidad\n self.precio = precio", "def __init__(self, nombre, cantidad, precio, marca, modelo):\n\n # Se ejecuta el constructor de la clase padre\n super().__init__(nombre, cantidad, precio)\n\n # Se modifica el valor de un atributo privado\n self._an = 25\n\n # Se inicializan los atributos de la clase hija\n self.marca = marca\n self.modelo = modelo", "def __init__(self, marqueur, allele, hauteur, concordance_mere_foetus, informatif, num_foetus, contamination, taux):\n\n super().__init__(marqueur, allele, hauteur, informatif)\n self.num_foetus = num_foetus\n self.contamination = contamination\n self.taux = taux\n self.concordance_mere_foetus = concordance_mere_foetus", "def resultadosAnuales(self):\r\n self.checkingConnection()\r\n self.model = QSqlQueryModel()\r\n self.model.setQuery('''SELECT years, ingresos, compras, gastos, \r\n (ingresos - compras - gastos) AS Total FROM (\r\n\t\t\tSELECT years, \r\n ingresos, compras, gastos FROM ((SELECT Clients.year AS years, \r\n SUM(Clients.value) AS ingresos FROM Clients GROUP BY Clients.year) \r\n JOIN (SELECT Compras.year AS year2, SUM(Compras.value) AS compras \r\n FROM Compras GROUP BY Compras.year) JOIN (SELECT Gastos.year AS year3, \r\n SUM(Gastos.value) AS gastos FROM Gastos GROUP BY Gastos.year) \r\n ON years = year2 AND year2 = year3)\r\n\t\t\t) ''', self.db)\r\n # Getting the table values\r\n self.years = []\r\n self.ingresos = []\r\n self.compras = []\r\n self.gastos = []\r\n self.total = []\r\n # Save the Query values in each list\r\n for i in range(self.model.rowCount()):\r\n # record is the row and value the column\r\n self.years.append(self.model.record(i).value(\"years\"))\r\n self.ingresos.append(self.model.record(i).value(\"ingresos\"))\r\n self.compras.append(self.model.record(i).value(\"compras\"))\r\n self.gastos.append(self.model.record(i).value(\"gastos\"))\r\n self.total.append(self.model.record(i).value(\"Total\"))\r\n self.setModel(self.model)\r\n # Creating the Bar Graph\r\n self.grafica(self.years)", "def get_bilan_conso(self):\n qs = self.get_cerema_cities().aggregate(bilan=Coalesce(Sum(\"naf11art21\"), float(0)))\n return qs[\"bilan\"] / 10000", "def __init__(self, tamanho_bateria: int = 75):\n self.tamanho_bateria = tamanho_bateria", "def f_precios_masivos(p0_fini, p1_ffin, p2_gran, p3_inst, p4_oatk, p5_ginc):\n\n def f_datetime_range_fx(p0_start, p1_end, p2_inc, p3_delta):\n \"\"\"\n Parameters\n ----------\n p0_start\n p1_end\n p2_inc\n p3_delta\n Returns\n -------\n ls_resultado\n Debugging\n 
---------\n \"\"\"\n\n ls_result = []\n nxt = p0_start\n\n while nxt <= p1_end:\n ls_result.append(nxt)\n if p3_delta == 'minutes':\n nxt += timedelta(minutes=p2_inc)\n elif p3_delta == 'hours':\n nxt += timedelta(hours=p2_inc)\n elif p3_delta == 'days':\n nxt += timedelta(days=p2_inc)\n\n return ls_result\n\n # inicializar api de OANDA\n\n api = API(access_token=p4_oatk)\n\n gn = {'S30': 30, 'S10': 10, 'S5': 5, 'M1': 60, 'M5': 60 * 5, 'M15': 60 * 15,\n 'M30': 60 * 30, 'H1': 60 * 60, 'H4': 60 * 60 * 4, 'H8': 60 * 60 * 8,\n 'D': 60 * 60 * 24, 'W': 60 * 60 * 24 * 7, 'M': 60 * 60 * 24 * 7 * 4}\n\n # -- para el caso donde con 1 peticion se cubran las 2 fechas\n if int((p1_ffin - p0_fini).total_seconds() / gn[p2_gran]) < 4999:\n\n # Fecha inicial y fecha final\n f1 = p0_fini.strftime('%Y-%m-%dT%H:%M:%S')\n f2 = p1_ffin.strftime('%Y-%m-%dT%H:%M:%S')\n\n # Parametros pra la peticion de precios\n params = {\"granularity\": p2_gran, \"price\": \"M\", \"dailyAlignment\": 16, \"from\": f1,\n \"to\": f2}\n\n # Ejecutar la peticion de precios\n a1_req1 = instruments.InstrumentsCandles(instrument=p3_inst, params=params)\n a1_hist = api.request(a1_req1)\n\n # Para debuging\n # print(f1 + ' y ' + f2)\n lista = list()\n\n # Acomodar las llaves\n for i in range(len(a1_hist['candles']) - 1):\n lista.append({'TimeStamp': a1_hist['candles'][i]['time'],\n 'Open': a1_hist['candles'][i]['mid']['o'],\n 'High': a1_hist['candles'][i]['mid']['h'],\n 'Low': a1_hist['candles'][i]['mid']['l'],\n 'Close': a1_hist['candles'][i]['mid']['c']})\n\n # Acomodar en un data frame\n r_df_final = pd.DataFrame(lista)\n r_df_final = r_df_final[['TimeStamp', 'Open', 'High', 'Low', 'Close']]\n r_df_final['TimeStamp'] = pd.to_datetime(r_df_final['TimeStamp'])\n r_df_final['Open'] = pd.to_numeric(r_df_final['Open'], errors='coerce')\n r_df_final['High'] = pd.to_numeric(r_df_final['High'], errors='coerce')\n r_df_final['Low'] = pd.to_numeric(r_df_final['Low'], errors='coerce')\n r_df_final['Close'] = pd.to_numeric(r_df_final['Close'], errors='coerce')\n\n return r_df_final\n\n # -- para el caso donde se construyen fechas secuenciales\n else:\n\n # hacer series de fechas e iteraciones para pedir todos los precios\n fechas = f_datetime_range_fx(p0_start=p0_fini, p1_end=p1_ffin, p2_inc=p5_ginc,\n p3_delta='minutes')\n\n # Lista para ir guardando los data frames\n lista_df = list()\n\n for n_fecha in range(0, len(fechas) - 1):\n\n # Fecha inicial y fecha final\n f1 = fechas[n_fecha].strftime('%Y-%m-%dT%H:%M:%S')\n f2 = fechas[n_fecha + 1].strftime('%Y-%m-%dT%H:%M:%S')\n\n # Parametros pra la peticion de precios\n params = {\"granularity\": p2_gran, \"price\": \"M\", \"dailyAlignment\": 16, \"from\": f1,\n \"to\": f2}\n\n # Ejecutar la peticion de precios\n a1_req1 = instruments.InstrumentsCandles(instrument=p3_inst, params=params)\n a1_hist = api.request(a1_req1)\n\n # Para debuging\n print(f1 + ' y ' + f2)\n lista = list()\n\n # Acomodar las llaves\n for i in range(len(a1_hist['candles']) - 1):\n lista.append({'TimeStamp': a1_hist['candles'][i]['time'],\n 'Open': a1_hist['candles'][i]['mid']['o'],\n 'High': a1_hist['candles'][i]['mid']['h'],\n 'Low': a1_hist['candles'][i]['mid']['l'],\n 'Close': a1_hist['candles'][i]['mid']['c']})\n\n # Acomodar en un data frame\n pd_hist = pd.DataFrame(lista)\n pd_hist = pd_hist[['TimeStamp', 'Open', 'High', 'Low', 'Close']]\n pd_hist['TimeStamp'] = pd.to_datetime(pd_hist['TimeStamp'])\n\n # Ir guardando resultados en una lista\n lista_df.append(pd_hist)\n\n # Concatenar todas las listas\n r_df_final = 
pd.concat([lista_df[i] for i in range(0, len(lista_df))])\n\n # resetear index en dataframe resultante porque guarda los indices del dataframe pasado\n r_df_final = r_df_final.reset_index(drop=True)\n r_df_final['Open'] = pd.to_numeric(r_df_final['Open'], errors='coerce')\n r_df_final['High'] = pd.to_numeric(r_df_final['High'], errors='coerce')\n r_df_final['Low'] = pd.to_numeric(r_df_final['Low'], errors='coerce')\n r_df_final['Close'] = pd.to_numeric(r_df_final['Close'], errors='coerce')\n\n return r_df_final", "def mostrarBicicletasDisponiveis(self) -> int:\n estoque_atual = Loja().mostrarEstoque()\n print(f'Bicicletas disponíveis: {estoque_atual}')\n return estoque_atual", "def resultat(self, concordance_mf, concordance_pf, liste_F, liste_M, liste_P):\n resultat = {\"Marqueur\": [], \"Conclusion\": [], \"Concordance Mere/Foetus\": [], \"Détails M/F\": [],\n \"Concordance Pere/Foetus\": [], \"Détails P/F\": []}\n marqueurs_conta = 0\n marqueurs_non_conta = 0\n somme_conta = 0\n if liste_F[0].allele[1] == 0.0:\n self.set_sexe(\"F\")\n else:\n self.set_sexe(\"M\")\n if concordance_mf != 16 and concordance_pf != 16 and concordance_pf != None:\n self.set_concordance_mere_foet(\"NON\")\n self.set_concordance_pere_foet(\"NON\")\n del resultat[\"Conclusion\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n resultat[\"Concordance Mere/Foetus\"].append(liste_F[nbres].concordance_mere_foetus)\n resultat[\"Concordance Pere/Foetus\"].append(liste_P[nbres].concordance_pere_foetus)\n if liste_F[nbres].concordance_mere_foetus == \"NON\" and liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M : \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails P/F\"].append(\n \"P : \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F : \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n elif liste_F[nbres].concordance_mere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M: \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F : \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails P/F\"].append(\"\")\n elif liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails P/F\"].append(\n \"P: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails M/F\"].append(\"\")\n else:\n resultat[\"Détails M/F\"].append(\"\")\n resultat[\"Détails P/F\"].append(\"\")\n conclusion = pd.DataFrame({\"1\": [\"Non calculé\", \"Non calculé\", \"Non calculé\", self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\",\n \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Concordance Mere/Foetus\", \"Détails M/F\",\n \"Concordance Pere/Foetus\", \"Détails P/F\"])\n return resultats, conclusion\n elif concordance_mf != len(liste_F) and concordance_pf == len(liste_F) or concordance_mf != len(\n liste_F) and concordance_pf == None:\n self.set_concordance_mere_foet(\"NON\")\n self.set_concordance_pere_foet(\"OUI\")\n if concordance_pf == None:\n self.set_concordance_pere_foet(\"ABS\")\n del resultat[\"Conclusion\"]\n del resultat[\"Concordance Pere/Foetus\"]\n del resultat[\"Détails P/F\"]\n for nbres in range(1, 
len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n resultat[\"Concordance Mere/Foetus\"].append(liste_F[nbres].concordance_mere_foetus)\n if liste_F[nbres].concordance_mere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M: \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n else:\n resultat[\"Détails M/F\"].append(\"\")\n conclusion = pd.DataFrame({\"1\": [\"Non calculé\", \"Non calculé\", \"Non calculé\", self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\",\n \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Concordance Mere/Foetus\", \"Détails M/F\"])\n return resultats, conclusion\n elif concordance_mf == len(liste_F) and concordance_pf == len(liste_F) or concordance_mf == len(\n liste_F) and concordance_pf == None:\n self.set_concordance_mere_foet(\"OUI\")\n self.set_concordance_pere_foet(\"OUI\")\n if concordance_pf == None:\n self.set_concordance_pere_foet(\"ABS\")\n del resultat[\"Concordance Mere/Foetus\"]\n del resultat[\"Concordance Pere/Foetus\"]\n del resultat[\"Détails P/F\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n if liste_F[nbres].informatif == 0:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Mère homozygote\")\n elif liste_F[nbres].informatif == 1:\n if liste_F[nbres].contamination == 0:\n marqueurs_non_conta += 1\n resultat[\"Conclusion\"].append(\"Non contaminé\")\n resultat[\"Détails M/F\"].append(\"\")\n elif liste_F[nbres].contamination == 1:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n else:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n elif liste_F[nbres].informatif == 2:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Allèles semblables\")\n else:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Echo\")\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Conclusion\", \"Détails M/F\"])\n try:\n moyenne_conta = somme_conta / marqueurs_conta\n except ZeroDivisionError:\n moyenne_conta = 0\n conclusion = pd.DataFrame(\n {\"1\": [int(marqueurs_non_conta), int(marqueurs_conta), round(moyenne_conta, 2), self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\", \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n return resultats, conclusion\n elif concordance_mf == len(liste_F) and concordance_pf != len(liste_F):\n self.set_concordance_mere_foet(\"OUI\")\n self.set_concordance_pere_foet(\"NON\")\n del resultat[\"Concordance Mere/Foetus\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Concordance Pere/Foetus\"].append(liste_P[nbres].concordance_pere_foetus)\n if liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails P/F\"].append(\n \"P: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F: \" + 
str(liste_P[nbres].normalisation(liste_P[nbres].allele)))\n else:\n resultat[\"Détails P/F\"].append(\"\")\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n if liste_F[nbres].informatif == 0:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Mère homozygote\")\n elif liste_F[nbres].informatif == 1:\n if liste_F[nbres].contamination == 0:\n marqueurs_non_conta += 1\n resultat[\"Conclusion\"].append(\"Non contaminé\")\n resultat[\"Détails M/F\"].append(\"\")\n elif liste_F[nbres].contamination == 1:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n else:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n elif liste_F[nbres].informatif == 2:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Allèles semblables\")\n else:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Echo\")\n resultats = pd.DataFrame(resultat,\n columns=[\"Marqueur\", \"Conclusion\", \"Détails M/F\", \"Concordance Pere/Foetus\",\n \"Détails P/F\"])\n try:\n moyenne_conta = somme_conta / marqueurs_conta\n except ZeroDivisionError:\n moyenne_conta = 0\n conclusion = pd.DataFrame(\n {\"1\": [int(marqueurs_non_conta), int(marqueurs_conta), round(moyenne_conta, 2), self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\", \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n return resultats, conclusion", "def __init__(self):\n self.enfila= 0\n self.fila = []", "def agregar(self, medicamento, codigo):\n self.medicina[medicamento] = Farmacia(medicamento, codigo, \"No vencido\")", "def getStatVentesAnnee(self, in_data):\n\n try:\n date_debut = in_data['date_debut']\n dt_debut = dateutil.parser.parse(date_debut)\n date_fin = in_data['date_fin']\n dt_fin = dateutil.parser.parse(date_fin)\n except:\n out_data = {\n 'success': False\n }\n return out_data\n\n local_dt_debut = dt_debut.astimezone (pytz.timezone('Europe/Paris'))\n debut = datetime(local_dt_debut.year, local_dt_debut.month,1)\n local_dt_fin = dt_fin.astimezone (pytz.timezone('Europe/Paris'))\n fin = datetime(local_dt_fin.year, local_dt_fin.month,1) + relativedelta(months=+1)\n\n commandes=[]\n ventes=[]\n month = 0\n stop = False\n ca = 0\n nb_commandes = 0\n nb_souscriptions = 0\n while not stop :\n time_debut = debut + relativedelta(months=+month)\n timestamp = calendar.timegm(time_debut.timetuple()) * 1000\n time_fin = time_debut + relativedelta(months=+1)\n # ch_list = CommandeHistory.objects.filter(etat='PAY',date__gte=time_debut, date__lt=time_fin)\n c_list = Commande.objects.filter(etat='PAY',date__gte=time_debut,date__lt=time_fin).distinct()\n total_euros = 0\n total_souscriptions = 0\n total_commandes = 0\n for commande in c_list:\n total_euros += commande.montant\n for souscription in commande.souscription_set.all():\n total_souscriptions += souscription.quantite\n total_commandes += 1\n\n ca+=total_euros\n nb_souscriptions+=total_souscriptions\n nb_commandes+=total_commandes\n commandes.append([timestamp,total_commandes])\n ventes.append([timestamp,total_euros])\n month += 1\n 
if (debut + relativedelta(months=+month))>=fin:\n stop=True\n\n serie_list = [\n {\n 'label': \"commandes\",\n 'data': commandes,\n 'yaxis': 1\n },\n {\n 'label': \"€\",\n 'data': ventes,\n 'yaxis': 2\n }\n ]\n\n options = {\n \"series\": {\n \"lines\": {\n \"show\": True,\n \"fill\": True\n },\n \"points\": { \"show\": True }\n },\n 'axisLabels': {\n 'show': True\n },\n \"xaxis\": {\n \"mode\": \"time\",\n \"timeformat\": \"%b %y\",\n \"monthNames\": [\"jan\", \"fev\", \"mar\", \"avr\", \"mai\", \"juin\", \"juil\", \"aout\", \"sept\", \"oct\", \"nov\", \"dec\"]\n },\n \"yaxes\": [\n {\n 'axisLabel': 'commandes',\n \"tickColor\":[\"#fff\"],\n \"tickDecimals\": 0,\n \"min\":0\n },\n {\n 'axisLabel': \"CA\",\n \"position\": \"right\",\n \"tickDecimals\": 0,\n \"min\":0\n }\n ],\n \"grid\": {\n \"hoverable\": True,\n \"borderWidth\": 1\n },\n \"colors\": [\"rgb(138,75,117)\", \"rgb(71,160,62)\"],\n \"tooltip\":True,\n \"tooltipOpts\": {\n \"content\": \"%x : %y %s\"\n },\n \"legend\": {\n \"show\": True,\n \"labelFormatter\": None, # null or (fn: string, series object -> string)\n #\"labelBoxBorderColor\": color,\n #noColumns: number\n #'position': \"ne\" or \"nw\" or \"se\" or \"sw\"\n #margin: number of pixels or [x margin, y margin]\n #backgroundColor: null or color\n #backgroundOpacity: number between 0 and 1\n #container: null or jQuery object/DOM element/jQuery expression\n #sorted: null/false, true, \"ascending\", \"descending\", \"reverse\", or a comparator\n }\n };\n\n\n out_data = {\n 'success': True,\n 'souscriptions': serie_list,\n 'options': options,\n 'ca':ca,\n 'nb_commandes':nb_commandes,\n 'nb_souscriptions':nb_souscriptions\n }\n return out_data", "def generarConsultasConexion(self):\n for parRecursos in self.CombiConsultaLibre:\n parRecursosL0=self.limpiaRecursos(parRecursos[0])\n parRecursosL1=self.limpiaRecursos(parRecursos[1])\n \n if self.nivel_profundidad>=1:\n consultasparql = self.busConex1 % (parRecursosL0,parRecursosL1,self.limit_BC)\n print consultasparql;\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n triple = parRecursos[0]+\"-|\"+parRecursos[1]+\"-|\"+resul['p1']['value']\n self.ResultConsultasConexion.append(triple) \n \n if self.nivel_profundidad>=2:\n consultasparql = self.busConex2 % (parRecursosL0,parRecursosL1,self.limit_BC)\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n o1=resul['o1']['value']\n o1=o1.replace('http://dbpedia.org/resource/','')\n triple1 = parRecursos[0]+\"-|\"+o1+\"*-|\"+resul['p1']['value']\n triple2 = parRecursos[1]+\"-|\"+o1+\"*-|\"+resul['p2']['value']\n self.ResultConsultasConexion.append(triple1) \n self.ResultConsultasConexion.append(triple2) \n \n if self.nivel_profundidad>=3:\n consultasparql = self.busConex3_1 % (parRecursosL0,parRecursosL1,self.limit_BC)\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n o1=resul['o1']['value']\n o1=o1.replace('http://dbpedia.org/resource/','')\n o2=resul['o2']['value']\n o2=o1.replace('http://dbpedia.org/resource/','')\n triple1 = parRecursos[0]+\"-|\"+o1+\"*-|\"+resul['p1']['value']\n triple2 = parRecursos[1]+\"-|\"+o2+\"*-|\"+resul['p2']['value']\n triple3 = o1+\"*-|\"+o2+\"*-|\"+resul['p3']['value'] \n self.ResultConsultasConexion.append(triple1) \n self.ResultConsultasConexion.append(triple2) \n self.ResultConsultasConexion.append(triple3) \n\n consultasparql = self.busConex3_2 % (parRecursosL0,parRecursosL1,self.limit_BC)\n 
resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n o1=resul['o1']['value']\n o1=o1.replace('http://dbpedia.org/resource/','')\n o2=resul['o2']['value']\n o2=o1.replace('http://dbpedia.org/resource/','')\n triple1 = parRecursos[0]+\"-|\"+o1+\"*-|\"+resul['p1']['value']\n triple2 = parRecursos[1]+\"-|\"+o2+\"*-|\"+resul['p2']['value']\n triple3 = o2+\"*-|\"+o1+\"*-|\"+resul['p3']['value'] \n self.ResultConsultasConexion.append(triple1) \n self.ResultConsultasConexion.append(triple2) \n self.ResultConsultasConexion.append(triple3)", "def inicializa_tabuleiro(self, cavernas, brisas, fedores, wumpus=1, ouros=1):\n casas_livres = self.casas_livres()\n casas_sem_caverna = self.casas_sem_caverna()\n casas_sem_brisa = self.casas_sem_brisa()\n casas_sem_fedor = self.casas_sem_fedor()\n\n for _ in range(ouros):\n x, y = casas_livres.pop(random.randint(0, len(casas_livres) - 1))\n self.posiciona_ouro(x, y)\n\n for _ in range(wumpus):\n x, y = casas_livres.pop(random.randint(0,len(casas_livres) - 1))\n self.posiciona_wumpus(x, y)\n\n for _ in range(cavernas):\n x, y = casas_sem_caverna.pop(random.randint(0, len(casas_sem_caverna) - 1))\n self.posiciona_caverna(x, y)\n\n for _ in range(brisas):\n x, y = casas_sem_brisa.pop(random.randint(0,len(casas_sem_brisa) - 1))\n self.posiciona_brisa(x, y)\n\n casas_com_wumpus = self.casas_com_wumpus()\n casas_proximas = []\n for x, y in casas_com_wumpus:\n # Pega todas as casas ao redor da casa que tem o wumpus.\n if x > 0:\n casas_proximas.append((x - 1, y))\n if x < self.tamanho - 1:\n casas_proximas.append((x + 1, y))\n if y > 0:\n casas_proximas.append((x, y - 1))\n if y < self.tamanho - 1:\n casas_proximas.append((x, y + 1))\n\n for _ in range(fedores):\n if casas_proximas:\n x, y = casas_proximas.pop(random.randint(0, len(casas_proximas) - 1))\n self.posiciona_fedor(x, y)" ]
[ "0.59883666", "0.57581353", "0.56326735", "0.56068796", "0.53704154", "0.533318", "0.53174007", "0.52919334", "0.5273505", "0.52577746", "0.5244493", "0.52358776", "0.5231335", "0.5222198", "0.5213215", "0.51973087", "0.5174603", "0.5153929", "0.5133798", "0.5123422", "0.5096795", "0.5085688", "0.50816935", "0.50675267", "0.5056353", "0.50552493", "0.5042773", "0.50387156", "0.5007452", "0.49952647" ]
0.6868715
0
This method creates the Kasino map. By default it is created as 40x40, and although it is not needed in this program, I leave the option of passing a different width and height so that different maps can be created as you progress or by difficulty level... KEEP IN MIND THAT THE SPACES IN THE MAP LIST ARE 3 SPACE CHARACTERS
def crear_mapa(self, ancho=40, largo=40):
    for i in range(largo):
        a = "   "
        b = []
        for z in range(ancho):
            b.append(a)
        kasino.mapa.append(b)
    for i in range(1, ancho - 1):
        kasino.mapa[0][i] = "═══"
        kasino.mapa[largo - 1][i] = "═══"
    for i in range(1, largo - 1):
        kasino.mapa[i][0] = " ║"
        kasino.mapa[i][ancho - 1] = "║"
    kasino.mapa[0][0] = " ╔"
    kasino.mapa[0][ancho - 1] = "╗"
    kasino.mapa[largo - 1][0] = " ╚"
    kasino.mapa[largo - 1][ancho - 1] = "╝"
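A minimal usage sketch, assuming only what the snippet above implies: crear_mapa fills kasino.mapa with rows of 3-character cells plus a box border. The helper name imprimir_mapa is hypothetical and not taken from the source.

def imprimir_mapa(mapa):
    # Print the grid row by row; joining the 3-character cells keeps the
    # ═══ / ║ border drawn by crear_mapa aligned.
    # e.g. imprimir_mapa(kasino.mapa) after calling crear_mapa()
    for fila in mapa:
        print("".join(fila))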
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pintarIMAGENENMAPA(self, pos):\n # Agrego al vector que controla las images\n k = (pos[0], pos[1], \"img\"+str(self.idIMG))\n # Si deseo pintar una silla\n if self.queIMGAgregar == 1:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgSilla, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar una mesa\n if self.queIMGAgregar == 2:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgMesa, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Se deseo pintar una nevera\n if self.queIMGAgregar == 3:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgNevera, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar una cama\n if self.queIMGAgregar == 4:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgCama, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar un sofa\n if self.queIMGAgregar == 5:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgSofa, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar tv\n if self.queIMGAgregar == 6:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgTV, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar lampara\n if self.queIMGAgregar == 7:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgLampara, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar planta\n if self.queIMGAgregar == 8:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgPlanta, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar sanitario\n if self.queIMGAgregar == 9:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgSanitario, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar lavamanos\n if self.queIMGAgregar == 10:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgLavamanos, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar la ducha\n if self.queIMGAgregar == 11:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgDucha, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n 
self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1", "def JUEGOPAREJAS():\r\n cuadrados=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]\r\n ##############Primeramente, definimos los cuadrados de nuestra pizarra##############\r\n Cuadrado1=Rect(420,140,100,100)\r\n Cuadrado2=Rect(520,140,100,100)\r\n Cuadrado3=Rect(620,140,100,100)\r\n Cuadrado4=Rect(720,140,100,100)\r\n Cuadrado5=Rect(420,240,100,100)\r\n Cuadrado6=Rect(520,240,100,100)\r\n Cuadrado7=Rect(620,240,100,100)\r\n Cuadrado8=Rect(720,240,100,100)\r\n Cuadrado9=Rect(420,340,100,100)\r\n Cuadrado10=Rect(520,340,100,100)\r\n Cuadrado11=Rect(620,340,100,100)\r\n Cuadrado12=Rect(720,340,100,100)\r\n Cuadrado13=Rect(420,440,100,100)\r\n Cuadrado14=Rect(520,440,100,100)\r\n Cuadrado15=Rect(620,440,100,100)\r\n Cuadrado16=Rect(720,440,100,100)\r\n\r\n Totalcuadrados=(Cuadrado1,Cuadrado2,Cuadrado3,Cuadrado4,Cuadrado5,Cuadrado6,Cuadrado7,Cuadrado8,Cuadrado9,Cuadrado10,Cuadrado11,Cuadrado12,Cuadrado13,Cuadrado14,Cuadrado15,Cuadrado16)\r\n listafiguras=[]\r\n def ventanaprincipal():\r\n \"\"\"\"\r\n Esta funcion, genera la ventana principal de nuestro juego de parejas. Contiene\r\n cada uno de los titulos e imagenes del juego.\r\n :return None\r\n \"\"\"\r\n titulo_principal=pygame.image.load(\"../recursor/Imagenes juegos/parejas_1.png\")\r\n ventana.blit(titulo_principal, (405,0))\r\n logo_plaython=pygame.image.load(\"../recursor/Imagenes juegos/plaython.png\")#Cargamos la imagen del logo\r\n imagen_decoracion=pygame.image.load(\"../recursor/Imagenes juegos/imagenpc.png\")\r\n imagen_decoracion = pygame.transform.scale(imagen_decoracion, (210, 210))\r\n ventana.blit(logo_plaython, (900, 180))\r\n ventana.blit(imagen_decoracion,(100,240))\r\n titulo_plaython = pygame.image.load(\"../recursor/Imagenes juegos/TITULOPLAYTHON.png\")\r\n titulo_plaython = pygame.transform.scale(titulo_plaython, (240, 150))\r\n ventana.blit(titulo_plaython, (505, 550))\r\n\r\n def dibujarcirculos(x,color):\r\n \"\"\"\r\n Esta funcion, dibuja cada uno de las figuras de forma circular del juego en base a su generacion aleatoria.\r\n :param int x: Representa la casilla elegida, y en base a este valor, se dibuja un cuadrado en determinado lugar\r\n :param tuple color: Representa un color determinado, el cual se encuentra escrito en base a sus valores RGB\r\n :return None\r\n \"\"\"\r\n if x==1:\r\n pygame.draw.circle(ventana,color,(470,190),30)\r\n elif x==2:\r\n pygame.draw.circle(ventana,color,(570,190),30)\r\n elif x==3:\r\n pygame.draw.circle(ventana,color,(670,190),30)\r\n elif x==4:\r\n pygame.draw.circle(ventana,color,(770,190),30)\r\n elif x==5:\r\n pygame.draw.circle(ventana,color,(470,290),30)\r\n elif x==6:\r\n pygame.draw.circle(ventana,color,(570,290),30)\r\n elif x==7:\r\n pygame.draw.circle(ventana,color,(670,290),30)\r\n elif x==8:\r\n pygame.draw.circle(ventana,color,(770,290),30)\r\n elif x==9:\r\n pygame.draw.circle(ventana,color,(470,390),30)\r\n elif x==10:\r\n pygame.draw.circle(ventana,color,(570,390),30)\r\n elif x==11:\r\n pygame.draw.circle(ventana,color,(670,390),30)\r\n elif x==12:\r\n pygame.draw.circle(ventana,color,(770,390),30)\r\n elif x==13:\r\n pygame.draw.circle(ventana,color,(470,490),30)\r\n elif x==14:\r\n pygame.draw.circle(ventana,color,(570,490),30)\r\n elif x==15:\r\n pygame.draw.circle(ventana,color,(670,490),30)\r\n elif x==16:\r\n pygame.draw.circle(ventana,color,(770,490),30)\r\n\r\n def dibujarcuadrados(x,color):\r\n \"\"\"\r\n Esta funcion, dibuja cada uno de 
las figuras de forma cuadrada del juego en base a su generacion aleatoria.\r\n :param int x: Representa la casilla elegida, y en base a este valor, se dibuja un cuadrado en determinado lugar\r\n :param tuple color: Representa un color determinado, el cual se encuentra escrito en base a sus valores RGB\r\n :return None\r\n \"\"\"\r\n if x==1:\r\n pygame.draw.rect(ventana,color,Rect(440,160,60,60))\r\n elif x==2:\r\n pygame.draw.rect(ventana,color,Rect(540,160,60,60))\r\n elif x==3:\r\n pygame.draw.rect(ventana,color,Rect(640,160,60,60))\r\n elif x==4:\r\n pygame.draw.rect(ventana,color,Rect(740,160,60,60))\r\n elif x==5:\r\n pygame.draw.rect(ventana,color,Rect(440,260,60,60))\r\n elif x==6:\r\n pygame.draw.rect(ventana,color,Rect(540,260,60,60))\r\n elif x==7:\r\n pygame.draw.rect(ventana,color,Rect(640,260,60,60))\r\n elif x==8:\r\n pygame.draw.rect(ventana,color,Rect(740,260,60,60))\r\n elif x==9:\r\n pygame.draw.rect(ventana,color,Rect(440,360,60,60))\r\n elif x==10:\r\n pygame.draw.rect(ventana,color,Rect(540,360,60,60))\r\n elif x==11:\r\n pygame.draw.rect(ventana,color,Rect(640,360,60,60))\r\n elif x==12:\r\n pygame.draw.rect(ventana,color,Rect(740,360,60,60))\r\n elif x==13:\r\n pygame.draw.rect(ventana,color,Rect(440,460,60,60))\r\n elif x==14:\r\n pygame.draw.rect(ventana,color,Rect(540,460,60,60))\r\n elif x==15:\r\n pygame.draw.rect(ventana,color,Rect(640,460,60,60))\r\n elif x==16:\r\n pygame.draw.rect(ventana,color,Rect(740,460,60,60))\r\n\r\n def dibujardiamantes(x,color):\r\n \"\"\"\r\n Esta funcion, dibuja cada uno de las figuras de forma de diamante del juego en base a su generacion aleatoria.\r\n :param int x: Representa la casilla elegida, y en base a este valor, se dibuja un cuadrado en determinado lugar\r\n :param tuple color: Representa un color determinado, el cual se encuentra escrito en base a sus valores RGB\r\n :return None\r\n \"\"\"\r\n if x==1:\r\n pygame.draw.polygon(ventana,color,( (430,190),(470,160),(510,190),(470,220)))\r\n elif x==2:\r\n pygame.draw.polygon(ventana,color,( (530,190),(570,160),(610,190),(570,220) ))\r\n elif x==3:\r\n pygame.draw.polygon(ventana,color,( (630,190),(670,160),(710,190),(670,220) ))\r\n elif x==4:\r\n pygame.draw.polygon(ventana,color,( (730,190),(770,160),(810,190),(770,220) ))\r\n elif x==5:\r\n pygame.draw.polygon(ventana,color,( (430,290),(470,260),(510,290),(470,320) ))\r\n elif x==6:\r\n pygame.draw.polygon(ventana,color,( (530,290),(570,260),(610,290),(570,320) ))\r\n elif x==7:\r\n pygame.draw.polygon(ventana,color,( (630,290),(670,260),(710,290),(670,320) ))\r\n elif x==8:\r\n pygame.draw.polygon(ventana,color,( (730,290),(770,260),(810,290),(770,320) ))\r\n elif x==9:\r\n pygame.draw.polygon(ventana,color,( (430,390),(470,360),(510,390),(470,420) ))\r\n elif x==10:\r\n pygame.draw.polygon(ventana,color,( (530,390),(570,360),(610,390),(570,420) ))\r\n elif x==11:\r\n pygame.draw.polygon(ventana,color,( (630,390),(670,360),(710,390),(670,420) ))\r\n elif x==12:\r\n pygame.draw.polygon(ventana,color,( (730,390),(770,360),(810,390),(770,420) ))\r\n elif x==13:\r\n pygame.draw.polygon(ventana,color,( (430,490),(470,460),(510,490),(470,520) ))\r\n elif x==14:\r\n pygame.draw.polygon(ventana,color,( (530,490),(570,460),(610,490),(570,520) ))\r\n elif x==15:\r\n pygame.draw.polygon(ventana,color,( (630,490),(670,460),(710,490),(670,520) ))\r\n elif x==16:\r\n pygame.draw.polygon(ventana,color,( (730,490),(770,460),(810,490),(770,520) ))\r\n def dibujartriangulos(x,color):\r\n \"\"\"\r\n Esta funcion, dibuja cada uno de las 
figuras de forma triangular del juego en base a su generacion aleatoria.\r\n :param int x: Representa la casilla elegida, y en base a este valor, se dibuja un cuadrado en determinado lugar\r\n :param tuple color: Representa un color determinado, el cual se encuentra escrito en base a sus valores RGB\r\n :return None\r\n \"\"\"\r\n if x==1:\r\n pygame.draw.polygon(ventana,color,( (430,160),(510,160),(470,220) ))\r\n elif x==2:\r\n pygame.draw.polygon(ventana,color,( (530,160),(610,160),(570,220) ))\r\n elif x==3:\r\n pygame.draw.polygon(ventana,color,( (630,160),(710,160),(670,220) ))\r\n elif x==4:\r\n pygame.draw.polygon(ventana,color,( (730,160),(810,160),(770,220) ))\r\n elif x==5:\r\n pygame.draw.polygon(ventana,color,( (430,260),(510,260),(470,320)))\r\n elif x==6:\r\n pygame.draw.polygon(ventana,color,( (530,260),(610,260),(570,320) ))\r\n elif x==7:\r\n pygame.draw.polygon(ventana,color,( (630,260),(710,260),(670,320) ))\r\n elif x==8:\r\n pygame.draw.polygon(ventana,color,( (730,260),(810,260),(770,320) ))\r\n elif x==9:\r\n pygame.draw.polygon(ventana,color,( (430,360),(510,360),(470,420) ))\r\n elif x==10:\r\n pygame.draw.polygon(ventana,color,( (530,360),(610,360),(570,420) ))\r\n elif x==11:\r\n pygame.draw.polygon(ventana,color,( (630,360),(710,360),(670,420) ))\r\n elif x==12:\r\n pygame.draw.polygon(ventana,color,( (730,360),(810,360),(770,420) ))\r\n elif x==13:\r\n pygame.draw.polygon(ventana,color,( (430,460),(510,460),(470,520) ))\r\n elif x==14:\r\n pygame.draw.polygon(ventana,color,( (530,460),(610,460),(570,520) ))\r\n elif x==15:\r\n pygame.draw.polygon(ventana,color,( (630,460),(710,460),(670,520) ))\r\n elif x==16:\r\n pygame.draw.polygon(ventana,color,( (730,460),(810,460),(770,520) ))\r\n\r\n def iniciarjuego():\r\n \"\"\"\"\r\n Esta funcion, crea cuatro figuras de cada una de las posibles, dos de ellas con diferente color de las otras dos.\r\n \"\"\"\r\n for i in range(4):\r\n if i == 1 or i == 0:\r\n color = (250,208,120,98)\r\n else:\r\n color = (159, 250, 120,98)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujarcuadrados(x, color)\r\n cuadrados.remove(x)\r\n\r\n for i in range(4):\r\n if i == 0 or i == 1:\r\n color = (195, 139, 255,100)\r\n else:\r\n color = (250, 242, 120,98)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujarcirculos(x, color)\r\n cuadrados.remove(x)\r\n\r\n for i in range(4):\r\n if i == 0 or i == 1:\r\n color = (250, 145, 137,98)\r\n else:\r\n color = (126, 139, 250,98)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujartriangulos(x, color)\r\n cuadrados.remove(x)\r\n\r\n for i in range(4):\r\n if i == 0 or i == 1:\r\n color = (176, 255, 237,100)\r\n else:\r\n color = (255, 176, 228,100)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujardiamantes(x, color)\r\n cuadrados.remove(x)\r\n\r\n def mostrar(posicion_del_mouse):\r\n \"\"\"\r\n Esta funcion muestra segun la casilla que clickee el jugador.\r\n :param tuple posicion_del_mouse: Este parametro representa segun clickee el jugador (posicion en x y y).\r\n \"\"\"\r\n numerodecuadrado = numerocuadrado(posicion_del_mouse)\r\n ubicaciondelcuadrado=ubicacion_cuadrado(posicion_del_mouse)\r\n if ubicaciondelcuadrado==0 or ubicaciondelcuadrado==1:\r\n color = (250,208,120,98)\r\n dibujarcuadrados(numerodecuadrado,color)\r\n elif ubicaciondelcuadrado==2 or ubicaciondelcuadrado==3:\r\n color = (159, 250, 120,98)\r\n dibujarcuadrados(numerodecuadrado,color)\r\n elif ubicaciondelcuadrado==4 or 
ubicaciondelcuadrado==5:\r\n color = (195, 139, 255,100)\r\n dibujarcirculos(numerodecuadrado,color)\r\n elif ubicaciondelcuadrado==6 or ubicaciondelcuadrado==7:\r\n color = (250, 242, 120,98)\r\n dibujarcirculos(numerodecuadrado,color)\r\n elif ubicaciondelcuadrado==8 or ubicaciondelcuadrado==9:\r\n color = (250, 145, 137,98)\r\n dibujartriangulos(numerodecuadrado,color)\r\n elif ubicaciondelcuadrado==10 or ubicaciondelcuadrado==11:\r\n color = (126, 139, 250,98)\r\n dibujartriangulos(numerodecuadrado,color)\r\n elif ubicaciondelcuadrado==12 or ubicaciondelcuadrado==13:\r\n color = (176, 255, 237,100)\r\n dibujardiamantes(numerodecuadrado,color)\r\n elif ubicaciondelcuadrado==14 or ubicaciondelcuadrado==15:\r\n color = (255, 176, 228,100)\r\n dibujardiamantes(numerodecuadrado,color)\r\n\r\n def esconderfichas(posicion_del_mouse):\r\n \"\"\"\"\r\n Esta funcion, esconde las fichas, si la seleccion del usuario no es la correcta.\r\n :param tuple posicion_del_mouse: :param tuple primeraeleccion: Representa la posicion de donde clickeo el jugador (tanto en el ejex como en el eje y).\r\n \"\"\"\r\n numerodecuadrado=numerocuadrado(posicion_del_mouse)\r\n for j in range(16):\r\n if numerodecuadrado==j+1:\r\n pygame.draw.rect(ventana,(135,206,250),Totalcuadrados[j].inflate(-10,-10))\r\n\r\n def encontrarparejas(primeraeleccion,segundaeleccion):\r\n \"\"\"\r\n Esta funcion determina si se encontro una pareja.\r\n :param tuple primeraeleccion: Representa la posicion de donde clickeo el jugador (tanto en el ejex como en el eje y).\r\n :param tuple segundaeleccion: Representa la segunda posicion de donde clickeo el jugador (tanto en el ejex como en el eje y).\r\n Retorna un booleano\r\n \"\"\"\r\n x=ubicacion_cuadrado(primeraeleccion)\r\n y=ubicacion_cuadrado(segundaeleccion)\r\n if (x==0 and y==1 or x==1 and y==0)\\\r\n or (x == 2 and y == 3 or x == 3 and y == 2) \\\r\n or (x == 4 and y == 5 or x == 5 and y == 4)\\\r\n or (x == 6 and y == 7 or x == 7 and y == 6)\\\r\n or (x == 8 and y == 9 or x == 9 and y == 8) \\\r\n or (x==10 and y==11 or x==11 and y==10)\\\r\n or (x==12 and y==13 or x==13 and y==12)\\\r\n or (x==14 and y==15 or x==15 and y==14):\r\n return True\r\n\r\n def ubicacion_cuadrado(posicion_del_mouse):\r\n \"\"\"\r\n Esta funcion, retorna la figura que se encuentra en determinado indice segun se clickee.\r\n :param tuple posicion_del_mouse: Representa la posicion de donde clickeo el jugador (tanto en el ejex como en el eje y).\r\n Retorna la figura de determinado indice\r\n \"\"\"\r\n for j in range(16):\r\n if Totalcuadrados[j].collidepoint(posicion_del_mouse):\r\n return listafiguras.index(j+1)\r\n\r\n def dibujartablero():\r\n \"\"\"\"\r\n Esta funcion, dibuja los cuadrados que hacen nuestro tablero.\r\n :return None\r\n \"\"\"\r\n ventana.fill((135,206,250)) #colocamos el color de fondo de nuestra pantalla\r\n for j in range(16): #iteramos entre los cuadrados ya diseñados, eligiendo parametros como el color y el grueso de sus lineas.\r\n pygame.draw.rect(ventana,(255,255,255),Totalcuadrados[j],10)\r\n\r\n def numerocuadrado(posicion_del_mouse):\r\n \"\"\"\r\n Esta funcion, retorna el indice de determinado cuadrado segun se clickee.\r\n :param tuple posicion_del_mouse: Representa la posicion de donde clickeo el jugador (tanto en el ejex como en el eje y).\r\n Retorna el indice de terminada posicion\r\n \"\"\"\r\n\r\n for j in range(16):\r\n if Totalcuadrados[j].collidepoint(posicion_del_mouse):\r\n return j+1\r\n\r\n def main():\r\n \"\"\"\"\r\n Funcion proncipal del juego.\r\n 
:return None\r\n \"\"\"\r\n pygame.init()\r\n global ventana\r\n ventana= pygame.display.set_mode((1280,700))\r\n ventana.fill((135,206,250))\r\n pygame.display.set_caption('Encuentra las parejas')\r\n logo = pygame.image.load(\"../recursor/icono.ico\")\r\n pygame.display.set_icon(logo)\r\n pygame.display.update()\r\n dibujartablero()\r\n iniciarjuego()\r\n titulo_principal=pygame.image.load(\"../recursor/Imagenes juegos/parejas_2.png\")\r\n ventana.blit(titulo_principal, (405,0))\r\n pygame.display.update()\r\n pygame.time.wait(3000)\r\n dibujartablero()\r\n ventanaprincipal()\r\n pygame.display.update()\r\n prueba=0\r\n eleccioncorrecta=[]\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type==QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type==pygame.MOUSEBUTTONUP:\r\n posicion_del_mouse=pygame.mouse.get_pos()\r\n mostrar(posicion_del_mouse)\r\n pygame.display.update()\r\n if prueba==0:\r\n primeraeleccion=posicion_del_mouse\r\n if numerocuadrado(primeraeleccion) in eleccioncorrecta:\r\n prueba=0\r\n else:\r\n prueba=1\r\n else:\r\n segundaeleccion=posicion_del_mouse\r\n if numerocuadrado(segundaeleccion) in eleccioncorrecta:\r\n prueba=1\r\n else:\r\n prueba=0\r\n if not (numerocuadrado(primeraeleccion) in eleccioncorrecta) and not numerocuadrado(segundaeleccion) in eleccioncorrecta:\r\n if encontrarparejas(primeraeleccion,segundaeleccion):\r\n eleccioncorrecta.append(numerocuadrado(primeraeleccion))\r\n eleccioncorrecta.append(numerocuadrado(segundaeleccion))\r\n else:\r\n pygame.time.wait(1000)\r\n esconderfichas(primeraeleccion)\r\n esconderfichas(segundaeleccion)\r\n pygame.display.update()\r\n\r\n if len(eleccioncorrecta)==16:\r\n ganador=pygame.image.load('../recursor/Imagenes juegos/Ganadorgg0000.png')\r\n ventana.blit(ganador,(420, 140))\r\n pygame.display.update()\r\n time.sleep(4)\r\n pygame.quit()\r\n break\r\n\r\n main()", "def maps(offices, fixed):\n with Image(filename=BAT_B) as page, Drawing() as draw:\n for office, x, y in MAP_POSITIONS:\n label = door_label(offices[office], logo=False)\n if label:\n draw.composite(\"over\", x, y, label.width / 3, label.height / 3, label)\n draw(page)\n page.save(filename=\"generated_map%s.png\" % (\"_fixed\" if fixed else \"\"))", "def PINTARMATRIXDEBOTONES(self):\n for i in range(0, 26):\n for j in range(0, 26):\n x0 = ((i+1)*24) + 26\n y0 = ((j)*21) \n self.telaMAPA.create_rectangle(x0, 545 - y0, x0 + 10, 550 - y0, tag=str(i)+\"-\"+str(j))\n # Se guarda la inforamcion\n info = (x0, 545 - y0, str(i)+\"-\"+str(j))\n self.bononesPlanoXY.append(info)", "def cargar_mapa (self):\n\n stream_cargar = open ('yo_mapa.txt', 'rt',encoding=\"utf-8\")\n mapa=stream_cargar.readlines()\n \n a = mapa[0].split(\"X\")\n mapa__I=[]\n mapa__D=[]\n toca = \"izda\"\n for lista in a:\n pasar=\"X\"\n linea1=[]\n trozo=\"\"\n for i in lista:\n if pasar==\"X\":\n \n borrar = [\"[\",\"'\"]\n if i in borrar:\n pass\n elif i == \",\" or i == \"]\":\n linea1.append(trozo)\n trozo=\"\"\n pasar=\"V\"\n elif i == \"S\":\n toca=\"dxa\"\n else:\n trozo+=i\n\n else:\n pasar=\"X\"\n pass\n if toca == \"izda\":\n mapa__I.append(linea1)\n else:\n mapa__D.append(linea1)\n\n mapa_cargado=[]\n for i in range (len(mapa__I)):\n\n mapa_cargado.append(mapa__I[i]+mapa__D[i]) \n\n stream_cargar=(close)\n return mapa_cargado", "def imprime_mapa(lat,lon):\r\n\r\n lista=[\"colegio\", \"starbucks\",\"estadio de baloncesto\", \"bar\",\"restaurante vegano\",\"peluqueria perros\",\"aeropuerto\"]\r\n \r\n tipo=list()\r\n latitud=list()\r\n longitud=list()\r\n\r\n 
for q in lista:\r\n resultado=foursquare_visual({'latitud':lat, 'longitud':lon},q)\r\n \r\n for r in resultado:\r\n tipo.append(q.replace(\" \",\"_\"))\r\n latitud.append(r['latitud'])\r\n longitud.append(r['longitud'])\r\n #if q == \"colegio\" or q == \"peluqueria perros\":\r\n # print(pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud}))\r\n # raise\r\n \r\n \r\n df=pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud})\r\n\r\n \r\n\r\n mapa = Map(location=[lat,lon],zoom_start=15)\r\n\r\n empresa = {\r\n \"location\":[lat, lon ],\r\n \"tooltip\" : \"Empresa\"\r\n }\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-dot-circle-o\",\r\n icon_color = \"white\"\r\n )\r\n Marker(**empresa,icon = icon ).add_to(mapa)\r\n\r\n\r\n for i, row in df.iterrows():\r\n establecimiento = {\r\n \"location\":[row[\"latitud\"], row[\"logitud\"]],\r\n \"tooltip\" : row[\"tipo\"].replace(\"_\",\" \").capitalize()\r\n }\r\n\r\n if row[\"tipo\"] == \"starbucks\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"fa-coffee\",\r\n icon_color = \"white\"\r\n )\r\n \r\n elif row[\"tipo\"] == \"restaurante_vegano\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"leaf\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"colegio\":\r\n icon = Icon(color = \"blue\",\r\n prefix = \"fa\",\r\n icon = \"fa-graduation-cap \",\r\n icon_color = \"black\"\r\n )\r\n \r\n elif row[\"tipo\"] == \"peluqueria_perros\":\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-paw\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"estadio_de_baloncesto\":\r\n icon = Icon(color = \"orange\",\r\n prefix = \"fa\",\r\n icon = \"fa-futbol-o \",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"aeropuerto\":\r\n icon = Icon(color = \"white\",\r\n prefix = \"fa\",\r\n icon = \"fa-plane\",\r\n icon_color = \"black\"\r\n )\r\n elif row[\"tipo\"] == \"bar\":\r\n icon = Icon(color = \"pink\",\r\n prefix = \"fa\",\r\n icon = \"fa-glass\",\r\n icon_color = \"white\"\r\n )\r\n \r\n else:\r\n prefix = \"fa\",\r\n icon = \"briefcase\",\r\n icon_color = \"black\" \r\n Marker(**establecimiento,icon = icon ).add_to(mapa)\r\n return mapa", "def ubicar_portaviones():\n tamano = Portaviones.tamano #se importa el tamano del barco desde su clase\n cantidad = Portaviones.cantidad #se importa la cantidad de barcos de este tamano desde su clase\n orientacion = orientaciones[(randint(0, 1))] #elige aleatoriamente el index de la tupla orientaciones = (\"Vertical\", \"Horizontal\")\n while cantidad > 0:\n if orientacion == \"Vertical\":\n #se eligen random las filas y las columnas\n coor_fila = randint(1, numero_filas) \n coor_columna = randint(1, numero_columnas)\n while (coor_fila + tamano) > 10: #como su orientacion es vertical la fila incial del barco mas su tamano (3) no puede ser mayor que 10 porque se saldria del mapa\n coor_fila = randint(1,numero_filas)\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion) #lista donde se ubicaran temporalmente las ubicaciones de los barcos\n while len(lista_temporal) < tamano: #sacar las posiciones restantes \n coor_fila += 1\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion)\n cantidad -= 1\n elif orientacion == \"Horizontal\":\n #se eligen random las filas y las columnas\n coor_fila = randint(1, numero_filas)\n coor_columna = randint(1, numero_columnas)\n while (coor_columna + tamano) > 10: #como su orientacion es horizontal la 
columna incial del barco mas su tamano (3) no puede ser mayor que 10 porque se saldria del mapa\n coor_columna = randint(1, numero_columnas)\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion)\n while len(lista_temporal) < tamano: #sacar las posiciones restantes\n coor_columna += 1\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion)\n for x in lista_temporal:\n lista_ubicacion_barco.append(x) #se agregan las ubicaciones ya validadas a una lista general donde iran todas las posiciones de los barcos\n coordenadas_portaviones.append(x)\n lista_temporal.clear() #se limpia la lista para que pueda ser usada en los siguientes barcos\n cantidad -= 1", "def PINTARPANTALLA(self):\n # Se configura la pantalla principal\n self.pantalla.title(\"Modelado de interiores\")\n self.pantalla.geometry(\"900x600\")\n\n # Se configura la pantalla del mapa interior\n self.telaMAPA.place(x=0, y=0)\n\n # Se configura la pantalla de control\n self.telaPANELDECONTROL.place(x=700, y=0)\n self.btnADDpunto.place(x=20, y=20)\n self.btnEliminarPunto.place(x=100, y=20)\n self.btnModificarPunto.place(x=20, y=60)\n self.btnLoadJSON.place(x=100, y=60)\n self.btnVerArbol.place(x=50, y=560)\n self.btnRepresetar.place(x=20, y=100)\n self.btnRepresetarPasoAPaso.place(x=20, y=140)\n self.btnConfiguracion.place(x=120, y=140)\n self.btnPlanoAlternativo.place(x=140, y=100)\n self.btnADDIMG.place(x=20, y=200)\n self.btnREMOVEIMG.place(x=120, y=200)\n # Se pintan los botones de los objetos de interior\n self.btnSilla.place(x=20, y=240)\n self.btnMesa.place(x=80, y=240)\n self.btnNevera.place(x=140, y=240)\n self.btnCama.place(x=20, y=290)\n self.btnSofa.place(x=80, y=290)\n self.btnTV.place(x=140, y=290)\n self.btnLampara.place(x=20, y=340)\n self.btnPlanta.place(x=80, y=340)\n self.btnSanitario.place(x=140, y=340)\n self.btnLavamanos.place(x=20, y=390)\n self.btnDucha.place(x=80, y=390)\n\n # Se pintan las lineas\n self.PINTARLEYENDAPLANOXY()\n # Se lanza el evento que actualiza la pantalla\n self.pantalla.after(0, self.update_graphic)\n self.pantalla.mainloop()", "def build_map(n=30,m=30, preset=True, filename='/home/sji367/small_grid.mat', key='new_grid'):\n if preset:\n the_map = []\n row = [0] * n\n for i in range(m):\n the_map.append(list(row))\n \n # fillout the map matrix with a '+' pattern\n for x in range(n / 8, n * 7 / 8):\n the_map[m / 2][x] = 1\n for y in range(m/8, m * 7 / 8):\n the_map[y][n / 2] = 1\n \n # randomly select start and finish locations from a list\n sf = []\n sf.append((0, 0, n - 1, m - 1))\n sf.append((0, m - 1, n - 1, 0))\n sf.append((n / 2 - 1, m / 2 - 1, n / 2 + 1, m / 2 + 1))\n sf.append((n / 2 - 1, m / 2 + 1, n / 2 + 1, m / 2 - 1))\n sf.append((n / 2 - 1, 0, n / 2 + 1, m - 1))\n sf.append((n / 2 + 1, m - 1, n / 2 - 1, 0))\n sf.append((0, m / 2 - 1, n - 1, m / 2 + 1))\n sf.append((n - 1, m / 2 + 1, 0, m / 2 - 1))\n (xStart, yStart, xFinish, yFinish) = random.choice(sf)\n else:\n grid = loadmat(filename)\n the_map = grid[key]\n xStart = 19\n yStart = 31\n xFinish = 67\n yFinish = 98\n \n return the_map, xStart, yStart, xFinish, yFinish", "def MakePmapProgram(MaterialInfoList,OutputPath,GasType,GasAtomType,SpecialPairList,GasAtomDictionary,\r\n MaterialAtomDictionary,GridSpacingP,HEPCP,CutOff,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting):\r\n\r\n def MakeAtomAtomFile(PmapOutputPath,MaterialInfo,GasAtomType,SpecialPairList,GasAtomDictionary,MaterialAtomDictionary,CutOff):\r\n\r\n with open('%s/atom_atom_file' % (PmapOutputPath), 'w') as AtomAtomFile:\r\n\r\n 
AtomAtomFile.write('-'.center(80, '-'))\r\n AtomAtomFile.write('\\n')\r\n\r\n for i in range(len(MaterialInfo[5])):\r\n for j in range(len(MaterialInfo[5])):\r\n if i <= j:\r\n AtomAtomFile.write('%-10s%-10sOFF\\n' % (MaterialInfo[5][i], MaterialInfo[5][j]))\r\n\r\n for k in range(len(GasAtomType)):\r\n for l in range(len(GasAtomType)):\r\n if k <= l:\r\n Key=False\r\n for SpecialPair in SpecialPairList:\r\n if GasAtomType[k] in SpecialPair[0] and GasAtomType[l] in SpecialPair[0] and GasAtomType[k]!=GasAtomType[l]:\r\n Key=True\r\n if Key==False:\r\n num1 = GasAtomDictionary.get(GasAtomType[k])\r\n num2 = GasAtomDictionary.get(GasAtomType[l])\r\n sig1 = str('%.3f' % ((float(num1[0]) + float(num2[0])) / 2))\r\n eps1 = str('%.3f' % ((float(num1[1]) * float(num2[1])) ** 0.5))\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%[email protected]\\n%-10s%-10s%-10sHICUT@%[email protected]\\n'%(GasAtomType[k],GasAtomType[l],'LJ',sig1,eps1,CutOff,GasAtomType[k],GasAtomType[l],'WFCOUL',CutOff))\r\n\r\n for h in range(len(GasAtomType)):\r\n for g in range(len(MaterialInfo[5])):\r\n Key = False\r\n for SpecialPair in SpecialPairList:\r\n if GasAtomType[h] in SpecialPair[0] and MaterialInfo[5][g] in SpecialPair[0]:\r\n Key = True\r\n if Key==False:\r\n num3 = GasAtomDictionary.get(GasAtomType[h])\r\n num4 = MaterialAtomDictionary.get(MaterialInfo[5][g])\r\n sig2 = str('%.3f' % ((float(num3[0]) + float(num4[0])) / 2))\r\n eps2 = str('%.3f' % ((float(num3[1]) * float(num4[1])) ** 0.5))\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%[email protected]\\n%-10s%-10s%-10sHICUT@%[email protected]\\n'%(GasAtomType[h],MaterialInfo[5][g],'LJ',sig2,eps2,CutOff,GasAtomType[h],MaterialInfo[5][g],'WFCOUL',CutOff))\r\n\r\n for m in SpecialPairList:\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%[email protected]\\n%-10s%-10s%-10sHICUT@%[email protected]\\n'%(m[0][0],m[0][1],'LJ',m[1][0],m[1][1],CutOff,m[0][0],m[0][1],'WFCOUL',CutOff))\r\n\r\n AtomAtomFile.write('-'.center(80, '-'))\r\n\r\n def MakeIntramolecularFile(PmapOutputPath,MaterialInfo,GasAtomType,GasAtomDictionary):\r\n\r\n with open('%s/intramolecular_file' % (PmapOutputPath), 'w') as IntraFile:\r\n IntraFile.write('Intra: %s'%(MaterialInfo[7]))\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0]!='0':\r\n IntraFile.write('\\nIntra: %s'%(i))\r\n\r\n def MakeMoleMolePmapFile(PmapOutputPath,MaterialInfo,GasAtomType,GasAtomDictionary):\r\n\r\n with open('%s/mole_mole_pmap_file' % (PmapOutputPath), 'w') as MoleMolePmap:\r\n MoleMolePmap.write('''%s %s NCOUL OFF\r\n%s %s COUL OFF\\n\\n'''%(MaterialInfo[7],MaterialInfo[7],MaterialInfo[7],MaterialInfo[7]))\r\n\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0]!='0':\r\n MoleMolePmap.write('''%s %s NCOUL OFF\r\n%s %s COUL OFF\r\n\r\n%s %s NCOUL BASIC LJ FAST\r\n%s %s COUL OFF\\n\\n''' % (i, i, i, i, i,MaterialInfo[7], i, MaterialInfo[7]))\r\n\r\n def MakePmapMaker(PmapOutputPath,MaterialInfo,GasAtomType,GridSpacingP,HEPCP,GasAtomDictionary):\r\n\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0]!='0':\r\n with open('%s/pmap_maker_%s_in_%s.ctr'%(PmapOutputPath,i,MaterialInfo[7]), 'w') as PmapMaker:\r\n PmapMaker.write('''------ General Information ------------------------------------------\r\n%s molecule in %s\r\n1 # No. of iterations\r\n1 # No. of steps between writes to output/log file\r\n2 # No. 
of steps between writes to crash file\r\n2 # No. of steps between writes to config. file\r\n1 # Start numbering simulations from .\r\n30728 # Iseed\r\n1 # specifies contents of config file\r\n%s_in_%s.res # Restart File to write to\r\n%s_in_%s.con # Configuration File\r\n\r\n------ Atomic Types --------------------------------------------------\r\n%s # number of atomic types\r\n\r\n%s\r\n%s.atm'''%(i,MaterialInfo[7],i,MaterialInfo[7],i,MaterialInfo[7],len(MaterialInfo[5])+1,i,i))\r\n\r\n for j in MaterialInfo[5]:\r\n PmapMaker.write('\\n\\n%s\\n%s.atm' % (j,j))\r\n\r\n PmapMaker.write('''\\n------ Molecule Types -------------------------------------------------\r\n2\r\n\r\n%s\r\n%s.mol\r\n\r\n%s\r\n%s.mol\r\n------ Simulation Cell Information ------------------------------------\r\n%s # Fundamental cell file\r\n%s # No. of unit cells in x, y, z direction\r\n1, 1, 1 # (1 = Periodic) in x, y, z\r\n------ Forcefield Information -------------------------------------------\r\nBASIC\r\nMOL\r\natom_atom_file # atom-atom interaction file\r\nmole_mole_pmap_file # sorbate-sorbate interaction file\r\nintramolecular_file # intramolecular interaction file/specification\r\n------ Mapmaker Information -----------------------------------------------\r\n1 # Number of maps to make\r\n\r\n%s # Sorbent to map\r\n%s # Sorbate to probe map with\r\nNCOUL LJ # Interaction type to map\r\n%s # Approxiamte grid spacing (Ang)\r\n%s # High end potential cutoff (kJ/mol)\r\n%s_in_%s.pmap # Map filename or AUTO\r\n------ Configuration Initialization -------------------------------------\r\n%s # Sorbate_Type\r\nMOLECULE NULL\r\n%s # Sorbate_Type\r\nFIXED NULL''' % (i, i,MaterialInfo[7],MaterialInfo[7],MaterialInfo[7],', '.join(MaterialInfo[4]),MaterialInfo[7],i,GridSpacingP,HEPCP,i,MaterialInfo[7],i,MaterialInfo[7]))\r\n\r\n def MakeTorqueFile(PmapOutputPath,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting,GasAtomType,GasAtomDictionary,MaterialInfo,OutputPath):\r\n\r\n Node = random.choice(Nodes)\r\n\r\n with open('%s/run_pmapmaker.pbs' % (PmapOutputPath), 'w') as Torque:\r\n Torque.write('''#!/bin/bash\r\n#PBS -l nodes=%s\r\n#PBS -N MuSiC_pmap.%s\r\n#PBS -o music_pmap_jobs.out\r\n#PBS -j oe\r\n\r\n#\r\n# The number of processors you desire is indicated by replacing\r\n# <nproc> above.\r\n#\r\n\r\n#\r\n# GROMACS path and arguments to mdrun :\r\n#\r\ncd $PBS_O_WORKDIR\r\n\r\n# =============== Environment Setting ============================ #\\n''' % (Node, TaskSuffix))\r\n\r\n for i in TorqueSetting:\r\n Torque.write('%s' % (i))\r\n\r\n Torque.write('''# =============== Don't Change Above Setting ===================== #\r\n\r\necho \"============The computed nodes============\"\r\ncp -f $PBS_NODEFILE NODE.txt\r\necho \"User: \" $USER\r\ncat $PBS_NODEFILE\r\necho \"Job ID: \" $PBS_JOBID\r\necho \"Job Cookie: \" $PBS_JOBCOOKIE\r\necho \"Using executable: \" `which mpirun`\r\necho `date`\r\necho \"============Finished setting==============\"\r\n\r\n# =========== Setting Jobs ============================ #\\n''')\r\n\r\n for j in MuSiCSetting:\r\n Torque.write('%s' % (j))\r\n\r\n Torque.write('''export ATOMSDIR=%s\r\n export MOLSDIR=%s\r\n export PMAPDIR=%s\r\n export EMAPDIR=%s\r\n export SMAPDIR=%s''' % (os.path.join(OutputPath, 'Atoms'), os.path.join(OutputPath, 'Mols'),\r\n os.path.join(OutputPath, 'Maps'), os.path.join(OutputPath, 'Maps'),\r\n os.path.join(OutputPath, 'Maps')))\r\n\r\n Torque.write('''# =========== Setting Jobs ============================ #\r\n\r\n# +++++++++++++++ Start Computing +++++++++++++++++++++ 
#\r\n\r\nTIME_DIR=$(date '+%Y-%m-%d_%H-%M-%S')\r\nTIME_DIR=\"${USER}_jobs_${TIME_DIR}_${PBS_JOBID}\"\r\nif [ -d /utmp ]; then\r\n TEMP_DIR=/utmp/${USER}/${TIME_DIR}\r\nelse\r\n TEMP_DIR=/temp/${USER}/${TIME_DIR}\r\nfi\r\nmkdir -p ${TEMP_DIR}\r\ncp -rf * ${TEMP_DIR}\r\ncd ${TEMP_DIR}\r\nrm -f music_pmap_jobs.out\r\necho \"The temp direcotry: \" ${TEMP_DIR}\r\necho \"============Finished setting==============\"\r\n\r\necho \"+++++++++++++ Run MuSic ++++++++++++++++++++++++++++\"\\n''')\r\n\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0] != '0':\r\n Torque.write('music_mapmaker pmap_maker_%s_in_%s.ctr > pmap_maker_%s_in_%s.txt\\necho `date`\\n'%(i,MaterialInfo[7],i,MaterialInfo[7]))\r\n\r\n Torque.write('''echo \"+++++++++++++ Finish MuSic +++++++++++++++++++++++++\"\r\n\r\ncd $PBS_O_WORKDIR\r\ncp -rf ${TEMP_DIR}/* .\r\nrm -rf ${TEMP_DIR}\r\n\r\n\r\necho \"All files were copied back!\"\r\necho \"The work direcotry: \" $PBS_O_WORKDIR\r\necho `date`\r\necho \"============Finished Job ==============\"''')\r\n\r\n def main():\r\n\r\n for MaterialInfo in MaterialInfoList:\r\n if MaterialInfo[6]==True:\r\n PmapOutputPath='%s/%s/%s/%s'%(OutputPath,'MakePmap','_'.join(GasType),MaterialInfo[7])\r\n if os.path.exists(PmapOutputPath):\r\n pass\r\n else:\r\n os.makedirs(PmapOutputPath)\r\n\r\n MakeAtomAtomFile(PmapOutputPath,MaterialInfo,GasAtomType,SpecialPairList,GasAtomDictionary,MaterialAtomDictionary,CutOff)\r\n MakeMoleMolePmapFile(PmapOutputPath, MaterialInfo, GasAtomType,GasAtomDictionary)\r\n MakePmapMaker(PmapOutputPath,MaterialInfo,GasAtomType,GridSpacingP,HEPCP,GasAtomDictionary)\r\n MakeIntramolecularFile(PmapOutputPath, MaterialInfo, GasAtomType,GasAtomDictionary)\r\n MakeTorqueFile(PmapOutputPath,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting,GasAtomType,GasAtomDictionary,MaterialInfo,OutputPath)\r\n\r\n if __name__ == '__main__':\r\n main()", "def createMap(self):\n map = {}\n for rows in xrange(0,(size[1]/50)):\n for columns in xrange(0,(size[0]/50)):\n if rows == (size[1]/50)-1 or rows == 0 or columns== (size[0]/50)-1 or columns==0:\n map.update({(rows,columns):\"block\"})\n elif(rows%3 == 0):\n map.update({(rows,columns):random.choice(map_options)})\n else:\n map.update({(rows,columns):random.choice(map_options[:1])})\n\n self.map = map", "def create_map():\n pass\n # for line in range(0, shared.lines):\n # map_data[line][0] = (1, -1)\n # map_data[line][shared.columns - 1] = (1, -1)\n #\n # for column in range(0, shared.columns):\n # map_data[0, column] = (-1, 1)\n # # if column <= shared.left_space or column > shared.columns - shared.left_space:\n # map_data[shared.lines - 1, column] = (-1, 1)", "def create_map(width, height, pixels):\n\n\n\n\n def index_to_xy(i, width, height):\n \"\"\" Takes 0 based index going line wise from top\n left to bottom right, returns x, y coordinates so\n that 0,0 is on bottom left corner\n \"\"\"\n x = i % width\n y = i // width\n y*= -1\n y+= height - 1\n return (x,y)\n\n def place_terrain(type, i):\n \"\"\"This won't return anything, just do side effects\n\n The object \"gameLogic\" is used to place the object\n initially. It doesn't matter where this object is,\n as long as it exists. 
There must be an easier way,\n but this works.\n \"\"\"\n x,y = index_to_xy(i, width, height)\n\n object_name = terrain_types.get(type, \"water\")\n\n if ob[\"fast_create\"] > 0 and not (x%ob[\"fast_create\"] == 0 and y%ob[\"fast_create\"] == 0):\n return\n\n if object_name != \"water\":\n object = scene.addObject(object_name, \"gameLogic\")\n object.worldPosition = (x,y,0)\n\n\n list(map( (lambda tup : place_terrain(tup[1], tup[0])), list(enumerate(pixels)) ))", "def CrearLlaveMusicaGenero(catalog):\n Lista = ['Reggae' , 'Down-tempo' , 'Chill-out' , 'Hip-hop' , 'Jazz and Funk' , 'Pop' , \n 'R&B' , 'Rock' , 'Metal']\n \n for genero in Lista:\n mp.put(catalog['musicaGenero'], genero, om.newMap('RBT'))", "def create_data_ia(map_size, enemy_id, ia_id):\n data_ia = {'player1': {},\n 'player2': {},\n 'main_turn': 1,\n 'attack_turn': 0,\n 'map_size': map_size,\n 'enemy_id': enemy_id,\n 'ia_id': ia_id}\n\n\n order_unit = {}\n order_unit['if_left'] = [(2,3), (3,2), (1,3), (2,2), (3,1), (1,2), (2,1), (1,1)]\n order_unit['if_right'] = [(map_size -1, map_size -2), (map_size -2, map_size -1), (map_size, map_size -2), (map_size -1, map_size -1), (map_size -1, map_size -1), (map_size -2, map_size), (map_size, map_size-1), (map_size -1, map_size), (map_size, map_size)]\n\n for i in range(2):\n for line in range(1, 4):\n for column in range(1, 4):\n unit = 'E'\n life = 4\n\n if line >= 2 and column >= 2:\n unit = 'D'\n life = 10\n\n if line + column != 6:\n x_pos = abs(i * map_size - line + i)\n y_pos = abs(i * map_size - column + i)\n\n if i == 0:\n unit_id = (order_unit['if_left'].index((x_pos,y_pos))) + 1\n data_ia['player1'][(x_pos, y_pos)] = [unit, life, unit_id]\n else:\n unit_id = (order_unit['if_right'].index((x_pos,y_pos))) + 1\n data_ia['player2'][(x_pos, y_pos)] = [unit, life, unit_id]\n\n return data_ia", "def generate_map(self):\n\n # Create main streets first\n self.create_main_streets()\n\n # Then create the commercial buildings in the center of town\n self.create_commercial_center()\n\n # Then create the neighborhoods that populate the rest of the city\n while(self.create_neighborhood()):\n pass\n\n # Clean up any invalid buildings that were created\n self.delete_inaccessible_buildings()", "def demander_map(nom_maps):\n\n # On affiche la liste des labyrinthes disponibles\n map_msg = \"Labyrinthes existants : \"\n print(map_msg)\n\n # On affiche les choix possibles\n for i, nom in enumerate(nom_maps):\n print(\" {} - {}\".format(i + 1, nom))\n\n # Tant que l'input est incorrecte...\n indice = None\n while indice not in range(1, len(nom_maps) + 1):\n # ...On demande l'indice du labyrinthe\n try:\n indice = int(input(\"Entrez un numéro de labyrinthe pour commencer à jouer : \"))\n except:\n pass\n\n return indice - 1", "def juego_nuevo():\n show_title(\"Crear sopa de NxN letras\")\n nxn = pedir_entero(\"Ingrese un numero entero de la cantidad de\\nfilas y columnas que desea (Entre 10 y 20):\\n\",10,20)\n n_palabras = pedir_entero(\"Ingrese un numero entero de la cantidad de\\npalabas que deasea agregar (Entre 0 y %d):\\n\"%(nxn/2),0,(nxn/2))\n palabras = []\n palabra_min_caracteres = 3\n palabra_repetida = False\n while len(palabras)<n_palabras:\n if palabra_repetida :\n show_msg(\"Ingreso una palabra repetida\")\n palabra_repetida = False\n # Pedir una palabra que cumpla con los requisitos\n palabra = pedir_palabra(\"[%d|%d]Ingrese una palabra entre %d y %d caracteres: \"%(len(palabras)+1,n_palabras,palabra_min_caracteres,(nxn/2)),palabra_min_caracteres,(nxn/2))\n if palabra in palabras:\n 
palabra_repetida = True\n else :\n palabras.append(palabra)\n matrix = crear_matrix(nxn)\n matrix,posiciones,salteadas = procesar_palabras(matrix, nxn, palabras)\n matrix = completar_matrix(matrix, nxn)\n return procesar_juego(matrix,nxn,n_palabras,salteadas,posiciones)", "def addMapMusicaGenero(catalog, musica):\n #Reggae\n\n RBTreggaeEntry = mp.get(catalog['musicaGenero'], 'Reggae')\n RBTreggae = me.getValue(RBTreggaeEntry) \n EstaKey = om.contains(RBTreggae, musica['tempo'])\n\n if not(EstaKey) and (float(musica['tempo']) >= 60 and float(musica['tempo']) <= 90):\n ArtistList = lt.newList('ARRAY_LIST')\n om.put(RBTreggae, musica['tempo'], ArtistList)\n ListaArtistaEntry = om.get(RBTreggae, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTreggae, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Reggae', RBTreggae)\n elif EstaKey:\n ListaArtistaEntry = om.get(RBTreggae, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTreggae, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Reggae', RBTreggae)\n \n #Down-tempo\n\n RBTdown_tempoEntry = mp.get(catalog['musicaGenero'], 'Down-tempo')\n RBTdown_tempo = me.getValue(RBTdown_tempoEntry) \n EstaKey = om.contains(RBTdown_tempo, musica['tempo'])\n\n if not(EstaKey) and (float(musica['tempo']) >= 70 and float(musica['tempo']) <= 100):\n ArtistList = lt.newList('ARRAY_LIST')\n om.put(RBTdown_tempo, musica['tempo'], ArtistList)\n ListaArtistaEntry = om.get(RBTdown_tempo, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTdown_tempo, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Down-tempo', RBTdown_tempo)\n elif EstaKey:\n ListaArtistaEntry = om.get(RBTdown_tempo, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTdown_tempo, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Down-tempo', RBTdown_tempo)\n \n #Chill-out\n\n RBTchill_outEntry = mp.get(catalog['musicaGenero'], 'Chill-out')\n RBTchill_out = me.getValue(RBTchill_outEntry) \n EstaKey = om.contains(RBTchill_out, musica['tempo'])\n\n if not(EstaKey) and (float(musica['tempo']) >= 90 and float(musica['tempo']) <= 120):\n ArtistList = lt.newList('ARRAY_LIST')\n om.put(RBTchill_out, musica['tempo'], ArtistList)\n ListaArtistaEntry = om.get(RBTchill_out, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTchill_out, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Chill-out', RBTchill_out)\n elif EstaKey:\n ListaArtistaEntry = om.get(RBTchill_out, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTchill_out, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Chill-out', RBTchill_out)\n\n #Hip-hop\n\n RBThip_hopEntry = mp.get(catalog['musicaGenero'], 'Hip-hop')\n RBThip_hop = me.getValue(RBThip_hopEntry) \n EstaKey = om.contains(RBThip_hop, musica['tempo'])\n\n if not(EstaKey) and (float(musica['tempo']) >= 85 and float(musica['tempo']) <= 115):\n ArtistList = lt.newList('ARRAY_LIST')\n om.put(RBThip_hop, musica['tempo'], ArtistList)\n ListaArtistaEntry = om.get(RBThip_hop, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBThip_hop, musica['tempo'], ListaArtista)\n 
mp.put(catalog['musicaGenero'], 'Hip-hop', RBThip_hop)\n elif EstaKey:\n ListaArtistaEntry = om.get(RBThip_hop, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBThip_hop, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Hip-hop', RBThip_hop)\n \n #Jazz and Funk \n\n RBTjazzandfunkEntry = mp.get(catalog['musicaGenero'], 'Jazz and Funk')\n RBTjazzandfunk = me.getValue(RBTjazzandfunkEntry) \n EstaKey = om.contains(RBTjazzandfunk, musica['tempo'])\n\n if not(EstaKey) and (float(musica['tempo']) >= 120 and float(musica['tempo']) <= 125):\n ArtistList = lt.newList('ARRAY_LIST')\n om.put(RBTjazzandfunk, musica['tempo'], ArtistList)\n ListaArtistaEntry = om.get(RBTjazzandfunk, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTjazzandfunk, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Jazz and Funk', RBTjazzandfunk)\n elif EstaKey:\n ListaArtistaEntry = om.get(RBTjazzandfunk, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTjazzandfunk, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Jazz and Funk', RBTjazzandfunk)\n \n #Pop\n\n RBTpopEntry = mp.get(catalog['musicaGenero'], 'Pop')\n RBTpop = me.getValue(RBTpopEntry) \n EstaKey = om.contains(RBTpop, musica['tempo'])\n\n if not(EstaKey) and (float(musica['tempo']) >= 100 and float(musica['tempo']) <= 130):\n ArtistList = lt.newList('ARRAY_LIST')\n om.put(RBTpop, musica['tempo'], ArtistList)\n ListaArtistaEntry = om.get(RBTpop, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTpop, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Pop', RBTpop)\n elif EstaKey:\n ListaArtistaEntry = om.get(RBTpop, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTpop, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Pop', RBTpop)\n \n #R&B\n\n RBTrandbEntry = mp.get(catalog['musicaGenero'], 'R&B')\n RBTrandb = me.getValue(RBTrandbEntry) \n\n EstaKey = om.contains(RBTrandb, musica['tempo'])\n if not(EstaKey) and (float(musica['tempo']) >= 60 and float(musica['tempo']) <= 80):\n ArtistList = lt.newList('ARRAY_LIST')\n om.put(RBTrandb, musica['tempo'], ArtistList)\n ListaArtistaEntry = om.get(RBTrandb, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTrandb, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'R&B', RBTrandb)\n elif EstaKey:\n ListaArtistaEntry = om.get(RBTrandb, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTrandb, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'R&B', RBTrandb)\n\n #Rock\n\n RBTrockEntry = mp.get(catalog['musicaGenero'], 'Rock')\n RBTrock = me.getValue(RBTrockEntry) \n\n EstaKey = om.contains(RBTrock, musica['tempo'])\n if not(EstaKey) and (float(musica['tempo']) >= 110 and float(musica['tempo']) <= 140):\n ArtistList = lt.newList('ARRAY_LIST')\n om.put(RBTrock, musica['tempo'], ArtistList)\n ListaArtistaEntry = om.get(RBTrock, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTrock, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Rock', RBTrock)\n elif EstaKey:\n ListaArtistaEntry = om.get(RBTrock, 
musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTrock, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Rock', RBTrock)\n\n #Metal\n\n RBTmetalEntry = mp.get(catalog['musicaGenero'], 'Metal')\n RBTmetal = me.getValue(RBTmetalEntry) \n\n EstaKey = om.contains(RBTmetal, musica['tempo'])\n if not(EstaKey) and (float(musica['tempo']) >= 110 and float(musica['tempo']) <= 140):\n ArtistList = lt.newList('ARRAY_LIST')\n om.put(RBTmetal, musica['tempo'], ArtistList)\n ListaArtistaEntry = om.get(RBTmetal, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTmetal, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Rock', RBTmetal)\n elif EstaKey:\n ListaArtistaEntry = om.get(RBTmetal, musica['tempo'])\n ListaArtista = me.getValue(ListaArtistaEntry)\n lt.addLast(ListaArtista, musica)\n om.put(RBTmetal, musica['tempo'], ListaArtista)\n mp.put(catalog['musicaGenero'], 'Metal', RBTmetal)", "def CharucoBoard_create(squaresX, squaresY, squareLength, markerLength, dictionary):\n pass", "def generate_map(self):\n map = Map.Map(50, 80, 1000, 10, 6)\n\n #here we can map out our larger map structure\n if self.level < 2:\n map.make_greathall()\n elif self.level >= 2 and self.level < 20:\n map.make_map()\n elif self.level >= 20:\n map.make_cave()\n else:\n map.make_map()\n return map", "def reset(self) -> None:\n self.map = []\n for col in range(self.width):\n self.map.append([])\n for cell in range(self.height):\n if col > 1 and col < self.width - 2:\n if cell == 0:\n # World Barrier - Top Middle\n self.map[col].append(StaticTile('wall_3', self.graphicsLibrary.get('wall_3'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif cell == self.height - 1:\n # World Barrier - Bottom Middle\n self.map[col].append(StaticTile('wall_12', self.graphicsLibrary.get('wall_12'), (self.scaleWidth,self.scaleHeight), barrier=True))\n else:\n # Playable Map Area\n if (col % 2) != 0 and (cell % 2) == 0:\n # Hard-Barrier Generation\n self.map[col].append(StaticTile('solid', self.graphicsLibrary.get('solid'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif (col,cell) in self.spawn_buffers:\n # Preserve Potential Spawn Points\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n elif random.randint(0, 2) == 0:\n # Soft-Barrier Generation\n self.map[col].append(DynamicTile('destructable_new', self.graphicsLibrary.get('destructable_new'), (self.scaleWidth,self.scaleHeight), destructable=\"True\", barrier=True, death_animation=self.animations_library.get('destructable_death')))\n else:\n # Fill Remaining Terrain\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n else:\n # World Barrier - Side Sections\n if col == 0 or col == self.width - 1:\n # Roof\n right_most_columns = False\n if col == self.width - 1:\n right_most_columns = True\n\n if cell == self.height - 1:\n self.map[col].append(StaticTile('wall_10', self.graphicsLibrary.get('wall_10'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), 
(self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_5', self.graphicsLibrary.get('wall_5'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif col == 1 or col == self.width - 2:\n # Floor \n right_most_columns = False\n if col == self.width - 2:\n right_most_columns = True\n\n if cell == self.height -1:\n self.map[col].append(StaticTile('wall_11', self.graphicsLibrary.get('wall_11'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_9', self.graphicsLibrary.get('wall_9'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_2', self.graphicsLibrary.get('wall_2'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 1:\n self.map[col].append(StaticTile('wall_6', self.graphicsLibrary.get('wall_6'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_7', self.graphicsLibrary.get('wall_7'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n self.map[col][cell].place_at(topleft=(self.scaleWidth * col, self.scaleHeight * cell))", "def make_SHIGUCHI_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset):\n \"\"\"\n 1 Get information from m1_info, m2_info, m3_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n x_m2 = m2_info[0]\n y_m2 = m2_info[1]\n z_m2 = m2_info[2]\n\n m2_points = m2_info[3]\n\n m2_p0 = m2_points[0]\n m2_p1 = m2_points[1]\n m2_p2 = m2_points[2]\n m2_p3 = m2_points[3]\n\n x_m3 = m3_info[0]\n y_m3 = m3_info[1]\n z_m = m3_info[2]\n\n m3_points = m3_info[3]\n\n m3_p0 = m3_points[0]\n m3_p1 = m3_points[1]\n m3_p2 = m3_points[2]\n m3_p3 = m3_points[3]\n\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n \"\"\"\n 2 Get base point to make SHIGUCHI\n m1 & m2 -> base point = m2_p3 = (dx_U_right, dy_U_right)\n m1 & m3 -> base point = m3_p2 = (dx_L_right, dy_L_right)\n\n m4 & m2 -> base point = m2_p0 = (dx_U_left, dy_U_left)\n m4 & m3 -> base point = m3_p1 = (dx_L_left, dy_L_left)\n \"\"\"\n dx_U_right = m2_p3[0]\n dy_U_right = m2_p3[1]\n\n dx_L_right = m3_p2[0]\n dy_L_right = m3_p2[1]\n\n dx_U_left = m2_p0[0]\n dy_U_left = m2_p0[1]\n\n dx_L_left = m3_p1[0]\n dy_L_left = m3_p1[1]\n\n \"\"\"\n 3 Call appropriate function.\n \"\"\"\n if SHIGUCHI_name == 'TOME':\n pass\n elif SHIGUCHI_name == 'IRIWA':\n # Right side\n dx = dx_U_right\n dy = dy_U_right\n m_info = m2_info\n choice = 'UpperRight'\n m2_right_KUMIKI_points1, m2_right_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_right_KUMIKI_points1)\n # rs.AddPolyline(m2_right_KUMIKI_points2)\n\n dx = dx_L_right\n dy = dy_L_right\n m_info = m3_info\n choice = 'LowerRight'\n m3_right_KUMIKI_points1, m3_right_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_right_KUMIKI_points1)\n # rs.AddPolyline(m3_right_KUMIKI_points2)\n\n # Left side\n dx = dx_U_left\n dy = dy_U_left\n m_info = m2_info\n choice = 'UpperLeft'\n m2_left_KUMIKI_points1, 
m2_left_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_left_KUMIKI_points1)\n # rs.AddPolyline(m2_left_KUMIKI_points2)\n\n dx = dx_L_left\n dy = dy_L_left\n m_info = m3_info\n choice = 'LowerLeft'\n m3_left_KUMIKI_points1, m3_left_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_left_KUMIKI_points1)\n # rs.AddPolyline(m3_left_KUMIKI_points2)\n\n elif SHIGUCHI_name == 'SANMAIKUMI':\n pass\n elif SHIGUCHI_name == 'AIKAKI':\n pass\n elif SHIGUCHI_name == 'HAKO':\n pass\n else:\n sys.exit()\n\n SHIGUCHI_list =\\\n [m2_right_KUMIKI_points1, m2_right_KUMIKI_points2,\\\n m3_right_KUMIKI_points1, m3_right_KUMIKI_points2,\\\n m2_left_KUMIKI_points1, m2_left_KUMIKI_points2,\\\n m3_left_KUMIKI_points1, m3_left_KUMIKI_points2]\n\n return SHIGUCHI_list", "def draw_final_screen(self):\r\n self.master.title(\"Map\") # title of the screen\r\n self.canvas.pack(fill=BOTH, expand=1) # arrange objects inside the screen\r\n best_solution, best_score = self.find_best_solution_and_score()\r\n # 1\r\n poly_1_coords = [60, 60, 350, 60, 350, 150, 60, 150]\r\n enum_color = best_solution.genetic_units[1].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_1_coords, outline='black', fill=gui_color, width=2)\r\n # 2\r\n poly_2_coords = [60, 150, 350, 150, 350, 350, 260, 350, 260, 200, 60, 200]\r\n enum_color = best_solution.genetic_units[2].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_2_coords, outline='black', fill=gui_color, width=2)\r\n # 3\r\n poly_3_coords = [60, 200, 260, 200, 260, 440, 60, 440]\r\n enum_color = best_solution.genetic_units[3].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_3_coords, outline='black', fill=gui_color, width=2)\r\n # 4\r\n poly_4_coords = [60, 440, 60, 350, 110, 350, 110, 370, 220, 370, 220, 440]\r\n enum_color = best_solution.genetic_units[4].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_4_coords, outline='black', fill=gui_color, width=2)\r\n # 5\r\n poly_5_coords = [160, 440, 160, 400, 290, 400, 290, 440]\r\n enum_color = best_solution.genetic_units[5].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_5_coords, outline='black', fill=gui_color, width=2)\r\n # 6\r\n poly_6_coords = [350, 60, 640, 60, 640, 220, 350, 220]\r\n enum_color = best_solution.genetic_units[6].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_6_coords, outline='black', fill=gui_color, width=2)\r\n # 10\r\n poly_10_coords = [350, 220, 640, 220, 640, 450, 350, 450]\r\n enum_color = best_solution.genetic_units[10].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_10_coords, outline='black', fill=gui_color, width=2)\r\n # 8\r\n poly_8_coords = [350, 220, 550, 220, 550, 410, 350, 410]\r\n enum_color = best_solution.genetic_units[8].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_8_coords, outline='black', fill=gui_color, width=2)\r\n # 9\r\n poly_9_coords = [260, 350, 400, 350, 400, 380, 450, 380, 450, 440, 260, 440]\r\n enum_color = best_solution.genetic_units[9].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_9_coords, outline='black', fill=gui_color, width=2)\r\n # 7\r\n poly_7_coords = [350, 150, 500, 150, 500, 250, 350, 250]\r\n enum_color = 
best_solution.genetic_units[7].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_7_coords, outline='black', fill=gui_color, width=2)\r\n # 11\r\n poly_11_coords = [0, 0, 450, 0, 450, 60, 60, 60, 60, 440, 250, 440, 250, 500, 0, 500]\r\n enum_color = best_solution.genetic_units[11].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_11_coords, outline='black', fill=gui_color, width=2)\r\n # 12\r\n poly_12_coords = [450, 0, 700, 0, 700, 500, 250, 500, 250, 440, 640, 440, 640, 60, 450, 60]\r\n enum_color = best_solution.genetic_units[12].color\r\n gui_color = ENUM_TO_GUI_COLORS[enum_color]\r\n self.canvas.create_polygon(poly_12_coords, outline='black', fill=gui_color, width=2)\r\n # draw the text at the bottom of the gui.\r\n self.canvas.create_text(330, 520, font=\"Purisa\", fill=\"black\",\r\n text=f\"Done after {self.simulation.round} rounds Best score: {best_score}\")\r\n # arrange objects on the canvas.\r\n self.canvas.pack(fill=BOTH, expand=1)", "def create_map(grid_size):\n STATUS['game_grid'] = [] # Could be a tuple?\n STATUS['grid_size'] = grid_size\n x_coord = 1\n y_coord = 1\n grid_size_counter = grid_size * grid_size\n while grid_size_counter:\n STATUS['game_grid'].append([x_coord, y_coord])\n x_coord += 1\n if x_coord == grid_size + 1:\n y_coord += 1\n x_coord = 1\n grid_size_counter -= 1", "def crea_falla( lats, lons, prof, dip, strike, latini, latfin, area_sf, profundidad, razon_aspecto ):\n \n # se pasa los arrays de lats y lons a arrays unidimensionales que contienen las coordenadas sin repeticion\n\n # longitudes\n vector_lon_input = lons[0,:] # primera fila de matriz de lons, columnas se repiten\n # se chequea si son crecientes monotonos, util para interpolacion \n if all( x < y for x, y in zip( vector_lon_input, vector_lon_input[1:] ) ):\n vector_lon_input = vector_lon_input\n else:\n vector_lon_input = vector_lon_input[::-1]\n\n # latitudes\n vector_lat_input = lats[:,0] # primera columna de matriz de lats, filas se repiten\n # se chequea si son crecientes monotonos, util para interpolacion \n if all( x < y for x, y in zip( vector_lat_input, vector_lat_input[1:] ) ):\n vector_lat_input = vector_lat_input\n else:\n vector_lat_input = vector_lat_input[::-1]\n\n\n lim_norte = latini # nuevo limite superior\n dif_lim_norte = np.abs( lats-lim_norte ) # diferencias entre array de latitudes y valor del limite superior\n idx_lim_norte = ( np.where( dif_lim_norte == dif_lim_norte.min() )[0][0], np.where( dif_lim_norte == dif_lim_norte.min() )[1][0] )# indice del valor de Slab2.0 que mas se aproxima \n\n lim_sur = latfin # nuevo limite inferior\n dif_lim_sur = np.abs( lats-lim_sur ) # diferencias entre array de latitudes y valor del limite inferior\n idx_lim_sur = ( np.where( dif_lim_sur == dif_lim_sur.min() )[0][0], np.where( dif_lim_sur == dif_lim_sur.min() )[1][0] )# indice del valor de Slab2.0 que mas se aproxima \n\n # se calcula la distancia entre los limites (largo de la falla) en metros\n largo_falla = Geodesic.WGS84.Inverse(lats[idx_lim_norte], lons[idx_lim_norte], lats[idx_lim_sur], lons[idx_lim_sur] )[ \"s12\" ]\n largo_subfalla = np.sqrt( area_sf ) # subfallas cuadradas\n n_fallas_filas = np.floor_divide( largo_falla, largo_subfalla ) # cantidad de fallas en sentido norte - sur \n # a partir del numero de fallas en el sentido norte sur (ctdad de latitudes) se crea un vector de latitudes equidistantes\n lats_fallas = np.reshape( np.linspace( lim_norte, lim_sur, int( n_fallas_filas ) ),( int( 
n_fallas_filas ),1 ) )\n \n # se busca la latitud del medio para referenciarla a la profundidad deseada\n if len(lats_fallas)%2 != 0:\n lat_mediana = lats_fallas[ np.floor_divide( len( lats_fallas ), 2) ]\n else:\n lat_mediana = lats_fallas[ np.floor_divide( len( lats_fallas ), 2) - 1 ]\n\n # busca indice de la latitud del medio\n dif_lat_mediana = np.abs( lats - lat_mediana )\n # primer indice, muestra la linea de profundidades para esta latitud\n idx_lat_mediana = np.where( dif_lat_mediana == dif_lat_mediana.min() )[0][0] \n # se busca indice de la profundidad en la linea de la latitud media\n dif_profundidad = np.abs( profundidad - prof[ idx_lat_mediana, ] )\n idx_profundidad = np.where( dif_profundidad == dif_profundidad.min() )[0][0]\n \n # indice elemento central de la falla creada, a partir de la latitud central y la profundidad\n idx_subfalla_central = ( idx_lat_mediana, idx_profundidad )\n\n # longitud de la subfalla central\n lon_subfalla_central = lons[ idx_subfalla_central ]#[0][0]\n # profundidad de la subfalla central (punto con la profundidad mas cercana a la ingresada)\n prof_subfalla_central = prof[ idx_subfalla_central ]#[0][0]\n\n # se busca los indices de los elementos mas cercanos a las latitudes de las fallas creadas por el linespace\n dif_lats = np.ones( (len( lats_fallas ), ) + np.shape( lats ) ) # inicializacion de array para diferencias de latitudes\n for i in range( len( lats_fallas ) ):\n dif_lats[i] = np.abs( lats - lats_fallas[i] )\n \n idx_fallas = np.ones( (len( lats_fallas ), ) + ( 1,2 ) ) # inicializacion de array con los indices de las latitudes \n for j in range( len( lats_fallas ) ):\n idx_fallas[j] = ( np.where( dif_lats[j] == dif_lats[j].min() )[0][0], np.where( dif_lats[j] == dif_lats[j].min() )[1][0] )\n \n # ancho de la falla\n ancho_falla = largo_falla/razon_aspecto\n n_fallas_columnas = np.floor_divide( ancho_falla, largo_subfalla ) # numero de subfallas en el sentido este-oeste\n # completar array de latitudes con el nuevo ancho\n #matriz_latitudes = np.reshape(np.tile(lats_fallas, int(n_fallas_columnas)),(int(n_fallas_columnas),(len(lats_fallas))))\n matriz_latitudes = np.tile( lats_fallas, int( n_fallas_columnas ) )\n # creacion de array con longitudes a usarse\n # calculo de longitudes de los centros de las subfallas a partir del ancho de la falla\n # es necesario saber si la cantidad es par o impar\n if n_fallas_columnas%2 != 0:\n mitad_ancho = ancho_falla / 2 # en metros\n n_fallas_xlado = int( n_fallas_columnas ) // 2 # cantidad de subfallas a ambos lados de falla central\n lon_limite_oeste = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 270, mitad_ancho )[ \"lon2\" ]\n lon_limite_este = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 90, mitad_ancho )[ \"lon2\" ]\n lons_subfallas_oeste = np.linspace( lon_limite_oeste, lon_subfalla_central, ( n_fallas_xlado + 1 ) )\n lons_subfallas_este = np.linspace( lon_subfalla_central, lon_limite_este, ( n_fallas_xlado + 1 ) )\n lons_subfallas = np.append( lons_subfallas_oeste[:-1], lons_subfallas_este ) # vector con las longitudes de las subfallas\n lons_subfallas = np.reshape( lons_subfallas, ( 1, int( n_fallas_columnas ) ) )\n else:\n mitad_ancho = ancho_falla / 2 \n n_fallas_oeste = int( n_fallas_columnas ) / 2 - 1 # -1 para no contar 2 veces la subfalla del medio\n n_fallas_este = int( n_fallas_columnas ) / 2\n lon_limite_oeste = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 270, ( mitad_ancho - largo_subfalla ) )[ \"lon2\" ]\n lon_limite_este = 
Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 90, mitad_ancho )[ \"lon2\" ]\n lons_subfallas_oeste = np.linspace( lon_limite_oeste, lon_subfalla_central, ( int( n_fallas_oeste ) + 1 ) )\n lons_subfallas_este = np.linspace( lon_subfalla_central, lon_limite_este, ( int( n_fallas_este ) + 1 ) )\n lons_subfallas = np.append( lons_subfallas_oeste[:-1], lons_subfallas_este ) # vector con las longitudes de las subfallas\n lons_subfallas = np.reshape( lons_subfallas, ( 1, int( n_fallas_columnas ) ) )\n\n # creacion de matriz de longitudes\n matriz_longitudes = np.tile( lons_subfallas, ( int( n_fallas_filas ), 1 ) ) # matriz con longitudes de las subfallas\n\n # se debe encontrar las profundidades, dips y strikes correspondientes a estas latitudes y longitudes de cada subfalla\n # profundidades correspondientes a cada subfalla:\n # se interpolara para encontrar los valores de profundidad correspondientes a cada subfalla\n \n vec_lons_subfallas_todas = np.reshape( matriz_longitudes, \n ( int( n_fallas_filas * n_fallas_columnas ), ) ) # vector con todos los elementos de la matriz de longitudes de las subfallas creadas\n vec_lats_subfallas_todas = np.reshape( matriz_latitudes, \n ( int( n_fallas_filas * n_fallas_columnas ), ) ) # vector con todos los elementos de la matriz de latitudes de las subfallas creadas\n\n\n # objeto de interpolacion de profundidades\n profs_int = RegularGridInterpolator( ( vector_lat_input, vector_lon_input ), prof )\n # inicializacion array de valores interpolados de profundidades\n prof_subfallas = np.ones( ( int( n_fallas_columnas * n_fallas_filas ), 1) )\n for p in range( int( n_fallas_columnas*n_fallas_filas ) ):\n prof_subfallas[p] = profs_int( ( vec_lats_subfallas_todas[p], vec_lons_subfallas_todas[p] ) )\n prof_subfallas = np.reshape( prof_subfallas, ( int( n_fallas_filas ), int( n_fallas_columnas ) ) )\n \n # dips correspondientes a cada subfalla:\n # se interpolara para encontrar los valores de dip correspondientes a cada subfalla\n\n # objeto de interpolacion de dips\n dips_int = RegularGridInterpolator( ( vector_lat_input, vector_lon_input ), dip )\n # inicializacion array de valores interpolados de dip\n dip_subfallas = np.ones( ( int( n_fallas_columnas * n_fallas_filas ), 1) )\n for d in range( int( n_fallas_columnas * n_fallas_filas ) ):\n dip_subfallas[d] = dips_int( ( vec_lats_subfallas_todas[d], vec_lons_subfallas_todas[d] ) )\n dip_subfallas = np.reshape( dip_subfallas, (int( n_fallas_filas ), int( n_fallas_columnas ) ) )\n \n # strike correspondiente a cada subfalla:\n # se interpolara para encontrar los valores de strike correspondientes a cada subfalla\n\n # objeto de interpolacion de strikes\n strikes_int = RegularGridInterpolator( ( vector_lat_input, vector_lon_input ), strike )\n # inicializacion array de valores interpolados de strike\n strike_subfallas = np.ones( ( int( n_fallas_columnas*n_fallas_filas ), 1) )\n for s in range( int( n_fallas_columnas*n_fallas_filas ) ):\n strike_subfallas[s] = strikes_int( ( vec_lats_subfallas_todas[s], vec_lons_subfallas_todas[s] ) )\n strike_subfallas = np.reshape( strike_subfallas, ( int( n_fallas_filas ), int( n_fallas_columnas ) ) )\n # revisar, quiza sea necesario invertir los valores de la latitud\n\n\n\n\n return largo_falla, matriz_longitudes, matriz_latitudes, prof_subfallas, dip_subfallas, strike_subfallas", "def mapa(player):\n mapita = pd.read_csv(\"./data/database_shots.csv\")\n mapita[\"coord_x\"] = pd.to_numeric(mapita[\"coord_x\"], downcast=\"float\")\n mapita[\"coord_y\"] = 
pd.to_numeric(mapita[\"coord_y\"], downcast=\"float\")\n mapita[\"coord_y\"] = mapita[\"coord_y\"] * (-1)\n listita = mapita[mapita[\"name\"] == player]\n listita_1 = pd.DataFrame(listita, columns = ['name', \"shot\", 'coord_x','coord_y'])\n listita_1.shot = listita_1.shot.apply(lambda x: 'in' if 'in' in x else x)\n listita_1.shot = listita_1.shot.apply(lambda x: 'in' if 'dunk' in x else x)\n listita_1.shot = listita_1.shot.apply(lambda x: 'out' if 'out' in x else x)\n plt.figure(figsize=(18,20))\n pl.draw_court(outer_lines=True)\n plt.axis('off')\n plt.xlim(0,260)\n plt.ylim(-283,0)\n markers = {\"#local-in\": \"s\", \"#local-out\": \"x\"}\n sns.scatterplot(data = listita_1, x = \"coord_x\", y = \"coord_y\", s = 300, hue = \"shot\", style = \"shot\")\n plt.legend(loc = 4,bbox_to_anchor=(0.95,0.05), fontsize=6, title='Shots attempted',title_fontsize=8, mode = \"expand\")\n plt.savefig(\"./images/map.png\",dpi = 600)", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def build_compass_map():\n\n for i in range(0, 100):\n # Add bears\n if ENEMY_LIST[i] == 1:\n HAS_COMPASS_MAP.append(COMPASS_DICT[3])\n # Add Grizzly bear\n elif ENEMY_LIST[i] == 2:\n HAS_COMPASS_MAP.append(COMPASS_DICT[4])\n # Add water spots\n elif GROUND_FEATURES_LIST[i] == 10:\n HAS_COMPASS_MAP.append(COMPASS_DICT[1])\n # Add Big Trees\n elif GROUND_FEATURES_LIST[i] == 11:\n HAS_COMPASS_MAP.append(COMPASS_DICT[2])\n # Add nothings\n else:\n HAS_COMPASS_MAP.append(COMPASS_DICT[5])", "def create_data_ui(data_map, clear):\n data_ui = [[]] * (16 + data_map['map_size'])\n\n # Initialisation of the displaying constants.\n grid_size = 5 * data_map['map_size']\n ui_color = '%(ui_color)s'\n\n margin = 5\n line_coloured = ui_color + ('█' * (117 + margin)) + Style.RESET_ALL\n if clear:\n margin = 9\n line_coloured = ui_color + ('█' * (121 + margin)) + Style.RESET_ALL\n\n\n border_black = Back.BLACK + ' ' + Style.RESET_ALL\n margin_left = ((20 - data_map['map_size']) * 5) / 2\n margin_right = ((20 - data_map['map_size']) * 5) - (((20 - data_map['map_size']) * 5) / 2)\n border_coloured_margin_left = ui_color + ('█' * (margin + margin_left)) + Style.RESET_ALL\n border_coloured_margin_right = ui_color + ('█' * (margin + margin_right)) + Style.RESET_ALL\n border_coloured_left = ui_color + ('█' * margin) + Style.RESET_ALL\n border_coloured_right = ui_color + ('█' * margin) + Style.RESET_ALL\n border_coloured_middle = ui_color + ('█' * 8) + Style.RESET_ALL\n\n border_white = ' ' * 2\n\n # Generate and save the top of the UI.\n for i in range(3):\n data_ui[i] = line_coloured\n\n # Generate and save the top of the grid.\n turn_message = 'Turn %(turn)s - %(playername)s, it\\'s up to you ! 
%(blank)s'\n data_ui[3] = border_coloured_margin_left + Fore.WHITE + Back.BLACK + ' ' + turn_message + ' ' + Style.RESET_ALL + border_coloured_margin_right\n data_ui[4] = border_coloured_margin_left + border_black + ' ' * (grid_size + 8) + border_black + border_coloured_margin_right\n\n # Generate and save the architecture of the grid.\n for i in range(1, data_map['map_size'] + 1):\n data_ui[i + 4] = border_coloured_margin_left + border_black + Fore.BLACK + ' ' + ('0' + str(i))[-2:] + ' ' + Style.RESET_ALL\n for j in range(1, data_map['map_size'] + 1):\n data_ui[i + 4] += '%((' + str(i) + ',' + str(j) + '))5s' + Style.RESET_ALL\n data_ui[i + 4] += ' ' + border_black + border_coloured_margin_right\n\n # Generate and save the foot of the grid.\n data_ui[data_map['map_size'] + 5] = border_coloured_margin_left + border_black + Fore.BLACK + ' '\n for i in range(1, data_map['map_size'] + 1):\n data_ui[data_map['map_size'] + 5] += ' ' + ('0' + str(i))[-2:] + ' '\n data_ui[data_map['map_size'] + 5] += ' ' + border_black + border_coloured_margin_right\n\n data_ui[data_map['map_size'] + 6] = border_coloured_margin_left + Back.BLACK + (grid_size + 12) * ' ' + Style.RESET_ALL + border_coloured_margin_right\n\n # Generate and save the top of the statistics.\n data_ui[data_map['map_size'] + 7] = line_coloured\n\n data_ui[data_map['map_size'] + 8] = border_coloured_left + Fore.WHITE + Back.BLACK + ' Your units:' + (' ' * 39) + Style.RESET_ALL + border_coloured_middle\n data_ui[data_map['map_size'] + 8] += Fore.WHITE + Back.BLACK + ' Opponent\\'s units:' + (' ' * 33) + Style.RESET_ALL + border_coloured_right\n\n # Generate and save the content of the statistics.\n for i in range(4):\n data_ui[data_map['map_size'] + 9 + i] = border_coloured_left + border_black + ' ' + border_white + Fore.BLACK + '%(stat' + str(i+1) + '1)s' + border_white + '%(stat' + str(i+1) + '2)s' + border_white + ' ' + border_black + border_coloured_middle\n data_ui[data_map['map_size'] + 9 + i] += border_black + ' ' + border_white + '%(stat' + str(i+1) + '3)s' + border_white + '%(stat' + str(i+1) + '4)s' + border_white + ' ' + border_black + border_coloured_right\n\n # Generate and save the foot of the statistics.\n data_ui[data_map['map_size'] + 13] = border_coloured_left + Back.BLACK + (' ' * 52) + Style.RESET_ALL + border_coloured_middle\n data_ui[data_map['map_size'] + 13] += Back.BLACK + (' ' * 52) + Style.RESET_ALL + border_coloured_right\n\n for i in range(2):\n data_ui[data_map['map_size'] + 14 + i] = line_coloured\n\n return data_ui" ]
[ "0.65309775", "0.63648427", "0.63088846", "0.62954193", "0.6213833", "0.6092138", "0.60337055", "0.60142654", "0.59787285", "0.5911133", "0.5909889", "0.5894976", "0.58894086", "0.58749473", "0.5871359", "0.5868573", "0.58479834", "0.58409506", "0.58083653", "0.57830596", "0.5765612", "0.5752556", "0.5744527", "0.57316905", "0.5726059", "0.571409", "0.568521", "0.56725883", "0.56697214", "0.56673086" ]
0.7384126
0
This method uses a formula to give us the fama (fame) variable, which depends on the casino's decoration and influences the number of visits. We take the kasino.decoracion dictionary, an attribute of this class, and build a list from it, then split that list into 3 parts as appropriate. We then iterate over each part to score paredes (walls), suelo (floor) and reforma (renovation) as indicated, and use these 3 variables in the fama formula.
def fama (self , diccionario): decoracion_list = [] for key , value in diccionario.items(): a=[] a.append(key) a.append(value) decoracion_list.append (a) paredes_list = decoracion_list [0:3] suelo_list = decoracion_list [3:6] reforma_list = decoracion_list [6:] paredes = 1 suelo = 1 reforma = 1 for i in range (len(paredes_list)): if paredes_list [i][1] == 1 : paredes = i+2 for i in range (len(suelo_list)): if suelo_list [i][1] == 1 : suelo = i+2 for i in range (len(reforma_list)): if reforma_list [i][1] == 1 : reforma = i+2 modificador_fama = 0 if paredes >= 4 and suelo >= 4 and reforma >= 4 : modificador_fama = 45 elif paredes >= 3 and suelo >= 3 and reforma >= 3 : modificador_fama = 33 elif paredes >= 2 and suelo >= 2 and reforma >= 2 : modificador_fama = 12 fama = (10*paredes)+(10*suelo)+(10*reforma) + modificador_fama + kasino.modificador_fama """ FORMULA FAMA : Con esta formula se calcula la fama, que dependera de la decoracion e influira en los visitantes Se puede usar modificador_fama para calibrar el juego o añadir niveles de dificulad """ return fama , paredes , suelo , reforma
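A minimal standalone sketch of the scoring logic used by the fama method above, under stated assumptions: the nine-key decoracion dict of 0/1 flags (three walls, three floors, three renovations), the key names, and the helper puntuar_tramo are illustrative inventions, and the casino-wide kasino.modificador_fama contribution is treated as 0.

def puntuar_tramo(tramo):
    # Score one 3-item slice of the decoration list: 1 if nothing in the
    # slice is owned, otherwise the index of the last owned upgrade plus 2,
    # i.e. a level of 2, 3 or 4 (same rule as the loops in fama above).
    nivel = 1
    for i, (_clave, comprado) in enumerate(tramo):
        if comprado == 1:
            nivel = i + 2
    return nivel

# Sample decoration flags; insertion order matters, as in the original method.
decoracion = {
    "pared_1": 1, "pared_2": 1, "pared_3": 0,        # walls   -> level 3
    "suelo_1": 1, "suelo_2": 0, "suelo_3": 0,        # floor   -> level 2
    "reforma_1": 0, "reforma_2": 0, "reforma_3": 0,  # renovation -> level 1
}
items = list(decoracion.items())
paredes = puntuar_tramo(items[0:3])
suelo = puntuar_tramo(items[3:6])
reforma = puntuar_tramo(items[6:])

# Flat bonus when every category reaches a given level, mirroring the method.
if paredes >= 4 and suelo >= 4 and reforma >= 4:
    bono = 45
elif paredes >= 3 and suelo >= 3 and reforma >= 3:
    bono = 33
elif paredes >= 2 and suelo >= 2 and reforma >= 2:
    bono = 12
else:
    bono = 0

fama = 10 * paredes + 10 * suelo + 10 * reforma + bono  # kasino bonus assumed 0
print(fama, paredes, suelo, reforma)  # -> 60 3 2 1 for this sample dict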
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calcular_tres_marcas_menor_produccion(lista):\r\n lista_ordenada_menor_a_mayor = ordenar_lista_menor_a_mayor_produccion(lista)\r\n lista_marcas_menor_produccion = []\r\n for i in range(3):\r\n lista_marcas_menor_produccion.append(lista_ordenada_menor_a_mayor[i][0])\r\n \r\n return lista_marcas_menor_produccion", "def resultat(self, concordance_mf, concordance_pf, liste_F, liste_M, liste_P):\n resultat = {\"Marqueur\": [], \"Conclusion\": [], \"Concordance Mere/Foetus\": [], \"Détails M/F\": [],\n \"Concordance Pere/Foetus\": [], \"Détails P/F\": []}\n marqueurs_conta = 0\n marqueurs_non_conta = 0\n somme_conta = 0\n if liste_F[0].allele[1] == 0.0:\n self.set_sexe(\"F\")\n else:\n self.set_sexe(\"M\")\n if concordance_mf != 16 and concordance_pf != 16 and concordance_pf != None:\n self.set_concordance_mere_foet(\"NON\")\n self.set_concordance_pere_foet(\"NON\")\n del resultat[\"Conclusion\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n resultat[\"Concordance Mere/Foetus\"].append(liste_F[nbres].concordance_mere_foetus)\n resultat[\"Concordance Pere/Foetus\"].append(liste_P[nbres].concordance_pere_foetus)\n if liste_F[nbres].concordance_mere_foetus == \"NON\" and liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M : \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails P/F\"].append(\n \"P : \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F : \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n elif liste_F[nbres].concordance_mere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M: \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F : \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails P/F\"].append(\"\")\n elif liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails P/F\"].append(\n \"P: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails M/F\"].append(\"\")\n else:\n resultat[\"Détails M/F\"].append(\"\")\n resultat[\"Détails P/F\"].append(\"\")\n conclusion = pd.DataFrame({\"1\": [\"Non calculé\", \"Non calculé\", \"Non calculé\", self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\",\n \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Concordance Mere/Foetus\", \"Détails M/F\",\n \"Concordance Pere/Foetus\", \"Détails P/F\"])\n return resultats, conclusion\n elif concordance_mf != len(liste_F) and concordance_pf == len(liste_F) or concordance_mf != len(\n liste_F) and concordance_pf == None:\n self.set_concordance_mere_foet(\"NON\")\n self.set_concordance_pere_foet(\"OUI\")\n if concordance_pf == None:\n self.set_concordance_pere_foet(\"ABS\")\n del resultat[\"Conclusion\"]\n del resultat[\"Concordance Pere/Foetus\"]\n del resultat[\"Détails P/F\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n resultat[\"Concordance Mere/Foetus\"].append(liste_F[nbres].concordance_mere_foetus)\n if liste_F[nbres].concordance_mere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M: \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F: \" + str(\n 
liste_F[nbres].normalisation(liste_F[nbres].allele)))\n else:\n resultat[\"Détails M/F\"].append(\"\")\n conclusion = pd.DataFrame({\"1\": [\"Non calculé\", \"Non calculé\", \"Non calculé\", self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\",\n \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Concordance Mere/Foetus\", \"Détails M/F\"])\n return resultats, conclusion\n elif concordance_mf == len(liste_F) and concordance_pf == len(liste_F) or concordance_mf == len(\n liste_F) and concordance_pf == None:\n self.set_concordance_mere_foet(\"OUI\")\n self.set_concordance_pere_foet(\"OUI\")\n if concordance_pf == None:\n self.set_concordance_pere_foet(\"ABS\")\n del resultat[\"Concordance Mere/Foetus\"]\n del resultat[\"Concordance Pere/Foetus\"]\n del resultat[\"Détails P/F\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n if liste_F[nbres].informatif == 0:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Mère homozygote\")\n elif liste_F[nbres].informatif == 1:\n if liste_F[nbres].contamination == 0:\n marqueurs_non_conta += 1\n resultat[\"Conclusion\"].append(\"Non contaminé\")\n resultat[\"Détails M/F\"].append(\"\")\n elif liste_F[nbres].contamination == 1:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n else:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n elif liste_F[nbres].informatif == 2:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Allèles semblables\")\n else:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Echo\")\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Conclusion\", \"Détails M/F\"])\n try:\n moyenne_conta = somme_conta / marqueurs_conta\n except ZeroDivisionError:\n moyenne_conta = 0\n conclusion = pd.DataFrame(\n {\"1\": [int(marqueurs_non_conta), int(marqueurs_conta), round(moyenne_conta, 2), self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\", \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n return resultats, conclusion\n elif concordance_mf == len(liste_F) and concordance_pf != len(liste_F):\n self.set_concordance_mere_foet(\"OUI\")\n self.set_concordance_pere_foet(\"NON\")\n del resultat[\"Concordance Mere/Foetus\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Concordance Pere/Foetus\"].append(liste_P[nbres].concordance_pere_foetus)\n if liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails P/F\"].append(\n \"P: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)))\n else:\n resultat[\"Détails P/F\"].append(\"\")\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n if liste_F[nbres].informatif == 0:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Mère homozygote\")\n elif liste_F[nbres].informatif == 1:\n if 
liste_F[nbres].contamination == 0:\n marqueurs_non_conta += 1\n resultat[\"Conclusion\"].append(\"Non contaminé\")\n resultat[\"Détails M/F\"].append(\"\")\n elif liste_F[nbres].contamination == 1:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n else:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n elif liste_F[nbres].informatif == 2:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Allèles semblables\")\n else:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Echo\")\n resultats = pd.DataFrame(resultat,\n columns=[\"Marqueur\", \"Conclusion\", \"Détails M/F\", \"Concordance Pere/Foetus\",\n \"Détails P/F\"])\n try:\n moyenne_conta = somme_conta / marqueurs_conta\n except ZeroDivisionError:\n moyenne_conta = 0\n conclusion = pd.DataFrame(\n {\"1\": [int(marqueurs_non_conta), int(marqueurs_conta), round(moyenne_conta, 2), self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\", \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n return resultats, conclusion", "def __init__(self, sistema, nombre, espacios_de_atencion, niveles=None, capacity=float('inf'), init=0):\r\n\r\n super(MedioDeAlmacenamiento, self).__init__(sistema, capacity, init)\r\n self.nombre = nombre\r\n self.espacios_de_atencion = espacios_de_atencion\r\n self.espacios_en_uso = 0\r\n self.cola = []\r\n\r\n if niveles is not None:\r\n self.niveles = niveles\r\n if sum(self.niveles.values()) > 0:\r\n self.get(sum(self.niveles.values()))\r\n self.espacio = self.capacity - self.level", "def cambiar_Fichas(self,lista):\n self.rellenar_atril()\n for letra in lista:\n self.bolsa.agregar_bolsa(letra, 1)\n random.shuffle(self.bolsa.bolsa)", "def calcula(self, is_deterministico):\n # criando header da tabela\n tabela = PrettyTable([\"Rodadas\",\n \"E[T1]\",\n \"E[W1]\",\n \"E[X1]\",\n \"E[N1]\",\n \"E[Nq1]\",\n \"E[Ns1]\",\n \"E[T2]\",\n \"E[W2]\",\n \"E[X2]\",\n \"E[N2]\",\n \"E[Nq2]\",\n \"E[Ns2]\",\n \"Var[W1]\",\n \"Var[W2]\"])\n \n\n for index in range(1, self.n_rodadas+1):\n # calculando a esperanca das metricas da fila 1\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". E len w1: \", len(self.w1[index]))\n if len(self.w1[index]) > 0:\n self.x1_med_rodada[index] = sum(self.x1[index])/len(self.w1[index])\n self.w1_med_rodada[index] = sum(self.w1[index])/len(self.w1[index])\n self.nq1_med_rodada[index] = sum(self.nq1[index])/len(self.w1[index])\n self.ns1_med_rodada[index] = sum(self.ns1[index])/len(self.w1[index])\n self.n1_med_rodada[index] = sum(self.n1[index])/len(self.w1[index])\n self.t1_med_rodada[index] = sum(self.t1[index])/len(self.w1[index])\n\n # calculando a esperanca das metricas da fila 2\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". 
E len w2: \", len(self.w2[index]))\n if len(self.w2[index]) > 0:\n self.x2_med_rodada[index] = sum(self.x2[index])/len(self.w2[index])\n self.w2_med_rodada[index] = sum(self.w2[index])/len(self.w2[index])\n self.nq2_med_rodada[index] = sum(self.nq2[index])/len(self.w2[index])\n self.ns2_med_rodada[index] = sum(self.ns2[index])/len(self.w2[index])\n self.n2_med_rodada[index] = sum(self.n2[index])/len(self.w2[index])\n self.t2_med_rodada[index] = sum(self.t2[index])/len(self.w2[index])\n\n # calculo de Var[W1] e Var[W2] para exibir na tabela\n if len(self.w1[index]) == 1:\n self.var_w1_med_rodada[index] = 0\n else:\n for amostra in range(len(self.w1[index])):\n self.var_w1_med_rodada[index] += (self.w1[index][amostra] - self.w1_med_rodada[index]) ** 2\n self.var_w1_med_rodada[index] /= (len(self.w1[index]) - 1)\n\n if len(self.w2[index]) == 1:\n self.var_w2_med_rodada[index] = 0\n else:\n for amostra2 in range(len(self.w2[index])):\n self.var_w2_med_rodada[index] += (self.w2[index][amostra2] - self.w2_med_rodada[index]) ** 2\n self.var_w2_med_rodada[index] /= (len(self.w2[index]) - 1)\n\n tabela.add_row([\"rodada_\" + str(index),\n round(self.t1_med_rodada[index], 6),\n round(self.w1_med_rodada[index], 6),\n round(self.x1_med_rodada[index], 6),\n round(self.n1_med_rodada[index], 6),\n round(self.nq1_med_rodada[index], 6),\n round(self.ns1_med_rodada[index], 6),\n round(self.t2_med_rodada[index], 6),\n round(self.w2_med_rodada[index], 6),\n round(self.x2_med_rodada[index], 6),\n round(self.n2_med_rodada[index], 6),\n round(self.nq2_med_rodada[index], 6),\n round(self.ns2_med_rodada[index], 6),\n round(self.var_w1_med_rodada[index], 6),\n round(self.var_w2_med_rodada[index], 6)])\n\n # acumulando medias totais\n self.x1_med_total += self.x1_med_rodada[index]\n self.w1_med_total += self.w1_med_rodada[index]\n self.nq1_med_total += self.nq1_med_rodada[index]\n self.ns1_med_total += self.ns1_med_rodada[index]\n self.n1_med_total += self.n1_med_rodada[index]\n self.t1_med_total += self.t1_med_rodada[index]\n self.x2_med_total += self.x2_med_rodada[index]\n self.w2_med_total += self.w2_med_rodada[index]\n self.nq2_med_total += self.nq2_med_rodada[index]\n self.ns2_med_total += self.ns2_med_rodada[index]\n self.n2_med_total += self.n2_med_rodada[index]\n self.t2_med_total += self.t2_med_rodada[index]\n self.var_w1_med_total += self.var_w1_med_rodada[index]\n self.var_w2_med_total += self.var_w2_med_rodada[index]\n\n # dividindo medias acumuladas pelo total de rodadas e enfim, calculando a media total de cada metrica\n self.x1_med_total /= self.n_rodadas\n self.w1_med_total /= self.n_rodadas\n self.nq1_med_total /= self.n_rodadas\n self.ns1_med_total /= self.n_rodadas\n self.n1_med_total /= self.n_rodadas\n self.t1_med_total /= self.n_rodadas\n self.x2_med_total /= self.n_rodadas\n self.w2_med_total /= self.n_rodadas\n self.nq2_med_total /= self.n_rodadas\n self.ns2_med_total /= self.n_rodadas\n self.n2_med_total /= self.n_rodadas\n self.t2_med_total /= self.n_rodadas\n self.var_w1_med_total /= self.n_rodadas\n self.var_w2_med_total /= self.n_rodadas\n\n tabela.add_row([\"Media\",\n round(self.t1_med_total, 6),\n round(self.w1_med_total, 6),\n round(self.x1_med_total, 6),\n round(self.n1_med_total, 6),\n round(self.nq1_med_total, 6),\n round(self.ns1_med_total, 6),\n round(self.t2_med_total, 6),\n round(self.w2_med_total, 6),\n round(self.x2_med_total, 6),\n round(self.n2_med_total, 6),\n round(self.nq2_med_total, 6),\n round(self.ns2_med_total, 6),\n round(self.var_w1_med_total, 6),\n 
round(self.var_w2_med_total, 6)\n ])\n\n print(tabela, \"\\n\")\n\n if not is_deterministico:\n self.calcula_ic()", "def manipular_granos(self, camion):\r\n operaciones = self.operaciones[\"Operaciones manipuleo\"]\r\n operaciones_complementarias = self.operaciones[\"Operaciones complementarias\"]\r\n\r\n # Manipuleo de camion por cargar\r\n if camion.tipo == \"Carga\":\r\n\r\n # Manipuleo de carga a granel seca en almacenes propios\r\n if camion.carga in [\"Harina de Soya - Hi Pro/Pellet de Soya\"]:\r\n\r\n # Si la cola de la tolva es aceptable, o si la cola de la pala mecanica y de las cuadrillas\r\n # son muy largas, o si no se dispone producto en almacen 1, entonces, se trata de cargar a\r\n # partir de un transbordo en sistema mecanizado\r\n if len(self.recursos_atencion[\"Estacion Tolva/Balanza 3\"].cola) <= 10 \\\r\n or (len(operaciones[\"Carga con pala mecanica\"].recurso.cola) > 10 and\r\n len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8) \\\r\n or not camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n transbordo = operaciones[\"Transbordo en sistema mecanizado (C)\"]\r\n ejecucion_transbordo = yield self.process(\r\n transbordo.ejecutar(self, camion, 30, self.medios_almacenamiento[\"Tolva\"]))\r\n\r\n # Si no se ejecuta el transbordo, se trata de cargar el camion tomando otras alternativoas\r\n # bajo un orden de prioridad definida a continuación\r\n if ejecucion_transbordo in [\"No ejecutada por recurso\", \"No ejecutada por producto\"]:\r\n\r\n # Si la cola de la pala mecanica es aceptable o la cola de las cuadrillas es muy larga,\r\n # y se dispone producto en almacenes, entonces, se carga con pala mecanica\r\n if (len(operaciones[\"Carga con pala mecanica\"].recurso.cola) <= 10 or\r\n len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8) \\\r\n and camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n yield self.process(operaciones_complementarias[\"Primer pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n carga = operaciones[\"Carga con pala mecanica\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n yield self.process(operaciones_complementarias[\"Segundo pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n # En otro caso, si la cola de las cuadrillas es aceptable y se dipone producto en almacenes,\r\n # entonces, se transborda o carga a pulso\r\n elif len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8 \\\r\n and camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n yield self.process(operaciones_complementarias[\"Primer pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 0))\r\n\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n carga, self.medios_almacenamiento[\"Almacen 1\"], float(\"Inf\")))\r\n\r\n yield self.process(operaciones_complementarias[\"Segundo pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n # En otro caso, si al menos se dispone producto en almacenes, entonces,\r\n # se carga con 
tolva desde almacen.\r\n elif camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n carga = operaciones[\"Carga con tolva\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n # Si ningun caso anterior fue satisfecho se genera y muestra un error\r\n else:\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # En otro caso, si la cola de la pala mecánica es aceptable o la cola de las cuadrillas es muy larga,\r\n # entonces, se carga con pala mecanica.\r\n elif len(operaciones[\"Carga con pala mecanica\"].recurso.cola) <= 10 \\\r\n or len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8:\r\n\r\n yield self.process(operaciones_complementarias[\"Primer pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n carga = operaciones[\"Carga con pala mecanica\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n yield self.process(operaciones_complementarias[\"Segundo pesaje - B3\"].ejecutar(self, camion))\r\n\r\n # En otro caso, si la cola de cuadrillas es aceptable, entonces, se transborda o carga a pulso.\r\n elif len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8:\r\n\r\n yield self.process(operaciones_complementarias[\"Primer pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n # Si no hay posibilidad de que arriben camiones para transbordo, se carga a pulso\r\n if len(self.recursos_atencion[\"Estacion Volcadora\"].cola) > 10 \\\r\n and len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8:\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n # En caso contrario, se transborda o carga a pulso\r\n else:\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 10))\r\n\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n carga, self.medios_almacenamiento[\"Almacen 1\"], float(\"Inf\")))\r\n\r\n yield self.process(operaciones_complementarias[\"Segundo pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n # Si ningun caso anterior fue satisfecho se genera y muestra un error\r\n else:\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # Manipuleo de carga a granel seca en almacenes externos\r\n elif camion.carga in [\"Grano de Soya\"]:\r\n\r\n # Si se dispone algún camion esperando por transbordo, entonces,\r\n # se interrumpe su espera y se transborda a pulso\r\n if camion.dispone_camion_esperando_camion(self):\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 0))\r\n\r\n # Si el camion espera se genera y muestra un error\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # En caso contrario, si la pala mecanica tiene una cola aceptable o la cola de las cuadrillas\r\n # es muy larga, entonces, se carga con pala mecanica\r\n 
elif len(self.recursos_atencion[\"Pala Mecanica\"].cola) <= 10 \\\r\n or len(self.recursos_atencion[\"Pala Mecanica\"].cola) > 8:\r\n\r\n carga = operaciones[\"Carga con pala mecanica\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen Ext\"]))\r\n\r\n # Si la cola de la pala mecanica es muy larga y la de las cuadrillas es aceptable,\r\n # entonces, tenemos dos casos:\r\n else:\r\n\r\n # Si se dispone producto, se transborda o carga a pulso con poca paciencia\r\n if camion.dispone_producto_espacio_medios_almacenamiento(self):\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 10))\r\n\r\n # Si el camion espero se procede con un tranbordo o carga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n carga, self.medios_almacenamiento[\"Almacen Ext\"], float(\"Inf\")))\r\n\r\n # Si no se dispone producto, se transborda o carga a pulso con mayor paciencia\r\n else:\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 30))\r\n\r\n # Si el camion espero se procede con un tranbordo o carga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n carga, self.medios_almacenamiento[\"Almacen Ext\"], float(\"Inf\")))\r\n\r\n # Manipuleo de camion por descargar\r\n elif camion.tipo == \"Descarga\":\r\n\r\n # Manipuleo de carga a granel en almacenes propios\r\n if camion.carga in [\"Harina de Soya - Hi Pro/Pellet de Soya\"]:\r\n\r\n # Si se dispone espacio en Tolva y, la cola de la volcadora es aceptable o la cola de cuadrillas\r\n # es muy larga, entonces, se descarga a partir de un transbordo en sistema mecanizado.\r\n if (camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Tolva\"]) or\r\n not camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"])) \\\r\n and (len(self.recursos_atencion[\"Estacion Volcadora\"].cola) <= 10 or\r\n len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8):\r\n\r\n transbordo = operaciones[\"Transbordo en sistema mecanizado (D)\"]\r\n ejecucion_transbordo = yield self.process(\r\n transbordo.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Tolva\"]))\r\n\r\n # En caso que no se ejecute el transbordo segenera y muestra un error\r\n if ejecucion_transbordo in [\"No ejecutada por recurso\", \"No ejecutada por producto\"]:\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # En otro caso, si se dispone espacio en Almacen 1 y, la cola de la volcadora es acepetable o\r\n # la cola de cuadrillas es muy larga, entonces, se descarga con sistema mecanicado a almacen.\r\n elif camion.dispone_producto_espacio_medio_almacenamiento(self.medios_almacenamiento[\"Almacen 1\"]) \\\r\n and (len(self.recursos_atencion[\"Estacion Volcadora\"].cola) <= 10 or\r\n 
len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8):\r\n\r\n descarga = operaciones[\"Descarga con volcadora\"]\r\n yield self.process(\r\n descarga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n # En otro caso, si se dispone producto en almacen 1 y la cola de las cuadrillas es aceptable,\r\n # entonces, se transborda o descarga a pulso.\r\n elif camion.dispone_producto_espacio_medio_almacenamiento(self.medios_almacenamiento[\"Almacen 1\"]) \\\r\n and len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8:\r\n\r\n # Si no hay posibilidad de que arriben camiones para transbordo, se descarga a pulso\r\n if len(self.recursos_atencion[\"Estacion Tolva/Balanza 3\"].cola) <= 10 \\\r\n or len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8 \\\r\n or not camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n descarga = operaciones[\"Descarga a pulso - Granos\"]\r\n yield self.process(\r\n descarga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n # En caso contrario, se transborda o descarga a pulso\r\n else:\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 20))\r\n\r\n # Si el camion espera procede con un tranbordo o descarga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n descarga = operaciones[\"Descarga a pulso - Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n descarga, self.medios_almacenamiento[\"Almacen 1\"], float(\"Inf\")))\r\n\r\n # En otro caso, si no se dispone producto en almacen 1 y la cola de las cuadrillas es aceptable,\r\n # entonces, se transborda a pulso.\r\n elif not camion.dispone_producto_espacio_medio_almacenamiento(self.medios_almacenamiento[\"Almacen 1\"]) \\\r\n and len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8:\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self))\r\n\r\n # Si el camion espera procede con un tranbordo o descarga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\")))\r\n\r\n # Si ningun caso anterior fue satisfecho se genera y muestra un error\r\n else:\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # Manipuleo de carga a granel en almacenes externos\r\n elif camion.carga in [\"Grano de Soya\"]:\r\n\r\n # Si la cola de pala mecanica no es muy larga, se descarga a pulso.\r\n if len(operaciones[\"Carga con pala mecanica\"].recurso.cola) <= 10 \\\r\n or len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8:\r\n\r\n descarga = operaciones[\"Descarga a pulso - Granos\"]\r\n yield self.process(\r\n descarga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen Ext\"]))\r\n\r\n # En otro caso, se transborda o descarga a pulso\r\n else:\r\n # Espera camion para realizar transbordo o interrumpe la espera de otro\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n 
camion.espera_transbordo_o_interrumpe(self, 30))\r\n\r\n # Si el camion espera procede con un tranbordo o descarga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n descarga = operaciones[\"Descarga a pulso - Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n descarga, self.medios_almacenamiento[\"Almacen Ext\"], float(\"Inf\")))", "def procesar_listado_marcas_leche(lista):\r\n promedio = calcular_promedio(lista)\r\n marca_mayor_stock_valorizado = calcular_mayor_stock_valorizado(lista)\r\n marcas_menor_produccion = calcular_tres_marcas_menor_produccion(lista)\r\n \r\n print(\"El promedio de precios es de: {}\\n\".format(promedio))\r\n print(\"La marca con mayor stock valorizado es: {}\\n\".format(marca_mayor_stock_valorizado))\r\n print(\"Las tres marcas de menor volumen de produccion son: \", marcas_menor_produccion[0], \" \", marcas_menor_produccion[1], \" \",marcas_menor_produccion[2])\r\n \r\n return len(lista)", "def iniciar():\n \n for sala in range(3):\n for fila_letra in range(21):\n for numero_poltrona in range(20):\n poltrona[sala,fila_letra,numero_poltrona],faturamentoSala[sala,fila_letra,numero_poltrona]=0,0.0", "def make_female_m1_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset):\n \"\"\"\n 1 Get information from list.\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n x_m2 = m2_info[0]\n y_m2 = m2_info[1]\n z_m = m2_info[2]\n\n m2_points = m2_info[3]\n m2_p0 = m2_points[0]\n m2_p1 = m2_points[1]\n m2_p2 = m2_points[2]\n m2_p3 = m2_points[3]\n\n x_m3 = m3_info[0]\n y_m3 = m3_info[1]\n z_m = m3_info[2]\n\n m3_points = m3_info[3]\n m3_p0 = m3_points[0]\n m3_p1 = m3_points[1]\n m3_p2 = m3_points[2]\n m3_p3 = m3_points[3]\n\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n \"\"\"\n 2 Get base point to make SHIGUCHI points. (dx, dy)\n Get base point to make AIKAKI shape. 
(ix, iy)\n \"\"\"\n # SHIGUCHI\n dx_U = m2_p3[0]\n dy_U = m2_p3[1]\n\n dx_L = m3_p2[0]\n dy_L = m3_p2[1]\n\n # AIKAKI\n tx = m1_p0[0]\n ty = (m1_p0[1] + m1_p1[1]) / 2\n\n \"\"\"\n 3 AIKAKI points\n \"\"\"\n y_k = z_m\n\n AIAKAKI_offset = 0.2\n\n # male AIKAKI\n p = (tx, ty)\n p0 = (tx, ty - z_m / 2 + AIAKAKI_offset / 2)\n p1 = (tx + x_m1 / 2, ty - z_m / 2 + AIAKAKI_offset / 2)\n p2 = (tx + x_m1 / 2, ty + z_m / 2 - AIAKAKI_offset / 2)\n p3 = (tx, ty + z_m / 2 - AIAKAKI_offset / 2)\n male_AIKAKI_points = (p0, p1, p2, p3)\n\n # female AIKAKI\n p = (tx, ty)\n p0 = (tx + x_m1, ty + z_m / 2 - AIAKAKI_offset / 2)\n p1 = (tx + x_m1 / 2, ty + z_m / 2 - AIAKAKI_offset / 2)\n p2 = (tx + x_m1 / 2, ty - z_m / 2 + AIAKAKI_offset / 2)\n p3 = (tx + x_m1, ty - z_m / 2 + AIAKAKI_offset / 2)\n female_AIKAKI_points = (p0, p1, p2, p3)\n\n \"\"\"\n 4 Call approriate function.\n \"\"\"\n if SHIGUCHI_name == 'TOME':\n pass\n\n elif SHIGUCHI_name == 'IRIWA':\n dx = dx_U\n dy = dy_U\n\n m_info = m2_info\n choice = 'UpperRight'\n m2_KUMIKI_points1, m2_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_KUMIKI_points1)\n # rs.AddPolyline(m2_KUMIKI_points2)\n\n m2_KUMIKI_points2.reverse()\n\n dx = dx_L\n dy = dy_L\n\n m_info = m3_info\n choice = 'LowerRight'\n m3_KUMIKI_points1, m3_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_KUMIKI_points1)\n # rs.AddPolyline(m3_KUMIKI_points2)\n\n elif SHIGUCHI_name == 'SANMAIKUMI':\n pass\n\n elif SHIGUCHI_name == 'AIKAKI':\n pass\n\n elif SHIGUCHI_name == 'HAKO':\n pass\n\n else:\n sys.exit()\n\n \"\"\"\n 5 Get SEN information.\n \"\"\"\n SEN_info = get_m1_m4_SEN_info(tx, ty, m1_info, y_k)\n\n # upper shape\n upper_shape_upper, upper_shape_lower =\\\n m1_make_upper_shape_points_list(tx, ty, m1_info, SEN_info)\n\n upper_shape_upper_left_row = upper_shape_upper[0]\n upper_shape_upper_right_row = upper_shape_upper[1]\n\n upper_shape_lower_left_row = upper_shape_lower[0]\n upper_shape_lower_right_row = upper_shape_lower[1]\n\n # lower shape\n lower_shape_upper, lower_shape_lower =\\\n m1_make_lower_shape_points_list(tx, ty, m1_info, SEN_info)\n\n lower_shape_upper_left_row = lower_shape_upper[0]\n lower_shape_upper_right_row = lower_shape_upper[1]\n\n lower_shape_lower_left_row = lower_shape_lower[0]\n lower_shape_lower_right_row = lower_shape_lower[1]\n\n # middle shape\n middle_shape_upper, middle_shape_lower =\\\n m1_make_middle_shape_points_list(tx, ty, m1_info, SEN_info)\n\n middle_shape_upper_left_row = middle_shape_upper[0]\n middle_shape_upper_right_row = middle_shape_upper[1]\n\n middle_shape_lower_left_row = middle_shape_lower[0]\n middle_shape_lower_right_row = middle_shape_lower[1]\n\n \"\"\"\n 6 Extend list\n \"\"\"\n\n # Upper\n female_upper_m1 = []\n female_upper_m1.append(m1_p0)\n female_upper_m1.extend(upper_shape_lower_left_row)\n female_upper_m1.extend(upper_shape_upper_left_row)\n female_upper_m1.append(m1_p1)\n\n female_upper_m1.extend(m2_KUMIKI_points2)\n\n female_upper_m1.append(m1_p2)\n female_upper_m1.extend(upper_shape_upper_right_row)\n female_upper_m1.extend(female_AIKAKI_points)\n female_upper_m1.extend(upper_shape_lower_right_row)\n female_upper_m1.append(m1_p3)\n\n female_upper_m1.extend(m3_KUMIKI_points2)\n\n female_upper_m1.append(m1_p0)\n\n # rs.AddPolyline(female_upper_m1)\n\n # Middle\n female_middle_m1 = []\n female_middle_m1.append(m1_p0)\n female_middle_m1.extend(middle_shape_lower_left_row)\n female_middle_m1.extend(middle_shape_upper_left_row)\n 
female_middle_m1.append(m1_p1)\n\n female_middle_m1.extend(m2_KUMIKI_points2)\n\n female_middle_m1.append(m1_p2)\n female_middle_m1.extend(middle_shape_upper_right_row)\n female_middle_m1.extend(female_AIKAKI_points)\n female_middle_m1.extend(middle_shape_lower_right_row)\n female_middle_m1.append(m1_p3)\n\n female_middle_m1.extend(m3_KUMIKI_points2)\n\n female_middle_m1.append(m1_p0)\n\n # rs.AddPolyline(female_middle_m1)\n\n # Lower\n female_lower_m1 = []\n female_lower_m1.append(m1_p0)\n female_lower_m1.extend(lower_shape_lower_left_row)\n female_lower_m1.extend(lower_shape_upper_left_row)\n female_lower_m1.append(m1_p1)\n\n female_lower_m1.extend(m2_KUMIKI_points2)\n\n female_lower_m1.append(m1_p2)\n female_lower_m1.extend(lower_shape_upper_right_row)\n female_lower_m1.extend(female_AIKAKI_points)\n female_lower_m1.extend(lower_shape_lower_right_row)\n female_lower_m1.append(m1_p3)\n\n female_lower_m1.extend(m3_KUMIKI_points2)\n\n female_lower_m1.append(m1_p0)\n\n # rs.AddPolyline(female_lower_m1)\n\n m1_female_points_list = [female_upper_m1, female_middle_m1, female_lower_m1]\n\n return m1_female_points_list, SEN_info", "def mezclar_bolsa(self):", "def motor_inferencia(x):\n\n # Defino mis operaciones borrosas\n AND = min # Tambien se llama conjuncion o interseccion\n OR = max # Tambien se llama disyuncion o union\n # FUERZA = min # Elijo la conjuncion. Tambien se pueden usar la disyuncion\n\n # --------------------------------------------------------\n # - CALCULO DEL VALOR DE PERTENENCIA DE LOS ANTECEDENTES -\n # --------------------------------------------------------\n\n # Guardo los antecedentes en las variables\n A_MN = []\n A_N = []\n A_Z = []\n A_P = []\n A_MP = []\n\n # Fila 0: P is MN and\n A_MP.append(AND(x[0], x[5])) # V is MN # then F is MP\n A_MP.append(AND(x[0], x[6])) # V is N # then F is MP\n A_MP.append(AND(x[0], x[7])) # V is Z # then F is MP\n A_MP.append(AND(x[0], x[8])) # V is P # then F is MP\n A_MP.append(AND(x[0], x[9])) # V is MP # then F is MP\n\n # Fila 1: P is N and\n A_MN.append(AND(x[1], x[5])) # V is MN # then F is MN\n A_MN.append(AND(x[1], x[6])) # V is N # then F is MN\n A_N.append(AND(x[1], x[7])) # V is Z # then F is N\n A_N.append(AND(x[1], x[8])) # V is P # then F is N\n A_N.append(AND(x[1], x[9])) # V is MP # then F is N\n\n # Fila 2: P is Z and\n A_MN.append(AND(x[2], x[5])) # V is MN # then F is MN\n A_N.append(AND(x[2], x[6])) # V is N # then F is N\n A_Z.append(AND(x[2], x[7])) # V is Z # then F is Z\n A_P.append(AND(x[2], x[8])) # V is P # then F is P\n A_MP.append(AND(x[2], x[9])) # V is MP # then F is MP\n\n # Fila 3: P is P and\n A_P.append(AND(x[3], x[5])) # V is MN # then F is P\n A_P.append(AND(x[3], x[6])) # V is N # then F is P\n A_P.append(AND(x[3], x[7])) # V is Z # then F is P\n A_MP.append(AND(x[3], x[8])) # V is P # then F is MP\n A_MP.append(AND(x[3], x[9])) # V is MP # then F is MP\n\n # Fila 4: P is MP and\n A_MN.append(AND(x[4], x[5])) # V is MN # then F is MN\n A_MN.append(AND(x[4], x[6])) # V is N # then F is MN\n A_MN.append(AND(x[4], x[7])) # V is Z # then F is MN\n A_MN.append(AND(x[4], x[8])) # V is P # then F is MN\n A_MN.append(AND(x[4], x[9])) # V is MP # then F is MN\n\n # ------------------------------------------------------------------------------------------\n # - COMBINACION DE LOS ANTECEDENTES Y RESOLUCION DE LA IMPLICACION -\n # ------------------------------------------------------------------------------------------\n\n # [ F_MN, F_N, F_Z, F_P, F_MP ]\n F = [OR(A_MN), OR(A_N), OR(A_Z), OR(A_P), 
OR(A_MP)]\n\n return F", "def chave_uf_ano_mes_de_lista(elemento):\n data,mm,uf = elemento\n anomes = '-'.join(data.split('-')[:2])\n chave = f'{uf}-{anomes}'\n if float(mm) < 0:\n mm = 0.0\n else:\n mm = float(mm)\n return chave,mm", "def llamadas( self ):\n llamadas = []\n for pista in self.pistas:\n\n if self.verbose:\n print( pista.verbose( self.verbose ) )\n \n \"\"\" Generar track p/c pista \"\"\"\n delta = 0\n track = pista.numero\n \n \"\"\" Parametros de Pista Primer articulación de la parte, agregar\n eventos fundamentales: pulso, armadura de clave, compás y programa.\n \"\"\"\n llamadas.append([\n 'addTrackName',\n track,\n delta,\n pista.nombre\n ])\n\n if self.copyright:\n llamadas.append([\n 'addCopyright',\n track,\n delta,\n self.copyright\n ])\n \n \"\"\" Loop principal:\n Genera una secuencia de eventos MIDI lista de articulaciones. \"\"\"\n \n for segmento in pista.segmentos:\n canal = segmento.canal\n #delta += segmento.desplazar\n \n if delta < 0:\n raise ValueError( 'No se puede desplazar antes q el inicio' ) \n pass\n \n \"\"\" Agregar propiedades de segmento. \"\"\"\n \n if segmento.cambia( 'metro' ):\n llamadas.append([\n 'addTimeSignature',\n track,\n delta,\n segmento.metro[ 'numerador' ],\n segmento.metro[ 'denominador' ],\n segmento.metro[ 'relojes_por_tick' ], \n segmento.metro[ 'notas_por_pulso' ]\n ])\n \n if segmento.cambia( 'bpm' ):\n llamadas.append([\n 'addTempo',\n track,\n delta,\n segmento.bpm,\n ])\n \n if segmento.cambia( 'clave' ):\n llamadas.append([\n 'addKeySignature',\n track,\n delta,\n segmento.clave[ 'alteraciones' ],\n 1, # multiplica por el n de alteraciones\n segmento.clave[ 'modo' ]\n ])\n \n if segmento.afinacionNota:\n llamadas.append([\n 'changeNoteTuning',\n track, \n segmento.afinacionNota[ 'afinaciones' ],\n segmento.afinacionNota[ 'canalSysEx' ],\n segmento.afinacionNota[ 'tiempoReal' ],\n segmento.afinacionNota[ 'programa' ],\n ])\n \n if segmento.afinacionBanco:\n llamadas.append([\n 'changeTuningBank',\n track, \n canal,\n delta,\n segmento.afinacionBanco[ 'banco' ],\n segmento.afinacionBanco[ 'ordenar' ],\n ])\n \n if segmento.afinacionPrograma:\n llamadas.append([ \n 'changeTuningProgram',\n track, \n canal,\n delta,\n segmento.afinacionPrograma[ 'programa' ],\n segmento.afinacionPrograma[ 'ordenar' ],\n ])\n \n if segmento.sysEx:\n llamadas.append([\n 'addSysEx',\n track, \n delta, \n segmento.sysEx[ 'fabricante' ],\n segmento.sysEx[ 'playload' ],\n ])\n \n if segmento.uniSysEx:\n llamadas.append([\n 'addUniversalSysEx',\n track, \n delta, \n segmento.uniSysEx[ 'codigo' ],\n segmento.uniSysEx[ 'subCodigo' ],\n segmento.uniSysEx[ 'playload' ],\n segmento.uniSysEx[ 'canal' ],\n segmento.uniSysEx[ 'tiempoReal' ],\n ])\n \n if segmento.NRPN:\n llamadas.append([\n 'makeNRPNCall',\n track, \n canal, \n delta, \n segmento.NRPN[ 'control_msb' ],\n segmento.NRPN[ 'control_lsb' ],\n segmento.NRPN[ 'data_msb' ],\n segmento.NRPN[ 'data_lsb' ],\n segmento.NRPN[ 'ordenar' ],\n ])\n \n if segmento.RPN:\n llamadas.append([\n 'makeRPNCall',\n track, \n canal, \n delta, \n segmento.RPN[ 'control_msb' ],\n segmento.RPN[ 'control_lsb' ],\n segmento.RPN[ 'data_msb' ],\n segmento.RPN[ 'data_lsb' ],\n segmento.RPN[ 'ordenar' ],\n ])\n \n for articulacion in segmento.articulaciones:\n \"\"\" Agrega cualquier cambio de parametro, \n comparar cada uno con la articulacion previa. 
\"\"\"\n \n if articulacion.cambia( 'bpm' ):\n llamadas.append([\n 'addTempo',\n track,\n delta,\n articulacion.bpm,\n ])\n \n if articulacion.cambia( 'programa' ):\n llamadas.append([\n 'addProgramChange',\n track,\n canal, \n delta, \n articulacion.programa\n ])\n \n if articulacion.letra:\n llamadas.append([\n 'addText',\n track,\n delta,\n articulacion.letra\n ])\n \n if articulacion.tono:\n llamadas.append([\n 'addPitchWheelEvent',\n track,\n canal, \n delta, \n articulacion.tono\n ])\n \n \"\"\" Agregar nota/s (altura, duracion, dinamica).\n Si existe acorde en la articulación armar una lista con cada voz\n superpuesta. o una lista de solamente un elemento. \"\"\"\n voces = [ articulacion.altura ]\n if articulacion.acorde:\n voces = articulacion.acorde \n \n for voz in voces:\n if articulacion.dinamica:\n llamadas.append([\n 'addNote',\n track, \n canal, \n voz, \n delta, \n articulacion.duracion, \n articulacion.dinamica\n ])\n #else:\n # print('eo')\n \n if articulacion.controles:\n \"\"\" Agregar cambios de control \"\"\"\n for control in articulacion.controles:\n for control, valor in control.items():\n llamadas.append([\n 'addControllerEvent',\n track, \n canal, \n delta, \n control,\n valor, \n ])\n \n delta += articulacion.duracion\n return llamadas", "def __init__(self, lista_enlazada): \n\t\tself.lista = lista_enlazada\n\t\tself.anterior = None\n\t\tself.actual = lista_enlazada.prim\n\t\tself.pila_anteriores = Pila()\n\t\tself.posicion = 0", "def __init__(self, marqueur, allele, hauteur, concordance_mere_foetus, informatif, num_foetus, contamination, taux):\n\n super().__init__(marqueur, allele, hauteur, informatif)\n self.num_foetus = num_foetus\n self.contamination = contamination\n self.taux = taux\n self.concordance_mere_foetus = concordance_mere_foetus", "def verificacion(autor):\n myCont=Contribucion.obInstances()\n autor.listcal=[]\n for instance in myCont:\n autor.verificarCalificacion(instance)\n\n publicaciones=autor.publicaciones\n listcal=autor.listcal\n calMax=autor.calificacionMax\n autor.calcularPromedio()\n promedio=autor.promedioCalificacion\n return [publicaciones,listcal,calMax,promedio]", "def pacMare(date, estac):\n monthList = [\"JAN\", \"FEV\", \"MAR\", \"ABR\", \"MAI\", \"JUN\", \"JUL\",\n \"AGO\", \"SET\", \"OUT\", \"NOV\", \"DEZ\"]\n an = date.year\n Mesl = date.month\n strmes = monthList[Mesl-1]\n di = date.day\n data1 = \"%s/%s/%s\" %(di, Mesl, an)\n\n DT = 1\n HI = -3\n d0 = 1\n\n estacoes = Estacao()\n constantes = Constantes()\n cadastro = Cadastro()\n combinacoes = Combinacoes()\n\n f = estacoes.data['name'].index(estac)\n Cod = estacoes.data['ID'][f]\n LA1 = estacoes.data['latG'][f]\n LA2 = estacoes.data['latM'][f]\n LO1 = estacoes.data['lonG'][f]\n LO2 = estacoes.data['lonM'][f]\n nc = estacoes.data['ncomp'][f]\n NM = estacoes.data['nm'][f]\n fu = estacoes.data['fuso'][f]\n ca = estacoes.data['carta'][f]\n hemlat = estacoes.data['hemlat'][f]\n hemlon = estacoes.data['hemlon'][f]\n \n infoList = []\n lat = base10Tobase60(lat=base60Tobase10(LA1, LA2, hemlat))\n lon = base10Tobase60(lon=base60Tobase10(LO1, LO2, hemlon))\n latSTR = u\"Lat: %s\" % lat\n lonSTR = u\"Lon: %s\" % lon\n ncSTR = u\"Componentes: %s\" %(nc)\n nmSTR = u\"Nível Médio: %s cm\" %(int(NM))\n fuSTR = u\"Fuso: - %sh\" %(int(fu))\n caSTR = u\"Número Carta: %s\" %(ca)\n\n infoList.append(latSTR)\n infoList.append(lonSTR)\n infoList.append(ncSTR)\n infoList.append(nmSTR)\n infoList.append(fuSTR)\n infoList.append(caSTR)\n\n f = constantes.data['ID'].index(Cod)\n ai = 
constantes.data['const'][ f:f+nc ]\n h = constantes.data['amp'][ f:f+nc ]\n G = constantes.data['phase'][ f:f+nc ]\n HH = h[:]\n GG = G[:]\n\n MK, constID = [],[]\n for k in range(nc):\n f = cadastro.data['const'].index(ai[k])\n MK.append(cadastro.data['M'][f])\n constID.append(cadastro.data['cod'][f])\n MK = str2int(MK)\n constID = str2int(constID)\n\n BB, CC = [],[]\n for k in range(nc):\n f = combinacoes.data['ID'].index(constID[k])\n aux = combinacoes.data['subs'][ f: f+MK[k] ]\n aux = str2float(aux)\n BB.append(aux)\n aux = combinacoes.data['comb'][ f: f+MK[k] ]\n aux = str2float(aux)\n CC.append(aux)\n\n cdat = open(web2pyPath + \"modules/data/Vdata.txt\")\n V = []\n for line in cdat.readlines():\n line2 = line.strip('\\r\\n').split(',')\n line2 = str2float(line2)\n V.append(line2)\n\n D = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n n = 30\n\n # calculo dos elementos astronomicos\n MB = float(an % 4)\n MC = float(an % 100)\n MD = float(an % 400)\n dd = float(di)\n\n if MB == 0 and MC != 0 or MD == 0:\n D[2] = 29\n\n i1 = float(an / 100)\n i2 = i1 - 19\n if i2 != 0:\n t1 = i2\n j1 = abs(i2)\n c3 = j1 / i2\n t2 = t1 * t1 * c3\n c1 = int(j1 * 0.75 + 0.5) * c3\n else:\n t1 = 0.\n t2 = 0.\n c1 = 0.\n\n s0 = 277.0224 + 307.8831 * t1 - 0.0011 * t2 - 13.1764 * c1\n h0 = 280.1895 + 0.7689 * t1 + 0.0003 * t2 - 0.9856 * c1\n p0 = 334.3853 + 109.034 * t1 - 0.0103 * t2 - 0.1114 * c1\n nl = 100.7902 + 134.142 * t1 - 0.0021 * t2 - 0.053 * c1\n P1 = 281.2208 + 1.7192 * t1 + 0.00045 * t2 - 0.000047 * c1\n\n for i in range(Mesl):\n di = float(di + D[i])\n\n # bug de 2001\n if an <= 2000:\n di = di - 1 \n\n IA = i1 * 100\n BI = an - IA\n\n AI = int((BI - 1) * 0.25); AI = float(AI)\n if MD == 0: AI = AI + 1\n AD = AI + di\n N2 = n * DT * 0.5\n AV = N2\n SN = AV / 10000\n b = [None]\n b.append( s0 + 129.38481 * BI + 13.1764 * AD )\n b.append( h0 - 0.23872 * BI + 0.98565 * AD )\n b.append( p0 + 40.66249 * BI + 0.1114 * AD )\n b.append(None)\n b.append( nl + 19.32818 * BI + 0.05295 * AD )\n b.append( P1 + 0.01718 * BI + 0.000047 * AD )\n b[0] = b[2] - b[1]\n b[4] = 90.\n b.append( b[3] + N2 * 0.00464183 )\n b.append( b[5] + N2 * 0.00220641 )\n b.append( b[6] + N2 * 0.00000196 )\n\n a = [ [0.,1.,0.], [0.,2.,0.], [0.,3.,0.], [0.,0.,2.], [0.,1.,2.], [1.,0.,-1.], \n [2.,-1.,-1.], [2.,-1.,0.], [2.,-1.,1.], [2.,0.,0.], [2.,1.,0.], \n [2.,2.,0.], [2.,3.,0.] 
]\n\n b[0] = b[0] + HI * 14.49205211\n b[1] = b[1] + HI * 0.54902653\n b[2] = b[2] + HI * 0.0410686\n b[3] = b[3] + HI * 0.00464183\n b[5] = b[5] + HI * 0.00220641\n b[6] = b[6] + HI * 0.00000196\n\n z, Q = [], []\n for i in range(13):\n s = 0.\n for J in range(3):\n s = s + a[i][J] * b[J + 7]\n \n XX = s * 0.017453\n z.append(np.cos(XX))\n Q.append(np.sin(XX))\n\n W = []\n for i in range(37):\n WQ = 0.\n for J in range(5):\n WQ = WQ + V[i][J] * b[J]\n \n if i == 13 or i == 30:\n W.append( WQ + b[9] )\n elif i == 17 or i == 32:\n W.append( WQ - b[9] )\n else:\n W.append(WQ)\n\n F, U = [], []\n for k in range(38):\n F.append(None) # apenas para facilitar a copia do codigo em VB\n U.append(None) # depois, ambos serao popped-up\n z.insert(0, None) # idem\n Q.insert(0, None) # idem\n\n F[1] = 1\n F[2] = 1\n F[3] = 1 - 0.0307 * z[1] + 0.0007 * z[2] - 0.0534 * z[10] - 0.0218 * z[11] - 0.0059 * z[12]\n F[4] = 1 + 0.4142 * z[1] + 0.0377 * z[2] - 0.0008 * z[3] - 0.0028 * z[8] + 0.0431 * z[10] - 0.0023 * z[11]\n F[5] = 1 + 0.4141 * z[1] + 0.0384 * z[2] - 0.003 * z[7] - 0.003 * z[9] + 0.0179 * z[10] - 0.004 * z[12] - 0.0017 * z[13]\n F[6] = 1 + 0.1885 * z[1] - 0.0063 * z[2] - 0.0063 * z[12]\n F[7] = 1 + 0.1884 * z[1] - 0.0061 * z[2] - 0.0087 * z[10]\n F[8] = 1 + 0.1884 * z[1] - 0.0057 * z[2] + 0.0007 * z[6] - 0.0028 * z[10] - 0.0039 * z[12] - 0.0007 * z[13]\n F[9] = 1 + 0.1881 * z[1] - 0.0058 * z[2] - 0.0576 * z[10] + 0.0175 * z[11]\n F[10] = 1 + 0.1885 * z[1] - 0.0058 * z[2] + 0.0001 * z[8] - 0.0054 * z[10] - 0.001 * z[11]\n F[11] = 1 - 0.2454 * z[1] - 0.0142 * z[2] + 0.0445 * z[10]\n F[12] = 1 + 0.1714 * z[1] - 0.0054 * z[2] + 0.3596 * z[10] + 0.0664 * z[11] - 0.0057 * z[12]\n F[13] = 1 + 0.1905 * z[1]\n F[14] = 1 - 0.0078 * z[1]\n F[15] = 1 - 0.0112 * z[1] + 0.0007 * z[2] - 0.0004 * z[4] - 0.0015 * z[10] - 0.0003 * z[11]\n F[16] = 1\n F[17] = 1 + 0.1158 * z[1] - 0.0029 * z[2] + 0.0001 * z[11]\n F[18] = 1 + 0.019 * z[1]\n F[19] = 1 - 0.0384 * z[1] - 0.0185 * z[2] + 0.0132 * z[4] + 0.0105 * z[8] + 0.0344 * z[10]\n F[20] = 1 + 0.1676 * z[1] + 0.03 * z[11]\n F[21] = 1 + 0.1685 * z[1] - 0.0047 * z[2] - 0.0152 * z[10] - 0.0098 * z[11] - 0.0057 * z[12]\n F[22] = 1 + 0.6398 * z[1] + 0.1342 * z[2] + 0.008500001 * z[3] + 0.0296 * z[8] + 0.1496 * z[10] - 0.0037 * z[11]\n F[23] = 1 - 0.0337 * z[1]\n F[24] = 1 - 0.0374 * z[1] - 0.061 * z[12]\n F[25] = 1 - 0.0375 * z[1]\n F[26] = 1 - 0.0373 * z[1] + 0.0004 * z[2] + 0.0007 * z[6] - 0.0039 * z[12]\n F[27] = 1 - 0.0373 * z[1] + 0.0042 * z[10] - 0.0036 * z[11]\n F[28] = 1 - 0.0373 * z[1] + 0.0004 * z[2] + 0.0005 * z[10] - 0.0001 * z[11]\n F[29] = 1 - 0.0448 * z[1]\n F[30] = 1 - 0.0367 * z[1] + 0.0047 * z[8] - 0.2505 * z[10] - 0.1102 * z[11] - 0.0156 * z[12]\n F[31] = 1\n F[32] = 1 - 0.0022 * z[1]\n F[33] = 1 - 0.2535 * z[4] + 0.0141 * z[5]\n F[34] = 1 + 0.2852 * z[1] + 0.0324 * z[2]\n F[35] = 1 + 0.4389 * z[1] + 0.0487 * z[2] + 0.0487 * z[10] + 0.065 * z[11]\n F[36] = 1 + 0.4168 * z[1] + 0.0466 * z[2] - 0.078 * z[10]\n F[37] = 1 - 0.0564 * z[1]\n\n U[1] = 0\n U[2] = 0\n U[3] = 0.0007 * Q[1] - 0.0008 * Q[2] - 0.0534 * Q[10] - 0.0218 * Q[11] - 0.0059 * Q[12]\n U[4] = 0.4142 * Q[1] + 0.0377 * Q[2] - 0.0008 * Q[3] + 0.0027 * Q[8] - 0.0432 * Q[10] + 0.0022 * Q[11]\n U[5] = 0.4142 * Q[1] + 0.0384 * Q[2] + 0.003 * Q[7] + 0.003 * Q[9] - 0.018 * Q[10] - 0.004 * Q[12] - 0.0017 * Q[13]\n U[6] = -0.1885 * Q[1] + 0.0062 * Q[2] + 0.0062 * Q[12]\n U[7] = -0.1884 * Q[1] + 0.006 * Q[2] - 0.0087 * Q[10]\n U[8] = -0.1884 * Q[1] + 0.0057 * Q[2] - 0.0008 * Q[6] - 0.0028 * Q[10] + 
0.0039 * Q[12] + 0.0007 * Q[13]\n U[9] = -0.1882 * Q[1] + 0.0057 * Q[2] - 0.0576 * Q[10] + 0.0175 * Q[11]\n U[10] = -0.1885 * Q[1] + 0.0057 * Q[2] + 0.0001 * Q[8] - 0.0064 * Q[10] - 0.001 * Q[11]\n U[11] = -0.1886 * Q[1] - 0.0142 * Q[2] - 0.0446 * Q[10]\n U[12] = -0.2294 * Q[1] - 0.3596 * Q[10] - 0.0665 * Q[11] + 0.0057 * Q[12]\n U[13] = 0.246 * Q[1]\n U[14] = 0.0077 * Q[1]\n U[15] = 0.0111 * Q[1] - 0.0008 * Q[2] - 0.0004 * Q[4] - 0.0015 * Q[10] - 0.0003 * Q[11]\n U[16] = 0\n U[17] = 0.1554 * Q[1] - 0.003 * Q[2] - 0.0002 * Q[11]\n U[18] = 0.019 * Q[1]\n U[19] = -0.0384 * Q[1] - 0.0185 * Q[2] - 0.0132 * Q[4] - 0.0106 * Q[8] - 0.0344 * Q[10]\n U[20] = 0.231 * Q[1] - 0.03 * Q[11]\n U[21] = 0.2274 * Q[1] - 0.0047 * Q[2] - 0.0152 * Q[10] - 0.0098 * Q[11] - 0.0057 * Q[12]\n U[22] = 0.6398 * Q[1] + 0.1342 * Q[2] - 0.0296 * Q[8] - 0.1497 * Q[10] + 0.0037 * Q[11]\n U[23] = 0.0373 * Q[1]\n U[24] = 0.0373 * Q[1] + 0.006 * Q[12]\n U[25] = 0.0373 * Q[1] - 0.0005 * Q[2] - 0.0008 * Q[6] + 0.0039 * Q[12]\n U[26] = 0.0373 * Q[1] - 0.0005 * Q[2] - 0.0008 * Q[6] + 0.0039 * Q[12]\n U[27] = 0.0373 * Q[1] + 0.0042 * Q[10] + 0.0036 * Q[11]\n U[28] = 0.0373 * Q[1] - 0.0005 * Q[2] + 0.0005 * Q[9] + 0.0001 * Q[11]\n U[29] = 0.0487 * Q[1]\n U[30] = 0.0366 * Q[1] + 0.0047 * Q[8] - 0.2505 * Q[9] - 0.1102 * Q[11]\n U[31] = 0\n U[32] = -0.0022 * Q[1]\n U[33] = -0.2535 * Q[4] + 0.0141 * Q[5]\n U[34] = 0.3108 * Q[1] + 0.0324 * Q[2]\n U[35] = 0.4389 * Q[1] + 0.0487 * Q[2] - 0.0488 * Q[9] - 0.065 * Q[11]\n U[36] = 0.4542 * Q[1] + 0.0466 * Q[2] - 0.0078 * Q[10]\n U[37] = 0.0563 * Q[1]\n\n z.pop(0)\n Q.pop(0)\n F.pop(0)\n U.pop(0)\n AV = n * DT * 0.5\n\n for i in range(37):\n XX = F[i]\n YY = U[i]\n F[i] = np.sqrt( XX ** 2 + YY ** 2 )\n U[i] = W[i] + np.arctan(YY / XX) * 57.29578\n U[i] = U[i] - int(U[i] / 360) * 360\n if U[i] < 0: U[i] = U[i] + 360\n\n\n # calculo das alturas\n HC, GC = [],[]\n for k in range(110):\n HC.append(0)\n GC.append(0)\n\n for i in range(nc):\n s = 0.\n WQ = 0.\n T = 1.\n\n for J in range(MK[i]):\n jj = int(BB[i][J])\n kk = CC[i][J]\n T = T * F[jj-1] ** abs(kk)\n s = s + U[jj-1] * kk\n WQ = WQ + V[jj-1][5] * kk\n ZQ = s\n \n h[i] = T * h[i]\n s = s - G[i]\n if s < 0: s = s + 360.\n G[i] = s\n try: \n W[i] = WQ * DT\n except IndexError:\n W.append( WQ * DT )\n HC[i] = T * HC[i]\n ZQ = ZQ - GC[i]\n if ZQ < 0: ZQ = ZQ + 360.\n GC[i] = ZQ\n\n x, Y2, y = [],[],[]\n MM = 0\n for i in range(n):\n s = 0.\n ZQ = 0.\n\n for j in range(nc):\n AA = G[j] * 0.017453\n s = s + h[j] * np.cos(AA)\n G[j] = G[j] + W[j]\n AC = GC[j] * 0.017453\n ZQ = ZQ + HC[j] * np.cos(AC)\n GC[j] = GC[j] + W[j]\n\n x.append(s + NM)\n Y2.append(x[i])\n y.append(ZQ + MM)\n\n x = np.array(x, dtype=np.float32)\n x = x/100.\n h = x[3:-3]\n hours = np.arange(24)\n years, months, days = 0*hours+an, 0*hours+Mesl, 0*hours+int(dd)\n time = []\n for year, month, day, hour in zip(years, months, days, hours):\n time.append( dt.datetime(year, month, day, hour) )\n\n time = mpldates.date2num(time)\n time2 = np.linspace(time[0], time[-1], 500)\n\n interp = interp1d(time, h, kind='cubic')\n h2 = interp(time2)\n\n dh = np.gradient(h2)\n dhSign = dh > 0\n # gathering pairs\n pairs = []\n for k in range(len(dh)-1):\n pairs.append([dhSign[k], dhSign[k+1]])\n\n f = []\n for k in range(len(pairs)):\n if pairs[k] == [True, False] or pairs[k] == [False, True]:\n f.append(k)\n\n datas = mpldates.num2date(time2[f])\n hora = []\n for data in datas:\n hora.append(\"%02i:%02i\" %(data.hour, data.minute))\n altura = h2[f]\n altura = ['%.1f' % a for a in 
altura]\n\n return infoList, hora, altura, time2, h2", "def make_female_m4_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset):\n \"\"\"\n 1 Get information from list.\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n x_m2 = m2_info[0]\n y_m2 = m2_info[1]\n z_m = m2_info[2]\n\n m2_points = m2_info[3]\n m2_p0 = m2_points[0]\n m2_p1 = m2_points[1]\n m2_p2 = m2_points[2]\n m2_p3 = m2_points[3]\n\n x_m3 = m3_info[0]\n y_m3 = m3_info[1]\n z_m = m3_info[2]\n\n m3_points = m3_info[3]\n m3_p0 = m3_points[0]\n m3_p1 = m3_points[1]\n m3_p2 = m3_points[2]\n m3_p3 = m3_points[3]\n\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n \"\"\"\n 2 Get base point to make SHIGUCHI points. (dx, dy)\n Get base point to make AIKAKI shape. (ix, iy)\n \"\"\"\n # SHIGUCHI\n dx_U = m2_p0[0]\n dy_U = m2_p0[1]\n\n dx_L = m3_p1[0]\n dy_L = m3_p1[1]\n\n # AIKAKI\n tx = m4_p0[0]\n ty = (m4_p0[1] + m4_p1[1]) / 2\n\n \"\"\"\n 3 AIKAKI points\n \"\"\"\n y_k = z_m\n\n AIKAKI_offset = 0.2\n\n # male AIKAKI\n p = (tx, ty)\n p0 = (tx, ty - z_m / 2 + AIKAKI_offset / 2)\n p1 = (tx - x_m4 / 2, ty - z_m / 2 + AIKAKI_offset / 2)\n p2 = (tx - x_m4 / 2, ty + z_m / 2 - AIKAKI_offset / 2)\n p3 = (tx, ty + z_m / 2 - AIKAKI_offset / 2)\n male_AIKAKI_points = (p0, p1, p2, p3)\n\n # female AIKAKI\n p = (tx, ty)\n p0 = (tx - x_m4, ty + z_m / 2 - AIKAKI_offset / 2)\n p1 = (tx - x_m4 / 2, ty + z_m / 2 - AIKAKI_offset / 2)\n p2 = (tx - x_m4 / 2, ty - z_m / 2 + AIKAKI_offset / 2)\n p3 = (tx - x_m4, ty - z_m / 2 + AIKAKI_offset / 2)\n female_AIKAKI_points = (p0, p1, p2, p3)\n\n \"\"\"\n 4 Call approriate function.\n \"\"\"\n if SHIGUCHI_name == 'TOME':\n pass\n\n elif SHIGUCHI_name == 'IRIWA':\n dx = dx_U\n dy = dy_U\n\n m_info = m2_info\n choice = 'UpperLeft'\n m2_KUMIKI_points1, m2_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_KUMIKI_points1)\n # rs.AddPolyline(m2_KUMIKI_points2)\n\n m2_KUMIKI_points2.reverse()\n\n dx = dx_L\n dy = dy_L\n\n m_info = m3_info\n choice = 'LowerLeft'\n m3_KUMIKI_points1, m3_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_KUMIKI_points1)\n # rs.AddPolyline(m3_KUMIKI_points2)\n\n elif SHIGUCHI_name == 'SANMAIKUMI':\n pass\n\n elif SHIGUCHI_name == 'AIKAKI':\n pass\n\n elif SHIGUCHI_name == 'HAKO':\n pass\n\n else:\n sys.exit()\n\n \"\"\"\n 5 Get SEN information.\n \"\"\"\n SEN_info = get_m1_m4_SEN_info(tx, ty, m4_info, y_k)\n\n # upper shape\n upper_shape_upper, upper_shape_lower =\\\n m4_make_upper_shape_points_list(tx, ty, m4_info, SEN_info)\n\n upper_shape_upper_left_row = upper_shape_upper[0]\n upper_shape_upper_right_row = upper_shape_upper[1]\n\n upper_shape_lower_left_row = upper_shape_lower[0]\n upper_shape_lower_right_row = upper_shape_lower[1]\n\n # lower shape\n lower_shape_upper, lower_shape_lower =\\\n m4_make_lower_shape_points_list(tx, ty, m4_info, SEN_info)\n\n lower_shape_upper_left_row = lower_shape_upper[0]\n lower_shape_upper_right_row = lower_shape_upper[1]\n\n lower_shape_lower_left_row = lower_shape_lower[0]\n lower_shape_lower_right_row = lower_shape_lower[1]\n\n # middle shape\n middle_shape_upper, middle_shape_lower =\\\n m4_make_middle_shape_points_list(tx, ty, m4_info, SEN_info)\n\n middle_shape_upper_left_row = 
middle_shape_upper[0]\n middle_shape_upper_right_row = middle_shape_upper[1]\n\n middle_shape_lower_left_row = middle_shape_lower[0]\n middle_shape_lower_right_row = middle_shape_lower[1]\n\n \"\"\"\n 6 Extend list\n \"\"\"\n # Upper\n female_upper_m4 = []\n female_upper_m4.append(m4_p0)\n female_upper_m4.extend(upper_shape_lower_right_row)\n female_upper_m4.extend(upper_shape_upper_right_row)\n female_upper_m4.append(m4_p1)\n\n female_upper_m4.extend(m2_KUMIKI_points2)\n\n female_upper_m4.append(m4_p2)\n female_upper_m4.extend(upper_shape_upper_left_row)\n female_upper_m4.extend(female_AIKAKI_points)\n female_upper_m4.extend(upper_shape_lower_left_row)\n female_upper_m4.append(m4_p3)\n\n female_upper_m4.extend(m3_KUMIKI_points2)\n\n female_upper_m4.append(m4_p0)\n\n # rs.AddPolyline(female_upper_m4)\n\n # Middle\n female_middle_m4 = []\n female_middle_m4.append(m4_p0)\n female_middle_m4.extend(middle_shape_lower_right_row)\n female_middle_m4.extend(middle_shape_upper_right_row)\n female_middle_m4.append(m4_p1)\n\n female_middle_m4.extend(m2_KUMIKI_points2)\n\n female_middle_m4.append(m4_p2)\n female_middle_m4.extend(middle_shape_upper_left_row)\n female_middle_m4.extend(female_AIKAKI_points)\n female_middle_m4.extend(middle_shape_lower_left_row)\n female_middle_m4.append(m4_p3)\n\n female_middle_m4.extend(m3_KUMIKI_points2)\n\n female_middle_m4.append(m4_p0)\n\n # rs.AddPolyline(female_middle_m4)\n\n # Lower\n female_lower_m4 = []\n female_lower_m4.append(m4_p0)\n female_lower_m4.extend(lower_shape_lower_right_row)\n female_lower_m4.extend(lower_shape_upper_right_row)\n female_lower_m4.append(m4_p1)\n\n female_lower_m4.extend(m2_KUMIKI_points2)\n\n female_lower_m4.append(m4_p2)\n female_lower_m4.extend(lower_shape_upper_left_row)\n female_lower_m4.extend(female_AIKAKI_points)\n female_lower_m4.extend(lower_shape_lower_left_row)\n female_lower_m4.append(m4_p3)\n\n female_lower_m4.extend(m3_KUMIKI_points2)\n\n female_lower_m4.append(m4_p0)\n\n # rs.AddPolyline(female_lower_m4)\n\n\n m4_female_points_list = [female_upper_m4, female_middle_m4, female_lower_m4]\n\n return m4_female_points_list, SEN_info", "def crea_falla( lats, lons, prof, dip, strike, latini, latfin, area_sf, profundidad, razon_aspecto ):\n \n # se pasa los arrays de lats y lons a arrays unidimensionales que contienen las coordenadas sin repeticion\n\n # longitudes\n vector_lon_input = lons[0,:] # primera fila de matriz de lons, columnas se repiten\n # se chequea si son crecientes monotonos, util para interpolacion \n if all( x < y for x, y in zip( vector_lon_input, vector_lon_input[1:] ) ):\n vector_lon_input = vector_lon_input\n else:\n vector_lon_input = vector_lon_input[::-1]\n\n # latitudes\n vector_lat_input = lats[:,0] # primera columna de matriz de lats, filas se repiten\n # se chequea si son crecientes monotonos, util para interpolacion \n if all( x < y for x, y in zip( vector_lat_input, vector_lat_input[1:] ) ):\n vector_lat_input = vector_lat_input\n else:\n vector_lat_input = vector_lat_input[::-1]\n\n\n lim_norte = latini # nuevo limite superior\n dif_lim_norte = np.abs( lats-lim_norte ) # diferencias entre array de latitudes y valor del limite superior\n idx_lim_norte = ( np.where( dif_lim_norte == dif_lim_norte.min() )[0][0], np.where( dif_lim_norte == dif_lim_norte.min() )[1][0] )# indice del valor de Slab2.0 que mas se aproxima \n\n lim_sur = latfin # nuevo limite inferior\n dif_lim_sur = np.abs( lats-lim_sur ) # diferencias entre array de latitudes y valor del limite inferior\n idx_lim_sur = ( np.where( 
dif_lim_sur == dif_lim_sur.min() )[0][0], np.where( dif_lim_sur == dif_lim_sur.min() )[1][0] )# indice del valor de Slab2.0 que mas se aproxima \n\n # se calcula la distancia entre los limites (largo de la falla) en metros\n largo_falla = Geodesic.WGS84.Inverse(lats[idx_lim_norte], lons[idx_lim_norte], lats[idx_lim_sur], lons[idx_lim_sur] )[ \"s12\" ]\n largo_subfalla = np.sqrt( area_sf ) # subfallas cuadradas\n n_fallas_filas = np.floor_divide( largo_falla, largo_subfalla ) # cantidad de fallas en sentido norte - sur \n # a partir del numero de fallas en el sentido norte sur (ctdad de latitudes) se crea un vector de latitudes equidistantes\n lats_fallas = np.reshape( np.linspace( lim_norte, lim_sur, int( n_fallas_filas ) ),( int( n_fallas_filas ),1 ) )\n \n # se busca la latitud del medio para referenciarla a la profundidad deseada\n if len(lats_fallas)%2 != 0:\n lat_mediana = lats_fallas[ np.floor_divide( len( lats_fallas ), 2) ]\n else:\n lat_mediana = lats_fallas[ np.floor_divide( len( lats_fallas ), 2) - 1 ]\n\n # busca indice de la latitud del medio\n dif_lat_mediana = np.abs( lats - lat_mediana )\n # primer indice, muestra la linea de profundidades para esta latitud\n idx_lat_mediana = np.where( dif_lat_mediana == dif_lat_mediana.min() )[0][0] \n # se busca indice de la profundidad en la linea de la latitud media\n dif_profundidad = np.abs( profundidad - prof[ idx_lat_mediana, ] )\n idx_profundidad = np.where( dif_profundidad == dif_profundidad.min() )[0][0]\n \n # indice elemento central de la falla creada, a partir de la latitud central y la profundidad\n idx_subfalla_central = ( idx_lat_mediana, idx_profundidad )\n\n # longitud de la subfalla central\n lon_subfalla_central = lons[ idx_subfalla_central ]#[0][0]\n # profundidad de la subfalla central (punto con la profundidad mas cercana a la ingresada)\n prof_subfalla_central = prof[ idx_subfalla_central ]#[0][0]\n\n # se busca los indices de los elementos mas cercanos a las latitudes de las fallas creadas por el linespace\n dif_lats = np.ones( (len( lats_fallas ), ) + np.shape( lats ) ) # inicializacion de array para diferencias de latitudes\n for i in range( len( lats_fallas ) ):\n dif_lats[i] = np.abs( lats - lats_fallas[i] )\n \n idx_fallas = np.ones( (len( lats_fallas ), ) + ( 1,2 ) ) # inicializacion de array con los indices de las latitudes \n for j in range( len( lats_fallas ) ):\n idx_fallas[j] = ( np.where( dif_lats[j] == dif_lats[j].min() )[0][0], np.where( dif_lats[j] == dif_lats[j].min() )[1][0] )\n \n # ancho de la falla\n ancho_falla = largo_falla/razon_aspecto\n n_fallas_columnas = np.floor_divide( ancho_falla, largo_subfalla ) # numero de subfallas en el sentido este-oeste\n # completar array de latitudes con el nuevo ancho\n #matriz_latitudes = np.reshape(np.tile(lats_fallas, int(n_fallas_columnas)),(int(n_fallas_columnas),(len(lats_fallas))))\n matriz_latitudes = np.tile( lats_fallas, int( n_fallas_columnas ) )\n # creacion de array con longitudes a usarse\n # calculo de longitudes de los centros de las subfallas a partir del ancho de la falla\n # es necesario saber si la cantidad es par o impar\n if n_fallas_columnas%2 != 0:\n mitad_ancho = ancho_falla / 2 # en metros\n n_fallas_xlado = int( n_fallas_columnas ) // 2 # cantidad de subfallas a ambos lados de falla central\n lon_limite_oeste = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 270, mitad_ancho )[ \"lon2\" ]\n lon_limite_este = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 90, mitad_ancho )[ \"lon2\" ]\n lons_subfallas_oeste = 
np.linspace( lon_limite_oeste, lon_subfalla_central, ( n_fallas_xlado + 1 ) )\n lons_subfallas_este = np.linspace( lon_subfalla_central, lon_limite_este, ( n_fallas_xlado + 1 ) )\n lons_subfallas = np.append( lons_subfallas_oeste[:-1], lons_subfallas_este ) # vector con las longitudes de las subfallas\n lons_subfallas = np.reshape( lons_subfallas, ( 1, int( n_fallas_columnas ) ) )\n else:\n mitad_ancho = ancho_falla / 2 \n n_fallas_oeste = int( n_fallas_columnas ) / 2 - 1 # -1 para no contar 2 veces la subfalla del medio\n n_fallas_este = int( n_fallas_columnas ) / 2\n lon_limite_oeste = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 270, ( mitad_ancho - largo_subfalla ) )[ \"lon2\" ]\n lon_limite_este = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 90, mitad_ancho )[ \"lon2\" ]\n lons_subfallas_oeste = np.linspace( lon_limite_oeste, lon_subfalla_central, ( int( n_fallas_oeste ) + 1 ) )\n lons_subfallas_este = np.linspace( lon_subfalla_central, lon_limite_este, ( int( n_fallas_este ) + 1 ) )\n lons_subfallas = np.append( lons_subfallas_oeste[:-1], lons_subfallas_este ) # vector con las longitudes de las subfallas\n lons_subfallas = np.reshape( lons_subfallas, ( 1, int( n_fallas_columnas ) ) )\n\n # creacion de matriz de longitudes\n matriz_longitudes = np.tile( lons_subfallas, ( int( n_fallas_filas ), 1 ) ) # matriz con longitudes de las subfallas\n\n # se debe encontrar las profundidades, dips y strikes correspondientes a estas latitudes y longitudes de cada subfalla\n # profundidades correspondientes a cada subfalla:\n # se interpolara para encontrar los valores de profundidad correspondientes a cada subfalla\n \n vec_lons_subfallas_todas = np.reshape( matriz_longitudes, \n ( int( n_fallas_filas * n_fallas_columnas ), ) ) # vector con todos los elementos de la matriz de longitudes de las subfallas creadas\n vec_lats_subfallas_todas = np.reshape( matriz_latitudes, \n ( int( n_fallas_filas * n_fallas_columnas ), ) ) # vector con todos los elementos de la matriz de latitudes de las subfallas creadas\n\n\n # objeto de interpolacion de profundidades\n profs_int = RegularGridInterpolator( ( vector_lat_input, vector_lon_input ), prof )\n # inicializacion array de valores interpolados de profundidades\n prof_subfallas = np.ones( ( int( n_fallas_columnas * n_fallas_filas ), 1) )\n for p in range( int( n_fallas_columnas*n_fallas_filas ) ):\n prof_subfallas[p] = profs_int( ( vec_lats_subfallas_todas[p], vec_lons_subfallas_todas[p] ) )\n prof_subfallas = np.reshape( prof_subfallas, ( int( n_fallas_filas ), int( n_fallas_columnas ) ) )\n \n # dips correspondientes a cada subfalla:\n # se interpolara para encontrar los valores de dip correspondientes a cada subfalla\n\n # objeto de interpolacion de dips\n dips_int = RegularGridInterpolator( ( vector_lat_input, vector_lon_input ), dip )\n # inicializacion array de valores interpolados de dip\n dip_subfallas = np.ones( ( int( n_fallas_columnas * n_fallas_filas ), 1) )\n for d in range( int( n_fallas_columnas * n_fallas_filas ) ):\n dip_subfallas[d] = dips_int( ( vec_lats_subfallas_todas[d], vec_lons_subfallas_todas[d] ) )\n dip_subfallas = np.reshape( dip_subfallas, (int( n_fallas_filas ), int( n_fallas_columnas ) ) )\n \n # strike correspondiente a cada subfalla:\n # se interpolara para encontrar los valores de strike correspondientes a cada subfalla\n\n # objeto de interpolacion de strikes\n strikes_int = RegularGridInterpolator( ( vector_lat_input, vector_lon_input ), strike )\n # inicializacion array de valores interpolados de 
strike\n strike_subfallas = np.ones( ( int( n_fallas_columnas*n_fallas_filas ), 1) )\n for s in range( int( n_fallas_columnas*n_fallas_filas ) ):\n strike_subfallas[s] = strikes_int( ( vec_lats_subfallas_todas[s], vec_lons_subfallas_todas[s] ) )\n strike_subfallas = np.reshape( strike_subfallas, ( int( n_fallas_filas ), int( n_fallas_columnas ) ) )\n # revisar, quiza sea necesario invertir los valores de la latitud\n\n\n\n\n return largo_falla, matriz_longitudes, matriz_latitudes, prof_subfallas, dip_subfallas, strike_subfallas", "def __init__(self):\n self.enfila= 0\n self.fila = []", "def busqueda_por_atributo(self, atributo, valor):\n\n paquetes = []\n\n if atributo == \"Número de dormitorios\":\n for casa in self.casas:\n if casa.numero_dormitorios >= valor:\n for paquete in casa.paquetes():\n paquetes.append(paquete)\n if atributo == \"Número de baños\":\n for casa in self.casas:\n if casa.numero_banos >= valor:\n for paquete in casa.paquetes():\n paquetes.append(paquete)\n if atributo == \"Numero de cocinas\":\n for casa in self.casas:\n if casa.numero_cocinas >= valor:\n for paquete in casa.paquetes():\n paquetes.append(paquete)", "def NuevaPartida(self,):\n\t\"\"\" Numeros Disponibles \"\"\"\n\tDisponibles[0] = True\n\tDisponibles[1] = True\n\tDisponibles[2] = True\n\tDisponibles[3] = True\n\tDisponibles[4] = True\n\tDisponibles[5] = True\n\t\"\"\" Jugador Uno \"\"\"\n\tJ1[0] = 0\n\tJ1[1] = 0\n\tJ1[2] = 0\n\tJ1[3] = 0\n\tJ1[4] = 0\n\tJ1[5] = 0\n\t\"\"\" Jugador Dos \"\"\"\n\tJ2[0] = 0\n\tJ2[1] = 0\n\tJ2[2] = 0\n\tJ2[3] = 0\n\tJ2[4] = 0\n\tJ2[5] = 0\n\t\"\"\" Jugador Tres \"\"\"\n\tJ3[0] = 0\n\tJ3[1] = 0\n\tJ3[2] = 0\n\tJ3[3] = 0\n\tJ3[4] = 0\n\tJ3[5] = 0\n\t\"\"\" Jugador Cuatro \"\"\"\n\tJ4[0] = 0\n\tJ4[1] = 0\n\tJ4[2] = 0\n\tJ4[3] = 0\n\tJ4[4] = 0\n\tJ4[5] = 0", "def entrenamiento(self,cantidad_autovectores,porcentaje_prueba):\n self.gestor_muestra.cargar(porcentaje_prueba)\n self.gestor_pca.entrenamiento(self.gestor_muestra.muestra,cantidad_autovectores)\n sujeto=[]\n aciertos=0\n for i in self.gestor_muestra.muestra.sujetos:\n sujeto.append([i.nombre,0,0,0,0,0,0])\n cant_muestras = 0\n for i in self.gestor_muestra.muestra.sujetos:\n \n for j in i.imagenes_prueba:\n cant_muestras+=1\n x,y,z=self.gestor_pca.identificacion_sujeto(j)\n if i.nombre == y:\n \n sujeto[int(i.nombre[1:])-1][3]+=1\n aciertos+=1\n else:\n sujeto[int(i.nombre[1:])-1][1]+=1\n sujeto[int(y[1:])-1][2]+=1\n\n for i in sujeto:\n \n try:\n i[4]=i[3]/(i[3]+i[1])\n except:\n i[4]=None\n \n try:\n i[5]=i[3]/(i[3]+i[2])\n except:\n i[5]=None\n \n try:\n i[6]=(2*i[5]*i[4])/(i[4]+i[5])\n except:\n i[6]=None\n \n return sujeto", "def crea_falla_fosa(lats, lons, prof, dip, strike, latini, latfin, area_sf, razon_aspecto):\n # ruta del archivo de la fosa\n ruta_fosa = \"../Slab/\"\n # archivo fosa ( primera columna: longitudes, segunda columna: latitudes)\n arc_fosa = ruta_fosa + \"SAM2.txt\"\n lonfosa, latfosa = carga_fosa(arc_fosa)\n # largo de falla\n largo_falla_completa = Geodesic.WGS84.Inverse(limnorte, -72, limsur, -72)[ \"s12\" ]\n # tamano subfallas (subfallas cuadradas)\n tamano_subfalla = np.sqrt(area_sf)\n # cantidad de fallas\n cant_subfallas_lats = largo_falla_completa // tamano_subfalla\n cant_subfallas_lons = largo_falla_completa // razon_aspecto\n # latitudes subfallas\n lats_subfallas = np.flip(np.linspace(limsur, limnorte, int(cant_subfallas_lats))) \n\n # longitudes subfallas\n ancho_falla = tamano_subfalla * cant_subfallas_lons\n # interpolacion de longitud de la fosa para las latitudes limites\n 
interpolador_lons_fosa = interp1d(latfosa, lonfosa)\n interpolador_lats_fosa = interp1d(lonfosa, latfosa)\n # longitud de la fosa a la latitud de cada subfalla\n lons_fosa_para_subfallas = interpolador_lons_fosa(lats_subfallas)\n # teniendo las longitudes de la fosa para las latitudes de las subfallas se tiene\n # el limite oeste de la falla, falta encontrar el limite este. Como se conoce el ancho de la falla, \n # basta encontrar la longitud de este ancho para cada latitud\n lons_limite_este = np.ones(np.shape(lons_fosa_para_subfallas))\n\n for ilon in range(len(lons_limite_este)):\n lons_limite_este[ilon] = Geodesic.WGS84.Direct(lats_subfallas[ilon], lons_fosa_para_subfallas[ilon], 90, ancho_falla)[ \"lon2\" ]\n\n # teniendo los limites este y oeste, basta encontrar las longitudes de las subfallas intermedias\n\n array_lons = np.ones((int(n_subfallas_lats),int(n_subfallas_lons))) # LONS SUBFALLAS\n for jlat in range(int(n_subfallas_lats)):\n array_lons[jlat,:] = np.linspace(lons_fosa_para_subfallas[jlat],lons_limite_este[jlat],int(n_subfallas_lons))\n\n array_lats = np.tile(np.reshape(lats_subfallas,(int(n_subfallas_lats),1)),int(n_subfallas_lons)) # LATS SUBFALLAS", "def intenta_adelantar_camion_operacion(self, sistema, operacion):\r\n if not self.dispone_producto_espacio_sistema(sistema):\r\n\r\n if any(c.dispone_producto_espacio_sistema(sistema) or c.manipulado.triggered\r\n for c in operacion.recurso.cola_detras_de_camion(self)):\r\n camion_adelantado = [c for c in operacion.recurso.cola_detras_de_camion(self)\r\n if c.dispone_producto_espacio_sistema(sistema) or c.manipulado.triggered][0]\r\n\r\n operacion.recurso.cola.remove(camion_adelantado)\r\n operacion.recurso.cola = \\\r\n operacion.recurso.cola[0:operacion.recurso.cola.index(self)] \\\r\n + [camion_adelantado] + operacion.recurso.cola[operacion.recurso.cola.index(self):]\r\n\r\n print str(camion_adelantado) + \" adelantado bajo criterio de \" + str(self) + \" \" + str(sistema.now)\r\n print \"\\t\" + str(operacion.recurso.nombre) + \": \" \\\r\n + str(operacion.recurso.cola) + \" Hora: \" + str(sistema.now)\r\n else:\r\n\r\n if any(c.manipulado.triggered\r\n for c in operacion.recurso.cola_detras_de_camion(self)):\r\n camion_adelantado = [c for c in operacion.recurso.cola_detras_de_camion(self)\r\n if c.manipulado.triggered][0]\r\n\r\n operacion.recurso.cola.remove(camion_adelantado)\r\n operacion.recurso.cola = \\\r\n operacion.recurso.cola[0:operacion.recurso.cola.index(self)] \\\r\n + [camion_adelantado] + operacion.recurso.cola[operacion.recurso.cola.index(self):]\r\n\r\n print str(camion_adelantado) + \" adelantado bajo criterio de \" + str(self) + \" \" + str(sistema.now)\r\n print \"\\t\" + str(operacion.recurso.nombre) + \": \" \\\r\n + str(operacion.recurso.cola) + \" Hora: \" + str(sistema.now)", "def calcular_promedio(lista):\r\n suma = 0\r\n promedio = 0\r\n \r\n for marca in lista:\r\n suma += marca[1]\r\n \r\n promedio = suma//len(lista)\r\n \r\n return promedio", "def manipular_sacos(self, camion):\r\n operaciones = self.operaciones[\"Operaciones manipuleo\"]\r\n\r\n # Manipuleo de camion por cargar\r\n if camion.tipo == \"Carga\":\r\n\r\n # Espera camion para realizar transbordo o interrumpe la espera de otro\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 100))\r\n\r\n # Si el camion espera procede con un tranbordo o carga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a 
pulso - Sacos\"]\r\n carga = operaciones[\"Carga a pulso - Sacos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, 200,\r\n carga, self.medios_almacenamiento[\"Almacen 2\"], 200))\r\n\r\n # Manipuleo de camion por descargar\r\n elif camion.tipo == \"Descarga\":\r\n\r\n # Espera camion para realizar transbordo o interrumpe la espera de otro\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 100))\r\n\r\n # Si el camion espera procede con un tranbordo o descarga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Sacos\"]\r\n descarga = operaciones[\"Descarga a pulso - Sacos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, 200,\r\n descarga, self.medios_almacenamiento[\"Almacen 2\"], 200))", "def calculDeFraisPortuaire():\n TARIFMENSUEL1 = 100\n TARIFMENSUEL2 = 200\n TARIFMENSUEL3 = 400\n TARIFMENSUEL4 = 600\n TAXESPECIALEANNUELLEVOILIERCATEGORIE1 = 100\n TAXESPECIALEANNUELLEVOILIERCATEGORIE2 = 150\n TAXESPECIALEANNUELLEVOILIERCATEGORIE3 = 250\n \n coutMensuel = 0\n coutAnnuel = 0\n taxeSpecialeAnnuelle = 0\n nomDuVoilier = input(\"ENTREZ le nom du voilier: \")\n longueur = float(input(\"Entrez la longueur du voilier: \"))\n categorie = int(input(\"Entrez la categorie du voilier 1 2 ou 3 : \"))\n \n if(longueur<5):\n coutMensuel = TARIFMENSUEL1\n elif(longueur<=10):\n coutMensuel = TARIFMENSUEL2\n elif(longueur<=12):\n coutMensuel = TARIFMENSUEL3\n else:\n coutMensuel = TARIFMENSUEL4\n \n if(categorie==1):\n taxeSpecialeAnnuelle = TAXESPECIALEANNUELLEVOILIERCATEGORIE1\n elif(categorie==2):\n taxeSpecialeAnnuelle = TAXESPECIALEANNUELLEVOILIERCATEGORIE2\n elif(categorie==3):\n taxeSpecialeAnnuelle = TAXESPECIALEANNUELLEVOILIERCATEGORIE3\n \n coutAnnuel = taxeSpecialeAnnuelle+coutMensuel*12\n \n return \"le coût annuel d’une place au port pour le voilier \"+nomDuVoilier+\" est de \"+ str(coutAnnuel)+\" euros\"", "def getFactura(self): \n return self.caja", "def getFactura(self): \n return self.caja" ]
[ "0.5861771", "0.5792501", "0.5622451", "0.56105626", "0.5542907", "0.5492034", "0.547894", "0.54629517", "0.54576236", "0.54318184", "0.54252744", "0.539618", "0.53948766", "0.5382875", "0.53711325", "0.5367106", "0.5332052", "0.53252435", "0.53132993", "0.5304049", "0.5303446", "0.5292104", "0.52889866", "0.5278613", "0.52641195", "0.5253859", "0.5251727", "0.5248348", "0.5240608", "0.5240608" ]
0.78076255
0
We use this method to print the map: it prints a title and then prints each row of the map list (listamapa) by joining it with join
def game (self,mapa): self.titulo() for fila in mapa: print("".join(fila))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_map():\r\n for row in range(0, len(map1)): #for all rows\r\n for column in range(0, len(map1[0])): #for all columns\r\n print(map1[row][column], end=' ')\r\n print()", "def display_map(map):\n for row in map:\n line = \"\"\n for point in row:\n line += point.display_point()\n print(line)", "def display(self):\n logging.info(\"Display Carte : {}\".format(self.name))\n for row in self.map:\n #print(row)\n for cell in row:\n print(cell, end = \"\")\n print(\"\")", "def map_room_list():\n for room in map_rooms:\n print(f\"{room}: \")\n for description in map_rooms[room]:\n print(f\"{description} - {map_rooms[room][description]}\")", "def print_map():\n printmap = []\n for y in range(max_height):\n line = list(tracks[:, y])\n for cart in sorted(carts, key=lambda c: c[0]):\n if cart[0][1] == y:\n line[cart[0][0]] = cart_icons[cart[1] - 1]\n printmap.append(''.join(line))\n for line in printmap:\n print(line)", "def show_map(self):\n for line in self.map:\n print(\"\".join(line))", "def print_map(road_map):\n # For each city index in the road map\n for i in range(len(road_map)):\n # Identify the index of the current city and next city\n a = i\n b = (i+1) % len(road_map)\n # Calculate distance between the current city and next city\n distance = euler_dist(road_map[a][2],road_map[b][2],road_map[a][3],road_map[b][3])\n print('{0:15} {1:3} {2:15} {3:15}'.format(road_map[a][1],'->',road_map[b][1],round(distance,2)))", "def show_map(map_):\n for r in map_.matrix:\n print(''.join(r))\n print()", "def print_map(self):\n for row in self.world_map:\n for cell in row:\n print(cell, end=\"\")\n print()", "def printMapDijkstra(catalog, path):\n\n # Datos de la primera capital al primer lp (Las captales van de rojo!)\n firstElement = stack.pop(path)\n vertexA = firstElement['vertexA'] # Primera Capital\n vertexB = firstElement['vertexB'].split('-')[:1][0]\n vertexA_info = mp.get(catalog['landingpoints'], vertexA)['value']['info']\n vertexB_info = mp.get(catalog['landingpoints'], vertexB)['value']['info']\n\n item_map = folium.Map(location=[float(vertexA_info['latitude']), float(vertexA_info['longitude'])], zoom_start=7)\n\n folium.Marker(location=[float(vertexA_info['latitude']), float(vertexA_info['longitude'])], tooltip=vertexA_info['name'], icon=folium.Icon(color='darkred', icon='cloud')).add_to(item_map)\n\n folium.Marker(location=[float(vertexB_info['latitude']), float(vertexB_info['longitude'])], tooltip=vertexB_info['name'], icon=folium.Icon(color='lightgray')).add_to(item_map)\n\n loc = [(float(vertexA_info['latitude']), float(vertexA_info['longitude'])), (float(vertexB_info ['latitude']), float(vertexB_info['longitude']))]\n folium.PolyLine(loc,\n color='gray',\n weight=5,\n tooltip=str(firstElement['weight'])+' km',\n opacity=0.6).add_to(item_map)\n\n while stack.size(path) > 1:\n # Conexiones entre todos los lps del medio (mismo color para todos)\n element = stack.pop(path)\n vertexA = element['vertexA'].split('-')[:1][0]# Primera Capital\n vertexB = element['vertexB'].split('-')[:1][0]\n \n vertexA_info = mp.get(catalog['landingpoints'], vertexA)['value']['info']\n vertexB_info = mp.get(catalog['landingpoints'], vertexB)['value']['info']\n\n folium.Marker(location=[float(vertexA_info['latitude']), float(vertexA_info['longitude'])], tooltip=vertexA_info['name'], icon=folium.Icon(color='lightgray')).add_to(item_map)\n folium.Marker(location=[float(vertexB_info['latitude']), float(vertexB_info['longitude'])], tooltip=vertexB_info['name'], 
icon=folium.Icon(color='lightgray')).add_to(item_map)\n folium.PolyLine([(float(vertexA_info['latitude']), float(vertexA_info['longitude'])), (float(vertexB_info['latitude']), float(vertexB_info['longitude']))],\n color='gray',\n weight=5,\n tooltip=str(element['weight']) +' km',\n opacity=0.6).add_to(item_map)\n \n\n # Datos de el ulitmo LP a la capital (las capitales van de rojo)\n lastElement = stack.pop(path)\n vertexA = lastElement['vertexA'].split('-')[:1][0] \n vertexB = lastElement['vertexB'] # Ultima Capital\n vertexA_info = mp.get(catalog['landingpoints'], vertexA)['value']['info']\n vertexB_info = mp.get(catalog['landingpoints'], vertexB)['value']['info']\n\n folium.Marker(location=[float(vertexB_info['latitude']), float(vertexB_info['longitude'])], tooltip=vertexB_info['name'], icon=folium.Icon(color='darkred', icon='cloud')).add_to(item_map)\n folium.Marker(location=[float(vertexA_info['latitude']), float(vertexA_info['longitude'])], tooltip=vertexA_info['name'], icon=folium.Icon(color='lightgray')).add_to(item_map)\n folium.PolyLine([(float(vertexA_info['latitude']), float(vertexA_info['longitude'])), (float(vertexB_info['latitude']), float(vertexB_info['longitude']))],\n color='gray',\n weight=5,\n tooltip=str(lastElement['weight']) +' km',\n opacity=0.6).add_to(item_map)\n\n\n item_map.save('Req 3.html')", "def view_map():\n print(\"\"\"\n ____________________________________Client Rooms______________________\n| |1 Locker Rooms 2| 1 | 2 | |\n| |_________ ________| | | Dance |\n| | | |__| |__| Floor |\n| | | Hall |\n| Garage | Front | _______|_______ |\n| | Lobby | | |_ ____________|\n| | | | Storage |\n| | | Lounge |______________|\n| ______________ Car\n|___________________Front Entrance______________________| Allyway\n\"\"\")", "def print_table(table, title_list):\n\n # your goes code\n \n table.insert(0, title_list)\n # title listet 0.helyre teszi\n # your code\n\n lenght_list = [] # tartalmazza az összes szót\n for lines in table:\n for items in lines:\n lenght_list.append(items)\n\n longest_words_length = len(max(lenght_list, key=len))\n multiplier = len(title_list)*(longest_words_length+1)\n\n for sublist in table:\n print(\"|\\n|\", \"-\"*multiplier, \"|\")\n\n for j in sublist:\n print(\"|\", j, end = \" \"*(longest_words_length-len(j)))\n\n print(\"|\\n|\",\"-\"*multiplier, \"|\")", "def print_map(self):\n y_max,x_max = map(max, zip(*self.mp.keys()))\n for row in range(0,y_max+1):\n msg = []\n for k in range(0,x_max+1):\n msg.append(chr(self.mp[row,k]))\n print(\"\".join(msg))", "def print_local_map(self):\n size = 15\n size_half = int(size/2)\n temp_map = []\n for i in range(size):\n map_row = []\n for j in range(size):\n coords = (self.rob_pos[0] + i-size_half,\n self.rob_pos[1] + j-size_half) \n\n if(self.check_limits(coords)):\n if self.rob_pos[0]==coords[0] and self.rob_pos[1]==coords[1]:\n map_row.append(\"R\")\n else:\n map_row.append(self.map[coords[0]][coords[1]])\n temp_map.append(map_row)\n \n #print map upside down cause thats how its saved....\n for i in range(14,-1,-1):\n rospy.logdebug(temp_map[i])", "def print_map(road_map):\n \n distances = distances_and_limits(road_map)[0]\n\n print('*** THE BEST ROUTE FOUND (TOTAL DISTANCE {}) ***'.format(round(sum(distances),1)))\n print('')\n\n for i, location in enumerate(road_map):\n print('Trip #{}: {}, {} ----> {}, {}'.format(i+1, \\\n road_map[(i-1) % len(road_map)][1], road_map[(i-1) % len(road_map)][0], \\\n road_map[i][1], road_map[i][0]))\n print('Distance = {}'.format(round(distances[i],2)))\n 
print('')\n print('____________________________________________________________________')\n print('')", "def dibujar_tablero(tablero):\n for fila in tablero:\n print(\"|\", \"|\".join(fila), \"|\", sep=\"\")\n print(\"\")", "def crear_mapa (self, ancho = 40 , largo = 40):\n\n for i in range (largo):\n\n a = \" \"\n b = []\n for z in range (ancho):\n b.append(a)\n kasino.mapa.append(b)\n\n for i in range (1,ancho -1):\n kasino.mapa[0][i]=\"═══\"\n kasino.mapa[largo-1][i]=\"═══\"\n\n for i in range (1,largo -1):\n kasino.mapa[i][0]= \" ║\"\n kasino.mapa[i][ancho-1]= \"║\"\n \n kasino.mapa [0][0]=\" ╔\"\n kasino.mapa [0][ancho-1]=\"╗\"\n kasino.mapa [largo-1][0]=\" ╚\"\n kasino.mapa [largo-1][ancho-1]=\"╝\"", "def print_board(self, board):\n\n for i in range(0, len(self.row_map.keys())):\n for j in range(0, len(self.row_map.keys())):\n print(\" | {:>2}\".format(board[self.row_map[i + 1] + str(j + 1)]), end='')\n print(\"\\n\")\n print(\" --------------------- \")", "def print_cities(road_map): \n\n print('*** THE ORIGINAL MAP (TOTAL DISTANCE {}) ***'.format(compute_total_distance(road_map)))\n print('')\n for i, location in enumerate(road_map):\n try:\n print('- {}, {}: ({},{})'.format(location[1], \\\n location[0], round(location[2],1), round(location[3],1)))\n except Exception as e:\n print('Error with print_cities: '+str(e))\n print('____________________________________________________________________')\n print('')", "def print_pessoas() -> None:\n lista_pessoas = select_todos_registros('pessoa')\n\n print_colunas('pessoa')\n for pessoa in lista_pessoas:\n print(*pessoa, sep=', ')", "def pauta(lst_splen, lst_pauta):\n\n tmp=''\n\n #inicio do bloco \n tmp+='\\t<story>\\n'\n\n for dicsp in lst_splen:\n\n #sessao plenaria\n if dicsp['sessao']!=None:\n tmp+='\\t\\t<para style=\"P0\">' + dicsp['sessao'].replace('&','&amp;') +', EM ' + dicsp['datasessao'].replace('&','&amp;')+ '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n if dicsp['ind_audiencia'] == 1:\n tmp+='\\t\\t<para style=\"P1\"></para>\\n'\n else:\n tmp+='\\t\\t<para style=\"P1\">(Pauta da Ordem do Dia)</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"12\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n\n #inicio do bloco que contem os flowables\n \n for dic in lst_pauta:\n #espaco inicial\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"10\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n #condicao para a quebra de pagina\n tmp+='\\t\\t<condPageBreak height=\"5mm\"/>\\n'\n\n #pauta\n if dic['num_ordem']!=None:\n tmp+='\\t\\t<para style=\"P4\"><font color=\"#222\"><b>Item nº ' + str(dic['num_ordem']) + '</b></font></para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n if dic['id_materia']!=None:\n if dic['cod_materia']!='':\n tmp+='\\t\\t<para style=\"P4\"><b><font color=\"#126e90\"><u>' + dic['link_materia']+'</u></font> - '+ dic['nom_autor'] + '</b></para>\\n'\n if dic['cod_parecer']!='': \n tmp+='\\t\\t<para style=\"P4\"><b><font color=\"#126e90\"><u>' + dic['link_materia']+'</u></font> - '+ dic['nom_autor'] + ', que '+ dic['txt_materia'] + '</b></para>\\n'\n tmp+='\\t\\t<para style=\"P3\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n if dic['txt_ementa']!=None:\n tmp+='\\t\\t<para style=\"P3\">' + 
dic['txt_ementa'].replace('&','&amp;') + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n if dic['des_turno']!='':\n tmp+='\\t\\t<para style=\"P3\"><b>Turno</b>: '+ dic['des_turno'] +' | <b>Quorum</b>: '+ dic['des_quorum']+' | <b>Tipo de Votação</b>: '+ dic['tip_votacao'] + '' + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"8\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n if dic['parecer']!= 0 and dic['parecer']!= '':\n tmp+='\\t\\t<para style=\"P3\"><b><u>PARECERES:</u></b></para>\\n\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n for item in dic['pareceres']:\n tmp+='\\t\\t<para style=\"P3\"><b><font color=\"#126e90\">' + item[\"link_materia\"] + '</font> - ' + item[\"conclusao\"] + '</b> ' + item[\"relatoria\"] + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n if dic['substitutivo']!= 0 and dic['substitutivo']!= '':\n tmp+='\\t\\t<para style=\"P3\"><b><u>SUBSTITUTIVOS:</u></b></para>\\n\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n' \n for substitutivo in dic['substitutivos']:\n tmp+='\\t\\t<para style=\"P3\"><b><font color=\"#126e90\">' + substitutivo[\"id_substitutivo\"] + '</font> - ' + substitutivo[\"autoria\"] + '</b> - ' + substitutivo[\"txt_ementa\"] + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n if dic['emenda']!= 0 and dic['emenda']!= '':\n tmp+='\\t\\t<para style=\"P3\"><b><u>EMENDAS:</u></b></para>\\n\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n' \n for emenda in dic['emendas']:\n tmp+='\\t\\t<para style=\"P3\"><b><font color=\"#126e90\">' + emenda[\"id_emenda\"] + '</font> - ' + emenda[\"autoria\"] + '</b> - ' + emenda[\"txt_ementa\"] + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n return tmp", "def display_map(data_map, clear):\n # if clear:\n #clear_output()\n\n # Check which player have to play and define displaying constants.\n player = 'player' + str((data_map['main_turn'] % 2) + 1)\n ennemy = 'player' + str(2 - (data_map['main_turn'] % 2))\n ui_color = data_map[player + 'info'][0]\n\n data_cell = {'ui_color': ui_color}\n\n # Generate the units to be displayed.\n for i in range(1, data_map['map_size'] + 1):\n for j in range(1, data_map['map_size'] + 1):\n\n # Coloration black/white of the cells.\n background_cell = ''\n if (i + j) % 2 == 0:\n background_cell = Back.WHITE\n\n if (i, j) in data_map['player1']:\n data_cell['(' + str(i) + ',' + str(j) + ')'] = data_map['player1'][(i, j)][1] + background_cell + ' ☻' + str(data_map['player1'][(i, j)][0]) + (str(data_map['player1'][(i, j)][2]) + ' ')[:2]\n elif (i, j) in data_map['player2']:\n data_cell['(' + str(i) + ',' + str(j) + ')'] = data_map['player2'][(i, j)][1] + background_cell + ' ☻' + str(data_map['player2'][(i, j)][0]) + (str(data_map['player2'][(i, j)][2]) + ' ')[:2]\n else:\n data_cell['(' + str(i) + ',' + str(j) + ')'] = background_cell + (' ' * 5)\n\n # Generate the statistics to be 
displayed.\n player1_cell = data_map[player].keys()\n cell1_couter = 0\n player2_cell = data_map[ennemy].keys()\n cell2_couter = 0\n unit_name = {'E': 'Elf', 'D': 'Dwarf'}\n\n for i in range(1, 5):\n for j in range(1, 3):\n if len(player1_cell) > cell1_couter:\n data_cell['stat' + str(i) + str(j)] = (('0' + str(player1_cell[cell1_couter][0]))[-2:] + '-' + ('0' + str(player1_cell[cell1_couter][1]))[-2:] + ' ' + unit_name[data_map[player][player1_cell[cell1_couter]][0]] + ' hp: ' + str(data_map[player][player1_cell[cell1_couter]][2]) + ' ' * 20)[:20]\n cell1_couter += 1\n else:\n data_cell['stat' + str(i) + str(j)] = ' ' * 20\n for j in range(3, 5):\n if len(player2_cell) > cell2_couter:\n data_cell['stat' + str(i) + str(j)] = (('0' + str(player2_cell[cell2_couter][0]))[-2:] + '-' + ('0' + str(player2_cell[cell2_couter][1]))[-2:] + ' ' + unit_name[data_map[ennemy][player2_cell[cell2_couter]][0]] + ' hp: ' + str(data_map[ennemy][player2_cell[cell2_couter]][2]) + ' ' * 20)[:20]\n cell2_couter += 1\n else:\n data_cell['stat' + str(i) + str(j)] = ' ' * 20\n\n # Generate the title of the map to be displayed.\n data_cell['turn'] = str(data_map['main_turn']/2 + 1)\n data_cell['playername'] = data_map[player + 'info'][1]\n data_cell['blank'] = ((data_map['map_size'] * 5) - 19 - len(data_cell['turn']) - len(data_cell['playername'])) * ' '\n\n # Print the top of the UI.\n for line in data_map['data_ui']:\n print line % data_cell", "def map_picture(the_map, p):\n xy = (p.location[0] - 2, p.location[1] + 2)\n map_coords = []\n for y in range(0, 5):\n row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]\n map_coords.append(row)\n\n pretty_map = []\n for r in map_coords:\n row = []\n for coordinates in r:\n if coordinates in the_map.keys():\n if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:\n star = '*$ '\n elif p.quest and p.quest[1] == coordinates:\n star = ' * '\n elif p.job and p.job.location == coordinates:\n star = ' $ '\n else:\n star = ' '\n row.append(\"|{!s:9}{}|\".format(the_map[coordinates].square_type, star))\n else:\n row.append(\"|{!s:12}|\".format(' '))\n pretty_map.append(row)\n for row in pretty_map:\n print(''.join(row))", "def cargar_mapa (self):\n\n stream_cargar = open ('yo_mapa.txt', 'rt',encoding=\"utf-8\")\n mapa=stream_cargar.readlines()\n \n a = mapa[0].split(\"X\")\n mapa__I=[]\n mapa__D=[]\n toca = \"izda\"\n for lista in a:\n pasar=\"X\"\n linea1=[]\n trozo=\"\"\n for i in lista:\n if pasar==\"X\":\n \n borrar = [\"[\",\"'\"]\n if i in borrar:\n pass\n elif i == \",\" or i == \"]\":\n linea1.append(trozo)\n trozo=\"\"\n pasar=\"V\"\n elif i == \"S\":\n toca=\"dxa\"\n else:\n trozo+=i\n\n else:\n pasar=\"X\"\n pass\n if toca == \"izda\":\n mapa__I.append(linea1)\n else:\n mapa__D.append(linea1)\n\n mapa_cargado=[]\n for i in range (len(mapa__I)):\n\n mapa_cargado.append(mapa__I[i]+mapa__D[i]) \n\n stream_cargar=(close)\n return mapa_cargado", "def print_interlinks(headers_map, interlink_map):\n\n print(\"\\nInterlinks\")\n for i in interlink_map.keys():\n print(\"Key:\", i.hex()[:6], \"| Level:\", int(headers_map[i].compute_level()))\n print_tuple(interlink_map[i])\n print(\"+\" * 32)", "def basic_print(lista):\n for item in lista:\n print(\"{} \\t\\t {}\".format(item[0], item[1]))", "def mapa(self, msg, match):\n\n for linha in self.mapa_inteiros:\n yield \" \".join(map(str, linha))", "def intercambiar(mapa, mapa2):\n for e in mapa.bloqueadas:\n mapa2.bloqueadas.append(e)", "def print_m(seq1, seq2, m):\n seq1 = '-' + seq1; seq2 = '-' + 
seq2\n print()\n print(' '.join(['%3s' % i for i in ' '+seq2]))\n for i, p in enumerate(seq1):\n line = [p] + [m[i][j] for j in range(len(seq2))]\n print(' '.join(['%3s' % i for i in line]))\n print()\n return", "def printMap(values, klab, vlab, precision, offset=16):\n\tprint(klab.ljust(offset, \" \") + vlab)\n\tfor k in values.keys():\n\t\tv = values[k]\n\t\tks = toStr(k, precision).ljust(offset, \" \")\n\t\tvs = toStr(v, precision)\n\t\tprint(ks + vs)" ]
[ "0.6373207", "0.63231426", "0.6191893", "0.6182269", "0.6169301", "0.6159255", "0.6086893", "0.60857475", "0.60446316", "0.5992802", "0.59581316", "0.5861561", "0.5839619", "0.5816608", "0.5770234", "0.57276624", "0.56045544", "0.5600139", "0.55909145", "0.55598956", "0.5483897", "0.54473555", "0.5445001", "0.5440089", "0.54341996", "0.5367301", "0.53533494", "0.5335075", "0.5322953", "0.5269959" ]
0.6927792
0
We use this method to set the kasino map variable to what the casino.guardar method wrote to a single file, so the data can be recovered. We split the str with split on the written X characters, walk through the str and use the commas to recover the elements of the map lines, i.e. the [][] list of lists. The S tells the two halves of the map apart; we then join the two halves back into a single map, close the stream and return the loaded map
def cargar_mapa (self): stream_cargar = open ('yo_mapa.txt', 'rt',encoding="utf-8") mapa=stream_cargar.readlines() a = mapa[0].split("X") mapa__I=[] mapa__D=[] toca = "izda" for lista in a: pasar="X" linea1=[] trozo="" for i in lista: if pasar=="X": borrar = ["[","'"] if i in borrar: pass elif i == "," or i == "]": linea1.append(trozo) trozo="" pasar="V" elif i == "S": toca="dxa" else: trozo+=i else: pasar="X" pass if toca == "izda": mapa__I.append(linea1) else: mapa__D.append(linea1) mapa_cargado=[] for i in range (len(mapa__I)): mapa_cargado.append(mapa__I[i]+mapa__D[i]) stream_cargar=(close) return mapa_cargado
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mapa(self, msg, match):\n\n for linha in self.mapa_inteiros:\n yield \" \".join(map(str, linha))", "def _process_strings(line,\n lang_nlp,\n get_lemmas,\n get_pos,\n remove_stopwords,\n replace_stopwords,\n get_maps):\n\n # strip, replace special tokens\n orig_line = line\n line = line.strip()\n line = re.sub(r'&apos;', '\\'', line.strip())\n line = re.sub(r'&quot;', '\\\"', line.strip())\n # Tokenize etc.\n line_nlp = lang_nlp(line)\n spacy_tokens = [elem.text for elem in line_nlp]\n spacy_tokens_lower = [elem.text.lower() for elem in line_nlp]\n spacy_lemmas = None\n spacy_pos = None\n if get_lemmas:\n spacy_lemmas = list()\n for elem in line_nlp:\n if elem.lemma_ == '-PRON-' or elem.lemma_.isdigit():\n spacy_lemmas.append(elem.lower_)\n else:\n spacy_lemmas.append(elem.lemma_.lower().strip())\n if get_pos:\n spacy_pos = [elem.pos_ for elem in line_nlp]\n\n # Generate a mapping between whitespace tokens and SpaCy tokens\n ws_tokens = orig_line.strip().split()\n ws_tokens_lower = orig_line.strip().lower().split()\n ws_to_spacy_map = dict()\n spacy_to_ws_map = dict()\n if get_maps:\n ws_loc = 0\n ws_tok = ws_tokens[ws_loc]\n\n for spacy_loc, spacy_tok in enumerate(spacy_tokens):\n while True:\n # Map whitespace tokens to be identical to spacy tokens\n ws_tok = re.sub(r'&apos;', '\\'', ws_tok)\n ws_tok = re.sub(r'&quot;', '\\\"', ws_tok)\n\n if spacy_tok == ws_tok or spacy_tok in ws_tok:\n # Terminate\n if ws_loc >= len(ws_tokens):\n break\n\n # Extend maps\n if not ws_to_spacy_map.get(ws_loc, None):\n ws_to_spacy_map[ws_loc] = list()\n ws_to_spacy_map[ws_loc].append(spacy_loc)\n if not spacy_to_ws_map.get(spacy_loc, None):\n spacy_to_ws_map[spacy_loc] = list()\n spacy_to_ws_map[spacy_loc].append(ws_loc)\n\n # Move pointer\n if spacy_tok == ws_tok:\n ws_loc += 1\n if ws_loc < len(ws_tokens):\n ws_tok = ws_tokens[ws_loc]\n else:\n ws_tok = ws_tok[len(spacy_tok):]\n break\n else:\n ws_loc += 1\n\n # Assert full coverage of whitespace and SpaCy token sequences by the mapping\n ws_covered = sorted(list(ws_to_spacy_map.keys()))\n spacy_covered = sorted(list(set(list([val for val_list in ws_to_spacy_map.values() for val in val_list]))))\n assert ws_covered == [n for n in range(len(ws_tokens))], \\\n 'WS-SpaCy mapping does not cover all whitespace tokens: {}; number of tokens: {}'\\\n .format(ws_covered, len(ws_tokens))\n assert spacy_covered == [n for n in range(len(spacy_tokens))], \\\n 'WS-SpaCy mapping does not cover all SpaCy tokens: {}; number of tokens: {}' \\\n .format(spacy_covered, len(spacy_tokens))\n\n if remove_stopwords:\n # Filter out stopwords\n nsw_spacy_tokens_lower = list()\n nsw_spacy_lemmas = list()\n for tok_id, tok in enumerate(spacy_tokens_lower):\n if tok not in STOP_WORDS:\n nsw_spacy_tokens_lower.append(tok)\n if get_lemmas:\n nsw_spacy_lemmas.append(spacy_lemmas[tok_id])\n else:\n if replace_stopwords:\n nsw_spacy_tokens_lower.append('<STPWRD>')\n if get_lemmas:\n nsw_spacy_lemmas.append('<STPWRD>')\n\n spacy_tokens_lower = nsw_spacy_tokens_lower\n if get_lemmas:\n spacy_lemmas = nsw_spacy_lemmas\n\n return line_nlp, spacy_tokens_lower, spacy_lemmas, spacy_pos, ws_tokens, ws_tokens_lower, ws_to_spacy_map, \\\n spacy_to_ws_map", "def test_map(data):\n k, v = data\n for s in split_into_sentences(v):\n for w in split_into_words(s.lower()):\n yield (w, \"\")", "def map1(inKey, inVal):\n filename, linenum = inKey[0], inKey[1]\n s = inVal[0].lower().strip()\n s = re.sub(\"['\\\"]\", \"\", s)\n s = re.sub(\"[^A-Za-z0-9']\", \" \", s)\n words = [w for w in s.split(\" \") 
if w != \"\"]\n result = []\n for i in range(4, len(words)):\n temp = [words[c] for c in range(i-4,i+1) if words[c][0].isalpha()]\n if len(temp) == 5: result.append((temp,[\"1\",filename,linenum]))\n return result", "def mapper(self, _, line):\n linea = line.split()\n causa, fallecidos = linea[0], linea[1]\n fallecidos_f = float(fallecidos)\n \n yield causa, (1, round(fallecidos_f))", "def map():", "def inner_map_function(s):\n\n return f(inner_strip(s))", "def extrairFrase(self, documento):\n unicWords = self.unicWords()\n doc = set(documento)\n caracteristicas ={}\n for palavras in unicWords:\n caracteristicas['%s'%palavras]=(palavras in doc)\n return caracteristicas", "def sanitize (str_ar):\n val = len(str_ar[0].split())\n map_data = map.Map(val, len(str_ar), [])\n for line in str_ar:\n line_s = line.split()\n if len(line_s) != val:\n print(\"File not formatted correctly\")\n return None\n map_data.array.extend(line_s)\n if map_data.height * map_data.width > 50:\n print(\"You are not allowed more than 50 data points\")\n return None\n for val in map_data.array:\n if not val.isdigit():\n print(\"The file can only contain positive integers\")\n return None\n return map_data", "def replaceLetra(a): \n global palavraOculta\n palavraSecreta = list(palavraOculta)\n\n for m in re.finditer(a, palavraDoJogo):\n palavraSecreta[m.start()] = a\n\n palavraOculta = \"\".join(palavraSecreta)", "def mapper_data_cleaning(self, l, line):\n lineitems = line.split(\",\")\n yield (lineitems[0], lineitems[2])", "def get_aui_str_mapping(input_file=None):\n input_file = os.path.join(DATA_DIR, \"umls\", \"MRCONSO.RRF\") if input_file is None else input_file\n mapping = {}\n with open(input_file, 'r') as f:\n for line in f:\n line_array = line.split(\"|\")\n if line_array[MRCONSO_SAB_INDEX] == 'MSH' and line_array[MRCONSO_SDUI_INDEX].strip() != \"\":\n mapping[line_array[MRCONSO_AUI_INDEX]] = line_array[MRCONSO_STR_INDEX]\n return mapping", "def _tokenize4map(param):\n return _tokenize(*param)", "def analysis_to_subword_dicts(ana):\n return map(pairs_to_dict, chunk_subwords(analysis_to_pairs(ana)))", "def procesar_linea(separador,linea):\n return (linea.rstrip('\\n')).split(separador)", "def parseDuMap(output):\n #z00du00(DB-SL-MSL-CH-SCH) : 00-00-0-0-0 01-01-0-0-0 04-04-2-0-0 05-05-2-0-0\n # 02-02-1-1-0 03-03-1-1-0 02-02-1-0-0 03-03-1-0-0\n duMap = {}\n for l in output:\n \n l_a = l.split(\":\")\n #print l_a\n #sys.exit(1)\n du = l_a[0]\n # string of 00-00-0-0-0 01-01-0-0-0\n sbChs = l_a[1]\n \n #z00du00(DB-SL-MSL-CH-SCH)\n # get 0 and from z00du0 9\n partDu = getDuPart(du)\n \n sbChArr = getAllSlChSbCh(sbChs)\n \n duMap[partDu] = sbChArr\n \n \n return duMap", "def retrieve_smiles(l):\n\t\n\tl = str(l)\n\tl = l.split(\"\\\\t\")\n\tentry_in_dataset = [l[0].split(\"'\")[1], l[1].split(\"\\\\n\")[0]] \n\t# print (entry_in_dataset)\n\treturn entry_in_dataset", "def test_parse_mapping_file(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n obs = parse_mapping_file(s1)\r\n self.assertEqual(obs, exp)\r\n\r\n # We don't currently support this, but we should soon...\r\n # check that first non-comment, non-blank line is used as\r\n # header\r\n # s1 = ['sample\\ta\\tb', '#comment line to skip',\\\r\n # 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n # exp = ([['x','y','z'],['i','j','k']],\\\r\n # ['sample','a','b'],\\\r\n # 
['comment line to skip','more skip'])\r\n # obs = parse_mapping_file(s1)\r\n # self.assertEqual(obs, exp)\r\n\r\n # check that we strip double quotes by default\r\n s2 = ['#sample\\ta\\tb', '#comment line to skip',\r\n '\"x \"\\t\" y \"\\t z ', ' ', '\"#more skip\"', 'i\\t\"j\"\\tk']\r\n obs = parse_mapping_file(s2)\r\n self.assertEqual(obs, exp)", "def crear_mapa (self, ancho = 40 , largo = 40):\n\n for i in range (largo):\n\n a = \" \"\n b = []\n for z in range (ancho):\n b.append(a)\n kasino.mapa.append(b)\n\n for i in range (1,ancho -1):\n kasino.mapa[0][i]=\"═══\"\n kasino.mapa[largo-1][i]=\"═══\"\n\n for i in range (1,largo -1):\n kasino.mapa[i][0]= \" ║\"\n kasino.mapa[i][ancho-1]= \"║\"\n \n kasino.mapa [0][0]=\" ╔\"\n kasino.mapa [0][ancho-1]=\"╗\"\n kasino.mapa [largo-1][0]=\" ╚\"\n kasino.mapa [largo-1][ancho-1]=\"╝\"", "def intialize_asteroid_map(asteroid_map: str) -> Dict[Asteroid, None]:\n asteroids = {}\n for y, row in enumerate(asteroid_map.strip().splitlines()):\n for x, col in enumerate(row):\n if col == \"#\":\n asteroids[Asteroid(x, y)] = None\n return asteroids", "def func2(string:str):\n with open(string,\"r\") as file:\n data = file.read()\n data = data.split(\"bandwidths [1]:\")[0]\n\n final = {}\n for i in range(1,3):\n final[\"formants [{}]\".format(i)] = []\n my_list = data.split(\"formants\")\n for i in range(2,4):\n final[\"formants [{}]\".format(i-1)].extend(list(map(pars_points,my_list[i].split(\"points \")[1:])))\n return final", "def loci_parsed(loci_file):\n #\n ga_list = [\"Ang_30\",\"Ang_29\"]\n\n gb_list = [\"Ang_67\", \"Ang_21\"]\n\n cc_list = [\"Cg12063\", \"Cg125212\", \"Cg126212\", \"Cg12758\", \"Cg_432\"]\n\n loci_dic = {}\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n\n\n for files in loci_file:\n\n name= files.strip().split (\"/\")\n name_loci = name[12].split(\"_\")\n name_loci_1 = name_loci[1].split(\".\")\n real_name_loci = name_loci_1[0]\n\n loci_file = open(files)\n\n\n for line in loci_file:\n\n if line[:1] in \"0123456789\":\n pass\n else:\n\n line_information = line.strip().split()\n isolate = line_information[0]\n sequence = line_information [1]\n\n # if \"-\" in sequence:\n # sequence = sequence.replace (\"-\", \"\")\n\n if isolate in ga_list and loci_list[\"ga\"] == None:\n loci_list[\"ga\"] = sequence\n if isolate in gb_list and loci_list[\"gb\"] == None:\n loci_list[\"gb\"] = sequence\n if isolate in cc_list and loci_list[\"cc\"] == None:\n loci_list[\"cc\"] = sequence\n loci_dic[real_name_loci] = loci_list\n\n\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n return loci_dic", "def mapping_stratum(download_files =True):\r\n # get code description _index \r\n ix_= AGSO_PROPERTIES['props_codes'].index('name')\r\n def mfunc_(d): \r\n \"\"\" Set individual layer in dict of properties \"\"\"\r\n _p= {c: k.lower() if c not in ('code', 'label', 'name') else k \r\n for c, k in zip(AGSO_PROPERTIES['props_codes'], d) }\r\n id_= d[ix_].replace('/', '_').replace(\r\n ' ', '_').replace('\"', '').replace(\"'\", '').lower()\r\n return id_, _p \r\n rock_and_structural_props =list()\r\n for agso_data in tuple(set_agso_properties(download_files)): \r\n # remove the header of the property file\r\n rock_and_structural_props.append(\r\n dict(map( lambda x: mfunc_(x), agso_data[1:])))\r\n \r\n return tuple(rock_and_structural_props)", "def ParserPDB(a):\n\tcontenu=list()\n\tmon_fichier=open(a,\"r\")\n\tfor line in mon_fichier.readlines():\n\t\tcontenu.append(line.strip()) #met le contenu du fichier pdb dans la liste 
\"contenu\"\n\n\tacidea=dict()\n\t\n\n\n\tfor chain in range(len(contenu)): #On parcourt cette liste contenant tout le fichier pdb\n\t\tif contenu[chain][0:5]==\"MODEL\":\n\t\t\tnewProt = contenu[chain][7:14]\n\t\t\t\n\t\t\tif newProt not in acidea.keys():\n\t\t\t\tacidea[newProt]={}\n\t\t\t\t\n\t\tif contenu[chain][0:4]==\"ATOM\": #Si la ligne commence par \"ATOM\" \n\t\t\tChaine = contenu[chain][21]\n\t\t\t\n\t\t\tif Chaine not in acidea[newProt].keys(): #Si la chaine ( A, B ... ) existe pas deja \n\t\t\t\tacidea[newProt][Chaine] = {} #creation du dictionnaire qui a pour nom les caractères a la ligne 21 ( Chaine)\n\t\t\t\n\t\t\tPosi = contenu[chain][24:26]\n\t\t\tif Posi not in acidea[newProt][Chaine].keys(): #Si la position pour une chaine n'existe pas deja (ex : -3 dans la chaine A)\n\t\t\t\tacidea[newProt][Chaine][Posi]={} # creation du dictionnaire poisition dans le dictionnaire chaine \n\t\t\t\n\t\t\tresidu = contenu[chain][12:16]\n\t\t\tif residu not in acidea[newProt][Chaine][Posi].keys(): #si le residu n'existe pas deja pour une chaine et une position donnée (ex : un CO de la chaine A a la position -3)\n\t\t\t\tacidea[newProt][Chaine][Posi][residu]= {} #Creation du dictionnaire nom de l'atome, contenu dans le dictionnaire position lui meme contenu dans le dictionnaire chaine\t\n\t\t\t\n\t\t\t#repartition de l'information dans le dictionnaire.\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"x\"] = float(contenu[chain][32:38]) #Mise des information de X dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"y\"] = float(contenu[chain][40:46]) #Mise des information de Y dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"z\"] = float(contenu[chain][48:54]) #Meme chose pour Z\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"Id\"] = contenu[chain][9:11] #Meme chose pour Identifiant\n\n\treturn( acidea)", "def parse(input_file):\n clasa = []\n adiacente = []\n suparati = []\n start = None\n final = None\n _before_suparati = True # inaintea liniei care separa clasa de copiii suparati\n with open(input_file) as f:\n lines = list(f.readlines())\n for line in lines: # Procesam fiecare linie\n l = line.replace(\"\\n\", \"\").split()\n if _before_suparati:\n if l[0] == \"suparati\":\n _before_suparati = False\n continue\n clasa.append(l)\n else:\n if l[0] == \"mesaj:\":\n start = l[1]\n final = l[3]\n else:\n suparati.append((l[0], l[1]))\n\n ## Construim adiacentele\n ##\n ## len(clasa) = numarul de randuri din clasa. 
\n ## 6 copii pe fiecare rand => numarul de copii = 6 * len(clasa)\n adiacente = list([0] * (6 * len(clasa)) for _ in range(6 * len(clasa)))\n\n def _nesuparati(copil1, copil2):\n return (copil1, copil2) not in suparati and (copil2, copil1) not in suparati\n\n ## coloana de la stanga\n for i in range(len(clasa)):\n for j in range(6):\n\n if j % 2 == 0: ## drumuri orizontale pe cele 3 coloane\n \n if _nesuparati(clasa[i][j], clasa[i][j+1]) and\\\n clasa[i][j] != \"liber\" and clasa[i][j+1] != \"liber\":\n adiacente[i * 6 + j][i * 6 + j + 1] = 1\n adiacente[i * 6 + j + 1][i * 6 + j] = 1\n \n if i < len(clasa) - 1: # drumuri verticale de la primul rand pana la ultimul rand - 1\n\n if clasa[i][j] != \"liber\" and clasa[i+1][j] != \"liber\" and\\\n _nesuparati(clasa[i][j], clasa[i+1][j]):\n adiacente[i * 6 + j][(i + 1) * 6 + j] = 1\n adiacente[(i + 1) * 6 + j][i * 6 + j] = 1\n \n if (j == 1 or j == 3) and (i >= len(clasa) - 2): # transferul intre ultimele si penultimele banci\n\n if _nesuparati(clasa[i][j], clasa[i][j+1]) and\\\n clasa[i][j] != \"liber\" and clasa[i][j+1] != \"liber\":\n adiacente[i * 6 + j][i * 6 + j + 1] = 1\n adiacente[i * 6 + j + 1][i * 6 + j] = 1\n\n\n ## Vector de copii\n copii = reduce(lambda x, y: x + y, clasa, []) ## pastram locurile libere ca sa putem potrivi indicii\n\n if copii == [] or start is None or final is None: ## Fisierul e gol sau formatul gresit. Bail out\n raise MalformedInputException(\"Malformed input file. Bailing.\")\n \n start_index = copii.index(start)\n final_index = copii.index(final)\n \n if sum(adiacente[start_index]) < 1 or sum(adiacente[final_index]) < 1:\n raise EarlyNoSolution(\"Nu poate exista o solutie.\")\n\n return start, final, copii, adiacente", "def parseMetadataMap(lines):\r\n return MetadataMap(*parse_mapping_file_to_dict(lines))", "def imprime_mapa(lat,lon):\r\n\r\n lista=[\"colegio\", \"starbucks\",\"estadio de baloncesto\", \"bar\",\"restaurante vegano\",\"peluqueria perros\",\"aeropuerto\"]\r\n \r\n tipo=list()\r\n latitud=list()\r\n longitud=list()\r\n\r\n for q in lista:\r\n resultado=foursquare_visual({'latitud':lat, 'longitud':lon},q)\r\n \r\n for r in resultado:\r\n tipo.append(q.replace(\" \",\"_\"))\r\n latitud.append(r['latitud'])\r\n longitud.append(r['longitud'])\r\n #if q == \"colegio\" or q == \"peluqueria perros\":\r\n # print(pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud}))\r\n # raise\r\n \r\n \r\n df=pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud})\r\n\r\n \r\n\r\n mapa = Map(location=[lat,lon],zoom_start=15)\r\n\r\n empresa = {\r\n \"location\":[lat, lon ],\r\n \"tooltip\" : \"Empresa\"\r\n }\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-dot-circle-o\",\r\n icon_color = \"white\"\r\n )\r\n Marker(**empresa,icon = icon ).add_to(mapa)\r\n\r\n\r\n for i, row in df.iterrows():\r\n establecimiento = {\r\n \"location\":[row[\"latitud\"], row[\"logitud\"]],\r\n \"tooltip\" : row[\"tipo\"].replace(\"_\",\" \").capitalize()\r\n }\r\n\r\n if row[\"tipo\"] == \"starbucks\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"fa-coffee\",\r\n icon_color = \"white\"\r\n )\r\n \r\n elif row[\"tipo\"] == \"restaurante_vegano\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"leaf\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"colegio\":\r\n icon = Icon(color = \"blue\",\r\n prefix = \"fa\",\r\n icon = \"fa-graduation-cap \",\r\n icon_color = \"black\"\r\n )\r\n \r\n elif row[\"tipo\"] == 
\"peluqueria_perros\":\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-paw\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"estadio_de_baloncesto\":\r\n icon = Icon(color = \"orange\",\r\n prefix = \"fa\",\r\n icon = \"fa-futbol-o \",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"aeropuerto\":\r\n icon = Icon(color = \"white\",\r\n prefix = \"fa\",\r\n icon = \"fa-plane\",\r\n icon_color = \"black\"\r\n )\r\n elif row[\"tipo\"] == \"bar\":\r\n icon = Icon(color = \"pink\",\r\n prefix = \"fa\",\r\n icon = \"fa-glass\",\r\n icon_color = \"white\"\r\n )\r\n \r\n else:\r\n prefix = \"fa\",\r\n icon = \"briefcase\",\r\n icon_color = \"black\" \r\n Marker(**establecimiento,icon = icon ).add_to(mapa)\r\n return mapa", "def mapper_get_words(self, _, record):\n aux1 = record.split(\"\\t\")\n aux2 = Utils.tokenize_words(aux1[2]) # Since we want to work with the words in the primaryTitle, we have to clean the data first\n for word in aux2:\n yield (aux1[1]+\"+\"+word[0].lower(), 1)", "def gen_dataobjs():\n for mch in pb_data.finditer(b_str):\n yield DataObjStr(**mch.groupdict())", "def creer_labyrinthe_depuis_chaine(self, chaine):\n labyLoad = {}\n y = 0\n x = 0\n for obj in chaine:\n if obj == \"\\n\":\n labyLoad[x, y] = obj\n y += 1\n x = 0\n else:\n labyLoad[x, y] = obj\n x += 1\n return labyLoad" ]
[ "0.6102629", "0.5565367", "0.5481452", "0.53654796", "0.5168428", "0.5052943", "0.5018136", "0.4998325", "0.49851122", "0.4938655", "0.49217921", "0.4897635", "0.4893655", "0.48854673", "0.48832467", "0.48750573", "0.48663384", "0.4831402", "0.48311475", "0.47943395", "0.47939312", "0.47884175", "0.4778347", "0.4775585", "0.47488266", "0.47356883", "0.47191578", "0.47053656", "0.47032323", "0.4681907" ]
0.71529096
0
This method saves the kasino maquinas and kasino decoracion dicts and the dia and dinero ints to a single file; this time they are all written together, since they are easy to recover
def guardar_otras (self,maquinas,decoracion,dia,dinero):#dicc,dicc,int,int stream_guardar = open("yo_otros.txt","wt",encoding="utf-8") for i in maquinas: stream_guardar.write(str(maquinas[i])) for i in decoracion: stream_guardar.write(str(decoracion[i])) stream_guardar.write("D"+str(dia)) stream_guardar.write("M"+str(dinero))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def seleccion(datos,multifasta,querys):\n\n #Hacemos una lista con los nombres de las querys que están en el archivo\n nombres_query=[]\n with open (querys,mode=\"r\") as f:\n for linea in f:\n if linea[0]==\">\":\n nombres_query.append(linea[1:len(linea)-1])\n f.close()\n\n #Obtenemos los nombres de las query y de los subject con los que ha hecho hit\n nombres2=datos[\"Nombre_subject\"]\n nombres1=datos[\"Nombre_query\"]\n nombres1=list(nombres1[1:])\n nombres2=list(nombres2[1:])\n \n seleccion={}#diccionario querys:hits blast\n #Parseamos las listas para obtener el nombre de la query como clave\n #y como valor una lista con los subjects con los que ha hecho hit\n for i in range(len(nombres1)): \n for x in range(len(nombres_query)):\n if nombres_query[x]==nombres1[i]:\n clave=nombres_query[x]\n valor=nombres2[i]\n if clave in seleccion:\n seleccion[clave].append(valor)\n else:\n seleccion[clave]=[valor]\n #Elimino valores duplicados en los valores\n for k, v in seleccion.items():\n nuevo=[]\n for item in v:\n if item not in nuevo:\n nuevo.append(item)\n seleccion[k] = nuevo\n\n #Contador para determinar si se encuentra en una linea con el nombre (>) o con la secuencia\n n=0\n #Contador para recorrer la lista con los nombres de las querys\n cuenta=0\n #Lista con los nombres de los archivos generados\n lista_nombres=[]\n for opciones in seleccion.items():\n abre_query=open(querys,\"r\")#Abrimos el archivo de las querys\n keys=seleccion.keys()#Generamos una lista con las keys del diccionario, que son las querys\n modifica=[]\n modifica1=[]\n modifica2=[]\n modifica3=[]\n\n nombre_archivo=opciones[0]\n with open (multifasta,mode=\"r\") as f:\n with open(nombre_archivo,\"w+\") as archivo: #El nombre de cada archivo será el nombre de su query\n #Forma una lista con todos los hits de blast\n modifica2=opciones[1]\n \n # Forma una lista con el nombre de cada una de las querys\n for x in abre_query: \n if x[0]==\">\":\n modifica1.append(x[1:len(x)-1])\n \n #En caso de que los hits que encuentra en blast no sean las query, las elimina\n eliminar=[item for item in modifica1 if item not in modifica2]\n for r in eliminar:\n modifica1.remove(r)\n \n #Nos quedamos solamente con los hits que encontró en blast, quitando las querys\n modifica3 = [item for item in modifica2 if item not in modifica1]\n modifica3.sort()\n \n #genera la lista con todos los hits, incluidas las query\n if len(modifica1)<=len(keys):\n modifica=modifica1+modifica3\n\n #Forma un archivo por cada query introducida, con los nombres y 
secuencias\n #que se obtuvieron en el blast\n for linea in f:\n if cuenta==(len(modifica)):\n break\n if linea[1:(len(linea)-1)]==modifica[cuenta]:\n archivo.write(linea)\n n+=1\n elif n==1 and linea[0]!=\">\":\n archivo.write(linea)\n cuenta+=1\n n=0\n else:\n n=0\n lista_nombres=lista_nombres+[nombre_archivo] \n archivo.close()\n n=0\n cuenta=0\n f.close()\n \n \n \n\n \n return lista_nombres", "def getCambiosQafectanCaja(self, fechaInicio, fechaFin, usuarioColaborador=\"\"):\n\tif usuarioColaborador == \"\" and fechaInicio == \"\" and fechaFin == \"\":\n\t return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\"\"\")\n elif usuarioColaborador == \"\":\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\"\"\" %(fechaInicio,fechaFin))\n else:\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\n and c.usuario_Colaborador = '%s'\"\"\" %(fechaInicio,fechaFin,usuarioColaborador))", "def f_precios_masivos(p0_fini, p1_ffin, p2_gran, p3_inst, p4_oatk, p5_ginc):\n\n def f_datetime_range_fx(p0_start, p1_end, p2_inc, p3_delta):\n \"\"\"\n Parameters\n ----------\n p0_start\n p1_end\n p2_inc\n p3_delta\n Returns\n -------\n ls_resultado\n Debugging\n ---------\n \"\"\"\n\n ls_result = []\n nxt = p0_start\n\n while nxt <= p1_end:\n ls_result.append(nxt)\n if p3_delta == 'minutes':\n nxt += timedelta(minutes=p2_inc)\n elif p3_delta == 'hours':\n nxt += timedelta(hours=p2_inc)\n elif p3_delta == 'days':\n nxt += timedelta(days=p2_inc)\n\n return ls_result\n\n # inicializar api de OANDA\n\n api = API(access_token=p4_oatk)\n\n gn = {'S30': 30, 'S10': 10, 'S5': 5, 'M1': 60, 'M5': 60 * 5, 'M15': 60 * 15,\n 'M30': 60 * 30, 'H1': 60 * 60, 'H4': 60 * 60 * 4, 'H8': 60 * 60 * 8,\n 'D': 60 * 60 * 24, 'W': 60 * 60 * 24 * 7, 'M': 60 * 60 * 24 * 7 * 4}\n\n # -- para el caso donde con 1 peticion se cubran las 2 fechas\n if int((p1_ffin - p0_fini).total_seconds() / gn[p2_gran]) < 4999:\n\n # Fecha inicial y fecha final\n f1 = p0_fini.strftime('%Y-%m-%dT%H:%M:%S')\n f2 = p1_ffin.strftime('%Y-%m-%dT%H:%M:%S')\n\n # Parametros pra la peticion de precios\n params = {\"granularity\": p2_gran, \"price\": \"M\", \"dailyAlignment\": 16, \"from\": f1,\n \"to\": f2}\n\n # Ejecutar la peticion de precios\n a1_req1 = instruments.InstrumentsCandles(instrument=p3_inst, params=params)\n a1_hist = api.request(a1_req1)\n\n # Para debuging\n # print(f1 + ' y ' + f2)\n lista = list()\n\n # Acomodar las llaves\n for i in range(len(a1_hist['candles']) - 1):\n lista.append({'TimeStamp': a1_hist['candles'][i]['time'],\n 'Open': a1_hist['candles'][i]['mid']['o'],\n 'High': a1_hist['candles'][i]['mid']['h'],\n 'Low': a1_hist['candles'][i]['mid']['l'],\n 'Close': a1_hist['candles'][i]['mid']['c']})\n\n # Acomodar en un data frame\n r_df_final = pd.DataFrame(lista)\n r_df_final = r_df_final[['TimeStamp', 'Open', 'High', 'Low', 'Close']]\n r_df_final['TimeStamp'] = pd.to_datetime(r_df_final['TimeStamp'])\n 
r_df_final['Open'] = pd.to_numeric(r_df_final['Open'], errors='coerce')\n r_df_final['High'] = pd.to_numeric(r_df_final['High'], errors='coerce')\n r_df_final['Low'] = pd.to_numeric(r_df_final['Low'], errors='coerce')\n r_df_final['Close'] = pd.to_numeric(r_df_final['Close'], errors='coerce')\n\n return r_df_final\n\n # -- para el caso donde se construyen fechas secuenciales\n else:\n\n # hacer series de fechas e iteraciones para pedir todos los precios\n fechas = f_datetime_range_fx(p0_start=p0_fini, p1_end=p1_ffin, p2_inc=p5_ginc,\n p3_delta='minutes')\n\n # Lista para ir guardando los data frames\n lista_df = list()\n\n for n_fecha in range(0, len(fechas) - 1):\n\n # Fecha inicial y fecha final\n f1 = fechas[n_fecha].strftime('%Y-%m-%dT%H:%M:%S')\n f2 = fechas[n_fecha + 1].strftime('%Y-%m-%dT%H:%M:%S')\n\n # Parametros pra la peticion de precios\n params = {\"granularity\": p2_gran, \"price\": \"M\", \"dailyAlignment\": 16, \"from\": f1,\n \"to\": f2}\n\n # Ejecutar la peticion de precios\n a1_req1 = instruments.InstrumentsCandles(instrument=p3_inst, params=params)\n a1_hist = api.request(a1_req1)\n\n # Para debuging\n print(f1 + ' y ' + f2)\n lista = list()\n\n # Acomodar las llaves\n for i in range(len(a1_hist['candles']) - 1):\n lista.append({'TimeStamp': a1_hist['candles'][i]['time'],\n 'Open': a1_hist['candles'][i]['mid']['o'],\n 'High': a1_hist['candles'][i]['mid']['h'],\n 'Low': a1_hist['candles'][i]['mid']['l'],\n 'Close': a1_hist['candles'][i]['mid']['c']})\n\n # Acomodar en un data frame\n pd_hist = pd.DataFrame(lista)\n pd_hist = pd_hist[['TimeStamp', 'Open', 'High', 'Low', 'Close']]\n pd_hist['TimeStamp'] = pd.to_datetime(pd_hist['TimeStamp'])\n\n # Ir guardando resultados en una lista\n lista_df.append(pd_hist)\n\n # Concatenar todas las listas\n r_df_final = pd.concat([lista_df[i] for i in range(0, len(lista_df))])\n\n # resetear index en dataframe resultante porque guarda los indices del dataframe pasado\n r_df_final = r_df_final.reset_index(drop=True)\n r_df_final['Open'] = pd.to_numeric(r_df_final['Open'], errors='coerce')\n r_df_final['High'] = pd.to_numeric(r_df_final['High'], errors='coerce')\n r_df_final['Low'] = pd.to_numeric(r_df_final['Low'], errors='coerce')\n r_df_final['Close'] = pd.to_numeric(r_df_final['Close'], errors='coerce')\n\n return r_df_final", "def collecte_docs(self, chercheur, overwrite=False): # self,\n init = overwrite # If True, data persistence is lost when references are updated\n docs = hal.find_publications(chercheur[\"halId_s\"], \"authIdHal_s\")\n\n progress_recorder = ProgressRecorder(self)\n progress_recorder.set_progress(0, len(docs), description=\"récupération des données HAL\")\n # Insert documents collection\n for num, doc in enumerate(docs):\n doc[\"country_colaboration\"] = location_docs.generate_countrys_fields(doc)\n doc = doi_enrichissement.docs_enrichissement_doi(doc)\n if \"fr_abstract_s\" in doc.keys():\n if isinstance(doc[\"fr_abstract_s\"], list):\n doc[\"fr_abstract_s\"] = \"/n\".join(doc[\"fr_abstract_s\"])\n if len(doc[\"fr_abstract_s\"]) > 100:\n doc[\"fr_entites\"] = keyword_enrichissement.return_entities(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n doc[\"fr_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n if \"en_abstract_s\" in doc.keys():\n if isinstance(doc[\"en_abstract_s\"], list):\n doc[\"en_abstract_s\"] = \"/n\".join(doc[\"en_abstract_s\"])\n if len(doc[\"en_abstract_s\"]) > 100:\n doc[\"en_entites\"] = keyword_enrichissement.return_entities(\n 
doc[\"en_abstract_s\"], \"en\"\n )\n doc[\"en_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"en_abstract_s\"], \"en\"\n )\n\n doc[\"_id\"] = doc[\"docid\"]\n doc[\"validated\"] = True\n\n doc[\"harvested_from\"] = \"researcher\"\n\n doc[\"harvested_from_ids\"] = []\n doc[\"harvested_from_label\"] = []\n\n #\n #\n # print(doc[\"authorship\"], doc ['authLastName_s'])\n\n if len(doc[\"authIdHal_s\"]) != len(doc[\"authLastName_s\"]):\n # print (\"elastichal.py : test d'autorat no good\")\n # test sur le nom complet...\n nom = [\n truc\n for truc in doc[\"authLastName_s\"]\n if chercheur[\"lastName\"].lower() in truc.lower()\n ] # pour les récemment mariés qui auraient un nom composé...\n # Après si 'lun des co-auteur porte le même nom...\n if len(nom) > 0:\n nom = nom[0].title()\n try:\n if doc[\"authLastName_s\"].index(nom) == 0: # premier\n doc[\"authorship\"] = [\n {\"authorship\": \"firstAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authLastName_s\"].index(nom) == len(doc[\"authLastName_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n except ValueError:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n elif chercheur[\"halId_s\"] in doc[\"authIdHal_s\"]:\n if doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == 0:\n doc[\"authorship\"] = [\n {\"authorship\": \"firstAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == len(doc[\"authIdHal_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n else:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n\n doc[\"harvested_from_ids\"].append(chercheur[\"halId_s\"])\n\n # historique d'appartenance du docId\n # pour attribuer les bons docs aux chercheurs\n # harvet_history.append({'docid': doc['docid'], 'from': row['halId_s']})\n #\n # for h in harvet_history:\n # if h['docid'] == doc['docid']:\n # if h['from'] not in doc[\"harvested_from_ids\"]:\n # doc[\"harvested_from_ids\"].append(h['from'])\n\n doc[\"records\"] = []\n\n doc[\"MDS\"] = utils.calculate_mds(doc)\n\n try:\n should_be_open = utils.should_be_open(doc)\n if should_be_open == 1:\n doc[\"should_be_open\"] = True\n if should_be_open == -1:\n doc[\"should_be_open\"] = False\n\n if should_be_open == 1 or should_be_open == 2:\n doc[\"isOaExtra\"] = True\n elif should_be_open == -1:\n doc[\"isOaExtra\"] = False\n except IndexError:\n print(\"publicationDate_tdate error ?\")\n doc[\"Created\"] = datetime.datetime.now().isoformat()\n\n if not init: # récupération de l'existant pour ne pas écraser\n field = \"_id\"\n doc_param = esActions.scope_p(field, doc[\"_id\"])\n\n if not es.indices.exists(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\"\n ): # -researchers\" + row[\"ldapId\"] + \"-documents\n print(\"exception \", chercheur[\"labHalId\"], chercheur[\"ldapId\"])\n\n res = es.search(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n body=doc_param,\n ) # -researchers\" + row[\"ldapId\"] + \"-documents\n\n if len(res[\"hits\"][\"hits\"]) > 0:\n doc[\"validated\"] = res[\"hits\"][\"hits\"][0][\"_source\"][\"validated\"]\n if \"authorship\" in res[\"hits\"][\"hits\"][0][\"_source\"]:\n doc[\"authorship\"] = 
res[\"hits\"][\"hits\"][0][\"_source\"][\"authorship\"]\n\n if (\n res[\"hits\"][\"hits\"][0][\"_source\"][\"modifiedDate_tdate\"]\n != doc[\"modifiedDate_tdate\"]\n ):\n doc[\"records\"].append(\n {\n \"beforeModifiedDate_tdate\": doc[\"modifiedDate_tdate\"],\n \"MDS\": res[\"hits\"][\"hits\"][0][\"_source\"][\"MDS\"],\n }\n )\n\n else:\n doc[\"validated\"] = True\n progress_recorder.set_progress(num, len(docs), description=\"(récolte)\")\n progress_recorder.set_progress(num, len(docs), description=\"(indexation)\")\n helpers.bulk(\n es,\n docs,\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n refresh=\"wait_for\",\n )\n\n return chercheur # au cas où", "def _obtener_autos_cercanos(self, x, y):\n\n\t\t\"\"\"Convierte a metros\"\"\"\n\n\t\tx = vincenty((0,x), origen).meters\n\t\ty = vincenty((y,0), origen).meters\n\t\t\n\t\tconductores = mongo.db.conductores\n\t\tquery = \"if(this.posicion){if((Math.pow(this.posicion.lng-\"+str(x)+\",2)+Math.pow(this.posicion.lat-\"+str(y)+\",2)) <= \"+str(self.distanciaMaxima)+\") return this}\"\n\t\treturn conductores.find({\"estado\": \"libre\", \"$where\": query})", "def __busca_notas(self, tipo_busca, intervalo_inicial, intervalo_final, serie, \r\n\t\t chave_acesso):\r\n info_consulta = \"11\" # padrao 1\r\n\tresposta = \" \"*231 # padrao 230\r\n\t#resposta = None \r\n\r\n status = self.dll.rRetornarInformacao_NFCe_Daruma(tipo_busca, \r\n intervalo_inicial, intervalo_final, serie, chave_acesso, \r\n\t info_consulta, resposta) \r\n\tif status !=1:\r\n\t if status == -1:\r\n\t\traise Exception(\"-1: Erro encontrado na execucao do metodo\")\r\n elif status == -2:\r\n\t\traise Exception(\"-2: Chave Invalida\")\r\n\t elif status == -3:\r\n\t\traise Exception(\"-3: Falha no schema XML.\")\r\n\t elif status == -4:\r\n\t\traise Exception(\"-4: XML fora do padrao\")\r\n\t elif status == -5:\r\n\t\traise Exception(\"-5: Erro generico\")\r\n\t elif status == -8:\r\n\t\traise Exception(\"-8: Usuario nao Autorizado\")\r\n elif status == -9:\r\n\t\traise Exception(\"-9: Usuario nao Licenciado\")\r\n\t elif status == -10:\r\n\t\traise Exception(\"-10: Documento e Ambiente nao identificados\")\r\n\t elif status == -13:\r\n\t\traise Exception(\"-13: Tipo de Documento nao identificado\")\r\n elif status == -14:\r\n\t\traise Exception(\"-14: Erro retornado pelo WebService.\")\r\n elif status == -52:\r\n\t\traise Exception(\"-52: Erro ao gravar em arquivo temporario\")\r\n elif status == -99:\r\n\t\traise Exception(\"-99: Parametros invalidos ou ponteiro nulo de pametros\")\r\n elif status == -99:\r\n\t\traise Exception(\"-103: Nao foram encontradas as DLLs auxiliaes\")\r\n\t else:\r\n\t\traise Exception(\"Erro ao executar o metodo Retornar Informacao.\")", "def resultat(self, concordance_mf, concordance_pf, liste_F, liste_M, liste_P):\n resultat = {\"Marqueur\": [], \"Conclusion\": [], \"Concordance Mere/Foetus\": [], \"Détails M/F\": [],\n \"Concordance Pere/Foetus\": [], \"Détails P/F\": []}\n marqueurs_conta = 0\n marqueurs_non_conta = 0\n somme_conta = 0\n if liste_F[0].allele[1] == 0.0:\n self.set_sexe(\"F\")\n else:\n self.set_sexe(\"M\")\n if concordance_mf != 16 and concordance_pf != 16 and concordance_pf != None:\n self.set_concordance_mere_foet(\"NON\")\n self.set_concordance_pere_foet(\"NON\")\n del resultat[\"Conclusion\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n resultat[\"Concordance 
Mere/Foetus\"].append(liste_F[nbres].concordance_mere_foetus)\n resultat[\"Concordance Pere/Foetus\"].append(liste_P[nbres].concordance_pere_foetus)\n if liste_F[nbres].concordance_mere_foetus == \"NON\" and liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M : \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails P/F\"].append(\n \"P : \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F : \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n elif liste_F[nbres].concordance_mere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M: \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F : \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails P/F\"].append(\"\")\n elif liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails P/F\"].append(\n \"P: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails M/F\"].append(\"\")\n else:\n resultat[\"Détails M/F\"].append(\"\")\n resultat[\"Détails P/F\"].append(\"\")\n conclusion = pd.DataFrame({\"1\": [\"Non calculé\", \"Non calculé\", \"Non calculé\", self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\",\n \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Concordance Mere/Foetus\", \"Détails M/F\",\n \"Concordance Pere/Foetus\", \"Détails P/F\"])\n return resultats, conclusion\n elif concordance_mf != len(liste_F) and concordance_pf == len(liste_F) or concordance_mf != len(\n liste_F) and concordance_pf == None:\n self.set_concordance_mere_foet(\"NON\")\n self.set_concordance_pere_foet(\"OUI\")\n if concordance_pf == None:\n self.set_concordance_pere_foet(\"ABS\")\n del resultat[\"Conclusion\"]\n del resultat[\"Concordance Pere/Foetus\"]\n del resultat[\"Détails P/F\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n resultat[\"Concordance Mere/Foetus\"].append(liste_F[nbres].concordance_mere_foetus)\n if liste_F[nbres].concordance_mere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M: \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n else:\n resultat[\"Détails M/F\"].append(\"\")\n conclusion = pd.DataFrame({\"1\": [\"Non calculé\", \"Non calculé\", \"Non calculé\", self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\",\n \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Concordance Mere/Foetus\", \"Détails M/F\"])\n return resultats, conclusion\n elif concordance_mf == len(liste_F) and concordance_pf == len(liste_F) or concordance_mf == len(\n liste_F) and concordance_pf == None:\n self.set_concordance_mere_foet(\"OUI\")\n self.set_concordance_pere_foet(\"OUI\")\n if concordance_pf == None:\n self.set_concordance_pere_foet(\"ABS\")\n del resultat[\"Concordance Mere/Foetus\"]\n del resultat[\"Concordance Pere/Foetus\"]\n del resultat[\"Détails P/F\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n if 
liste_F[nbres].informatif == 0:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Mère homozygote\")\n elif liste_F[nbres].informatif == 1:\n if liste_F[nbres].contamination == 0:\n marqueurs_non_conta += 1\n resultat[\"Conclusion\"].append(\"Non contaminé\")\n resultat[\"Détails M/F\"].append(\"\")\n elif liste_F[nbres].contamination == 1:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n else:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n elif liste_F[nbres].informatif == 2:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Allèles semblables\")\n else:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Echo\")\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Conclusion\", \"Détails M/F\"])\n try:\n moyenne_conta = somme_conta / marqueurs_conta\n except ZeroDivisionError:\n moyenne_conta = 0\n conclusion = pd.DataFrame(\n {\"1\": [int(marqueurs_non_conta), int(marqueurs_conta), round(moyenne_conta, 2), self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\", \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n return resultats, conclusion\n elif concordance_mf == len(liste_F) and concordance_pf != len(liste_F):\n self.set_concordance_mere_foet(\"OUI\")\n self.set_concordance_pere_foet(\"NON\")\n del resultat[\"Concordance Mere/Foetus\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Concordance Pere/Foetus\"].append(liste_P[nbres].concordance_pere_foetus)\n if liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails P/F\"].append(\n \"P: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)))\n else:\n resultat[\"Détails P/F\"].append(\"\")\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n if liste_F[nbres].informatif == 0:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Mère homozygote\")\n elif liste_F[nbres].informatif == 1:\n if liste_F[nbres].contamination == 0:\n marqueurs_non_conta += 1\n resultat[\"Conclusion\"].append(\"Non contaminé\")\n resultat[\"Détails M/F\"].append(\"\")\n elif liste_F[nbres].contamination == 1:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n else:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n elif liste_F[nbres].informatif == 2:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Allèles semblables\")\n else:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Echo\")\n resultats = pd.DataFrame(resultat,\n columns=[\"Marqueur\", \"Conclusion\", \"Détails M/F\", \"Concordance Pere/Foetus\",\n 
\"Détails P/F\"])\n try:\n moyenne_conta = somme_conta / marqueurs_conta\n except ZeroDivisionError:\n moyenne_conta = 0\n conclusion = pd.DataFrame(\n {\"1\": [int(marqueurs_non_conta), int(marqueurs_conta), round(moyenne_conta, 2), self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\", \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n return resultats, conclusion", "def affichage_creation_tournoi():\n nom = \"\"\n lieu = \"\"\n date = \"\"\n nb_tours = 4\n joueurs = []\n temps = \"\"\n note = \"\"\n\n print(\"\\n---------------------------\")\n while len(nom) == 0:\n try:\n nom = str(input(\"\\nNom : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nom valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(lieu) == 0:\n try:\n lieu = str(input(\"\\nLieu : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un lieu valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(date) == 0:\n try:\n date = str(input(\"\\nDate\\nFormat : jj/mm/aaaa : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une date valide.\")\n sl(2)\n continue\n test_date = OutilsControleurs.test_date(date)\n if test_date == 0:\n print(\"\\nVous avez saisi une valeur trop grande.\")\n date = \"\"\n if test_date == 1:\n print(\"\\nVous avez saisi une valeur trop petite.\")\n date = \"\"\n if test_date == 2:\n break\n if test_date == 3:\n print(\"\\nVous avez saisi un format de date incorrect.\")\n date = \"\"\n\n print(\"\\n---------------------------\")\n nb_tours_modif = \"\"\n while nb_tours_modif != 2 or nb_tours_modif != 1:\n try:\n print(\"\\nNombre de tours\\nPar default le nombre est de 4\\nVoulez-vous modifier cette valeur ?\")\n nb_tours_modif = int(input(\"\\n1 - Oui\\n2 - Non\\n\\nVotre choix: \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours_modif == 1:\n while nb_tours == 4:\n try:\n nb_tours = int(input(\"\\nNombre de tours : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours == 4:\n break\n break\n if nb_tours_modif == 2:\n break\n\n print(\"\\n---------------------------\\n\\nListe des joueurs :\\n\")\n liste_joueurs_tournois = Joueur.joueurs_tournoi()\n if liste_joueurs_tournois == 0:\n print(\"Il n'y a pas ou pas suffisament de joueurs pour organiser un tounois.\")\n print(\"Veuillez ajouter des joueurs via le menu.\")\n input(\"\\nAppuyer sur entrer pour continuer\")\n return\n\n for arg in liste_joueurs_tournois:\n print(arg)\n x = 8\n while x != 0:\n try:\n joueur = int(input(\"Saisir encore {} indice de joueurs : \".format(x)))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un indice valide.\")\n sl(2)\n continue\n if joueur > 0 and joueur <= len(liste_joueurs_tournois):\n if joueur not in joueurs:\n joueurs.append(joueur)\n else:\n print(\"Vous avez deja saisi ce joueur.\")\n x += 1\n else:\n x += 1\n x -= 1\n\n y = 1\n nom_joueurs = []\n for arg in liste_joueurs_tournois:\n arg = arg[:-15]\n nom_joueurs.append(str(arg).replace(\"Indice joueur : {}\\n \".format(y), \"\").replace(\"\\n \", \"\"))\n y += 1\n joueurs = Joueur.get_joueurs_tournoi(joueurs, nom_joueurs)\n\n print(\"\\n---------------------------\")\n temps_choix = 0\n while temps_choix != 1 or temps_choix != 2 or temps_choix != 3:\n try:\n temps_choix = int(input(\"\\nContrôle de temps\\n1 - Bullet\\\n \\n2 - Blitz\\n3 - 
Coup rapide\\n\\nVotre choix : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if temps_choix == 1:\n temps = \"Bullet\"\n break\n if temps_choix == 2:\n temps = \"Blitz\"\n break\n if temps_choix == 3:\n temps = \"Coup rapide\"\n break\n\n print(\"\\n---------------------------\")\n while len(note) == 0:\n try:\n note = str(input(\"\\nDescription : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if len(note) == 0:\n break\n return nom, lieu, date, nb_tours, joueurs, temps, note", "def generarConsultasConexion(self):\n for parRecursos in self.CombiConsultaLibre:\n parRecursosL0=self.limpiaRecursos(parRecursos[0])\n parRecursosL1=self.limpiaRecursos(parRecursos[1])\n \n if self.nivel_profundidad>=1:\n consultasparql = self.busConex1 % (parRecursosL0,parRecursosL1,self.limit_BC)\n print consultasparql;\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n triple = parRecursos[0]+\"-|\"+parRecursos[1]+\"-|\"+resul['p1']['value']\n self.ResultConsultasConexion.append(triple) \n \n if self.nivel_profundidad>=2:\n consultasparql = self.busConex2 % (parRecursosL0,parRecursosL1,self.limit_BC)\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n o1=resul['o1']['value']\n o1=o1.replace('http://dbpedia.org/resource/','')\n triple1 = parRecursos[0]+\"-|\"+o1+\"*-|\"+resul['p1']['value']\n triple2 = parRecursos[1]+\"-|\"+o1+\"*-|\"+resul['p2']['value']\n self.ResultConsultasConexion.append(triple1) \n self.ResultConsultasConexion.append(triple2) \n \n if self.nivel_profundidad>=3:\n consultasparql = self.busConex3_1 % (parRecursosL0,parRecursosL1,self.limit_BC)\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n o1=resul['o1']['value']\n o1=o1.replace('http://dbpedia.org/resource/','')\n o2=resul['o2']['value']\n o2=o1.replace('http://dbpedia.org/resource/','')\n triple1 = parRecursos[0]+\"-|\"+o1+\"*-|\"+resul['p1']['value']\n triple2 = parRecursos[1]+\"-|\"+o2+\"*-|\"+resul['p2']['value']\n triple3 = o1+\"*-|\"+o2+\"*-|\"+resul['p3']['value'] \n self.ResultConsultasConexion.append(triple1) \n self.ResultConsultasConexion.append(triple2) \n self.ResultConsultasConexion.append(triple3) \n\n consultasparql = self.busConex3_2 % (parRecursosL0,parRecursosL1,self.limit_BC)\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n o1=resul['o1']['value']\n o1=o1.replace('http://dbpedia.org/resource/','')\n o2=resul['o2']['value']\n o2=o1.replace('http://dbpedia.org/resource/','')\n triple1 = parRecursos[0]+\"-|\"+o1+\"*-|\"+resul['p1']['value']\n triple2 = parRecursos[1]+\"-|\"+o2+\"*-|\"+resul['p2']['value']\n triple3 = o2+\"*-|\"+o1+\"*-|\"+resul['p3']['value'] \n self.ResultConsultasConexion.append(triple1) \n self.ResultConsultasConexion.append(triple2) \n self.ResultConsultasConexion.append(triple3)", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... 
lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... 
lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def get_only_wanted(self, datas_percorrer, index_data_1):\n \n # Dicionario de datas guardadas, chamando a function\n # OrderedDict() que lembra a ordem de cada item\n datas_guardadas = OrderedDict()\n \n # Sub_dicionario para datas/ano, chamando a function\n # OrderedDict() que lembra a ordem de cada item\n datas_guardadas_ano = OrderedDict()\n\n # Sub_dicionario para datas/mes, chamando a function\n # OrderedDict() que lembra a ordem de cada item\n datas_guardadas_mes = OrderedDict()\n \n # Lista com o nome dos meses\n meses_ano = [\n 'Janeiro',\n 'Fevereiro',\n 'Março',\n 'Abril',\n 'Maio',\n 'Junho',\n 'Julho',\n 'Agosto',\n 'Setembro',\n 'Outubro',\n 'Novembro',\n 'Dezembro'\n ]\n\n # Lista com o nome dos dias da semana\n dias_semana = [\n 'Domingo',\n 'Segunda-Feira',\n 'Terça-Feira',\n 'Quarta-Feira',\n 'Quinta-Feira',\n 'Sexta-Feira',\n 'Sábado',\n ]\n \n # lista de filtro dos dias desejados\n dias_desejados = [ 0, 2, 6]\n \n # lista contendo o numero do primeiro dia do mes\n # em relação ao numero total de dias no ano\n primeiro_dia_mes = [\n #Primeiro dia de cada mês\n 0, 31, 59, 90,\n 120, 151, 181, 212,\n 243, 273, 304, 334\n ]\n \n # lista de meses com 30 dias \n meses_trinta = [ 4, 6, 9, 11 ]\n \n # Esta variavel trará 31 dias para os não\n # estiverem a lista 'meses_trinta'\n numero_dias_mes = 31\n \n # Numero do dia atual\n numero_dia_ano = primeiro_dia_mes[self.mes -1] + self.dia\n \n # Cria variaveis para trabalhar com dia, mes, ano\n # e index para a lista 'dias_semana'\n dia_atual = self.dia\n mes_atual = self.mes\n ano_atual = self.ano\n sendo_dia = index_data_1\n # Variável para ano bissexto\n se_bissexto = False\n # Verifica se ano é bissexto\n if (ano_atual %4 == 0 and ano_atual %100 != 0):\n se_bissexto = True\n elif ano_atual %400 == 0:\n 
se_bissexto = True\n else:\n se_bissexto = False\n\n # Nome mes atual\n nome_mes_atual = ''\n \n # Inicia loop para filtrar dias\n for dia_passado in range(0, datas_percorrer + 1):\n\n #Da nome ao mes\n nome_mes_atual = meses_ano[mes_atual - 1]\n \n # Verifica se mes atual esta na lista meses_trinta\n # se true, o mes tem 30 dias\n if mes_atual in meses_trinta:\n numero_dias_mes = 30\n # Se o mes atual é = 2 (fevereiro), o mes possui 28 dias\n elif mes_atual == 2:\n numero_dias_mes = 28\n # Porem se for bissexto, o mes tem 29 dias.\n if se_bissexto == True:\n numero_dias_mes = 29\n else:\n numero_dias_mes = 31\n \n # Verifica se a data passa no filtro 'dias desejados'\n if sendo_dia in dias_desejados:\n # Concatena chave\n chave_dia_mes = str(dia_atual)\n #chave_dia_mes += '/' + str(mes_atual)\n # Concatena valor\n valor_semana = dias_semana[sendo_dia]\n # Guarda as datas no dicionario mes\n datas_guardadas_mes[chave_dia_mes] = valor_semana\n\n # Adiciona uma unidade no numero_do_dia\n # na data atual e no index do dia\n numero_dia_ano += 1\n dia_atual += 1\n sendo_dia += 1\n # Cria ou adiciona o dicionario mes no dicionario de ano\n datas_guardadas_ano[nome_mes_atual] = datas_guardadas_mes\n # Cria ou adiciona o dicionario ano no dicionario geral\n datas_guardadas[ano_atual] = datas_guardadas_ano\n \n # Se o index após a adição for > 6, retorna 0\n if sendo_dia > 6:\n sendo_dia = 0\n \n # Se o dia atual for maior que o numero total\n # de dias do mes, retorna dia primeiro do mes seguinte\n if dia_atual > numero_dias_mes:\n dia_atual = 1\n mes_atual += 1\n datas_guardadas_mes = OrderedDict()\n # Se o mes > 12, retorna janeiro, primeiro do ano seguinte\n if mes_atual > 12:\n mes_atual = 1\n numero_dia_ano = 1\n ano_atual += 1\n datas_guardadas_ano = OrderedDict()\n # Verifica se ano seguinte é bissexto\n if (ano_atual %4 == 0 and ano_atual %100 != 0):\n se_bissexto = True\n elif ano_atual %400 == 0:\n se_bissexto = True\n else:\n se_bissexto = False\n \n return(datas_guardadas)", "def fama (self , diccionario):\n\n decoracion_list = []\n for key , value in diccionario.items():\n a=[]\n a.append(key)\n a.append(value)\n decoracion_list.append (a)\n\n paredes_list = decoracion_list [0:3]\n suelo_list = decoracion_list [3:6]\n reforma_list = decoracion_list [6:]\n\n paredes = 1\n suelo = 1\n reforma = 1\n\n for i in range (len(paredes_list)):\n if paredes_list [i][1] == 1 :\n paredes = i+2 \n\n for i in range (len(suelo_list)):\n if suelo_list [i][1] == 1 :\n suelo = i+2\n\n for i in range (len(reforma_list)):\n if reforma_list [i][1] == 1 :\n reforma = i+2\n\n modificador_fama = 0\n\n if paredes >= 4 and suelo >= 4 and reforma >= 4 :\n modificador_fama = 45\n\n elif paredes >= 3 and suelo >= 3 and reforma >= 3 :\n modificador_fama = 33 \n\n elif paredes >= 2 and suelo >= 2 and reforma >= 2 :\n modificador_fama = 12\n\n fama = (10*paredes)+(10*suelo)+(10*reforma) + modificador_fama + kasino.modificador_fama\n\n \"\"\" FORMULA FAMA : Con esta formula se calcula la fama, que dependera de la decoracion e influira en los visitantes \n Se puede usar modificador_fama para calibrar el juego o añadir niveles de dificulad \"\"\"\n \n return fama , paredes , suelo , reforma", "def busca_por_data(self, data_inicial, data_final):\r\n # data_inicial/data_final formato 'DDMMYYYY'\r\n\tserie = \"\" \r\n\tchave_acesso = \"\" \r\n self.__busca_notas(\"DATA\", data_inicial, data_final, serie, chave_acesso)", "def mdnf():\r\n i2 = question_amount_4.get()\r\n check = (only_int(i2))\r\n if not check:\r\n 
num_check(820, 300, 4)\r\n else:\r\n for it in range(5):\r\n variat(it)\r\n name = 'МДНФ:'\r\n input_file_docx(name, name)\r\n for p in range(int(question_amount_4.get())):\r\n df = pd.DataFrame(np.array([[0, 0, 0],\r\n [0, 0, 1],\r\n [0, 1, 0],\r\n [0, 1, 1],\r\n [1, 0, 0],\r\n [1, 0, 1],\r\n [1, 1, 0],\r\n [1, 1, 1]]), columns=['x', 'y', 'z'])\r\n iNomer = random.randint(1, 10)\r\n doc_O = docx.Document('mdnf.docx')\r\n doc_t = docx.Document('f_meaning.docx')\r\n strW = doc_t.paragraphs[iNomer - 1].text\r\n a = strW.split(',')\r\n df[\"F(x, y, z)\"] = a\r\n task = f'Построить МДНФ по таблице истинности \\n{df}'\r\n strW_Otvet = doc_O.paragraphs[iNomer - 1].text\r\n answer = task + f' \\n Oтвет: \\n {strW_Otvet}'\r\n input_file_docx(task, answer)\r\n lbvi = Label(window1, font=(\"Arial Bold\", 14), text=\"Выполнено \")\r\n lbvi.place(x=800, y=300)", "def enchere(self):\n\n i = 0\n while i < 5 and self.annonce < 4:\n paroleJ = self.joueurs[i].parler(self.annonce)\n if paroleJ != 0:\n self.annonce = paroleJ\n self.indiceJoueurQuiPrend = i\n i += 1\n\n print(\"joueur qui prend : \" + str(self.indiceJoueurQuiPrend))\n if self.indiceJoueurQuiPrend != -1:\n print(\"annonce : \" + str(self.annonce))\n if self.annonce == 1 or self.annonce == 2:\n self.joueurs[self.indiceJoueurQuiPrend].possedeChien = True\n self.joueurs[self.indiceJoueurQuiPrend].construireChien()\n self.debuterPartie()\n\n else:\n self.finirPartie()", "def NuevaPartida(self,):\n\t\"\"\" Numeros Disponibles \"\"\"\n\tDisponibles[0] = True\n\tDisponibles[1] = True\n\tDisponibles[2] = True\n\tDisponibles[3] = True\n\tDisponibles[4] = True\n\tDisponibles[5] = True\n\t\"\"\" Jugador Uno \"\"\"\n\tJ1[0] = 0\n\tJ1[1] = 0\n\tJ1[2] = 0\n\tJ1[3] = 0\n\tJ1[4] = 0\n\tJ1[5] = 0\n\t\"\"\" Jugador Dos \"\"\"\n\tJ2[0] = 0\n\tJ2[1] = 0\n\tJ2[2] = 0\n\tJ2[3] = 0\n\tJ2[4] = 0\n\tJ2[5] = 0\n\t\"\"\" Jugador Tres \"\"\"\n\tJ3[0] = 0\n\tJ3[1] = 0\n\tJ3[2] = 0\n\tJ3[3] = 0\n\tJ3[4] = 0\n\tJ3[5] = 0\n\t\"\"\" Jugador Cuatro \"\"\"\n\tJ4[0] = 0\n\tJ4[1] = 0\n\tJ4[2] = 0\n\tJ4[3] = 0\n\tJ4[4] = 0\n\tJ4[5] = 0", "def analyse_donnees(self, mere, foetus, pere, log):\n concordance_mf = 0\n concordance_pf = None\n if len(pere) != 0:\n concordance_pf = 0\n log = log + \"Père détecté.................................\\n\"\n log = log + \"\\n\\nVérification concordance des ADNs entre père et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in pere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n pere[Alleles].concordance_pere_foetus = \"OUI\"\n concordance_pf = concordance_pf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n pere[Alleles].concordance_pere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[\n Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"\\n\\nVérification concordance des ADNs entre mère et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in mere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n foetus[Alleles].concordance_mere_foetus = \"OUI\"\n concordance_mf = concordance_mf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n foetus[Alleles].concordance_mere_foetus = \"NON\"\n log = log + \"Concordance pour 
marqueur \" + foetus[Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"Vérification concordance des ADns terminée..................................\\n\\n\\n\"\n if concordance_mf != len(foetus):\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n log = log + \"Concordance des ADNs PAS OK....................\\n\"\n log = log + \"Erreur dans l'échantillon...................\\n\"\n log = log + \"Revérifier s'il vous plaît.............\\n\"\n return resultats, conclusion, log\n else:\n log = log + \"Traitement des 15 autres marqueurs..............................\\n\"\n for nbre_lignes in range(1, len(mere)):\n log = log + \"Traitement du marqueur \" + str(foetus[nbre_lignes].marqueur) + \"..........\\n\"\n pic = foetus[nbre_lignes].foetus_pics()\n log = log + \"Calcul du nombre d'allèles pour le foetus......................\\n\"\n log = log + \"Nombre d'allèles pour le foetus : \" + str(pic) + \".........\\n\"\n log = log + \"Vérification de l'homozygotie de la mère......................\\n\"\n mere[nbre_lignes].homozygotie()\n log = log + \"Mère homozygote : \" + str(mere[nbre_lignes].homozygote) + \"...............\\n\"\n log = log + \"Vérification mère et foetus mêmes allèles......................\\n\"\n foetus[nbre_lignes].allele_semblable(mere[nbre_lignes])\n log = log + \"Code de retour vérification allèles semblables: \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Initialisation du taux de contamination pour calcul à venir...............\\n\"\n foetus[nbre_lignes].taux = 0.0\n log = log + \"Taux initialisé.................................\\n\"\n log = log + \"Si code informatif de retour allèles semblables différent de 2, vérification écho.............\\n\"\n log = log + \"Si écho, affection code informatif 3...............\\n\"\n if foetus[nbre_lignes].informatif != 2:\n log = log + \"Vérification si écho......................\\n\"\n mere[nbre_lignes].echo(foetus[nbre_lignes])\n log = log + \"Code retour vérification écho : \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Début chaîne de traitement...........................\\n\"\n if pic == 3:\n log = log + \"Trois allèles détectés......................\\n\"\n foetus[nbre_lignes].contamination_heterozygote(mere[nbre_lignes])\n log = log + \"Marqueur informatif, affectation du code contamination 1..............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Calcul taux de contamination du marqueur..........\\n\"\n foetus[nbre_lignes].contamination = 2\n log = log + \"Calcul terminé....................\\n\"\n elif mere[nbre_lignes].homozygote:\n log = log + \"Mère homozygote.......................\\n\"\n log = log + \"Marqueur non informatif, affectation du code informatif 0............\\n\"\n foetus[nbre_lignes].informatif = 0\n elif pic == 2:\n log = log + \"Deux allèles détectés..............\\n\"\n if foetus[nbre_lignes].informatif == 2:\n log = log + \"Si mêmes allèles, vérification homozygote contaminé...............\\n\"\n foetus[nbre_lignes].verif_homozygote_contamine(self)\n if foetus[nbre_lignes].contamination == 1:\n log = log + \"Homozygote contaminé identifié.....................\\n\"\n log = log + \"Calcul du taux de contamination....................\\n\"\n foetus[nbre_lignes].homozygote_contamine(self)\n log = log + \"Calcul du taux de contamination effectué...........\\n\"\n else:\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code calcul écho différent de 
3..................\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n else:\n log = log + \"Un seul allèle détecté............\\n\"\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code informatif différent de 3...........\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n log = log + \"\\n\\n\"\n log = log + \"Calcul échantillon contaminé ou non......\\n\"\n log = log + \"Marqueur contaminé si >\" + str(self.seuil_taux_conta) + \".......\\n\"\n log = log + \"Echantillon contaminé si plus de \" + str(\n self.seuil_nbre_marqueurs) + \"marqueurs contaminés...\\n\"\n self.conclusion_echantillon(foetus)\n log = log + \"Calcul échantillon terminé.....\\n\"\n log = log + \"Fin de traitement...........\\n\"\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n return resultats, conclusion, log", "def getStatVentesMois(self, in_data):\n\n try:\n date_debut = in_data['date_debut']\n dt_debut = dateutil.parser.parse(date_debut)\n date_fin = in_data['date_fin']\n dt_fin = dateutil.parser.parse(date_fin)\n except:\n out_data = {\n 'success': False\n }\n return out_data\n\n local_dt_debut = dt_debut.astimezone (pytz.timezone('Europe/Paris'))\n debut = datetime(local_dt_debut.year, local_dt_debut.month, local_dt_debut.day)\n local_dt_fin = dt_fin.astimezone (pytz.timezone('Europe/Paris'))\n fin = datetime(local_dt_fin.year, local_dt_fin.month, local_dt_fin.day) + timedelta(days=1)\n\n commandes=[]\n ventes=[]\n day = 0\n stop = False\n ca = 0\n nb_commandes = 0\n nb_souscriptions = 0\n while not stop :\n time_debut = debut + timedelta(days=day)\n timestamp = calendar.timegm(time_debut.timetuple()) * 1000\n time_fin = time_debut + timedelta(days=1)\n c_list = Commande.objects.filter(etat='PAY',date__gte=time_debut,date__lt=time_fin).distinct()\n # ch_list = CommandeHistory.objects.filter(etat='PAY',date__gte=time_debut, date__lt=time_fin)\n total_euros = 0\n total_souscriptions = 0\n total_commandes = 0\n\n for commande in c_list:\n total_euros += commande.montant\n for souscription in commande.souscription_set.all():\n total_souscriptions += souscription.quantite\n total_commandes += 1\n\n ca+=total_euros\n nb_souscriptions+=total_souscriptions\n nb_commandes+=total_commandes\n commandes.append([timestamp,total_commandes])\n ventes.append([timestamp,total_euros])\n day += 1\n if (debut + timedelta(days=day))>=fin:\n stop=True\n\n serie_list = [\n {\n 'label': \"commandes\",\n 'data': commandes,\n 'yaxis': 1\n },\n {\n 'label': \"€\",\n 'data': ventes,\n 'yaxis': 2\n }\n ]\n\n options = {\n \"series\": {\n \"lines\": {\n \"show\": True,\n \"fill\": True\n },\n \"points\": { \"show\": True }\n },\n 'axisLabels': {\n 'show': True\n },\n \"xaxis\": {\n \"mode\": \"time\",\n \"timeformat\": \"%e %b\",\n \"monthNames\": [\"jan\", \"fev\", \"mar\", \"avr\", \"mai\", \"juin\", \"juil\", \"aout\", \"sept\", \"oct\", \"nov\", \"dec\"]\n },\n \"yaxes\": [\n {\n 'axisLabel': 'commandes',\n \"tickColor\":[\"#fff\"],\n \"tickDecimals\": 0,\n \"min\":0\n },\n {\n 'axisLabel': \"CA\",\n \"position\": \"right\",\n \"tickColor\":[\"#fff\"],\n 
\"tickDecimals\": 0,\n \"min\":0\n }\n ],\n \"grid\": {\n \"hoverable\": True,\n \"borderWidth\": 1\n },\n \"colors\": [\"rgb(138,75,117)\", \"rgb(71,160,62)\"],\n \"tooltip\":True,\n \"tooltipOpts\": {\n \"content\": \"%x : %y %s\"\n },\n \"legend\": {\n \"show\": True,\n \"labelFormatter\": None, # null or (fn: string, series object -> string)\n #\"labelBoxBorderColor\": color,\n #noColumns: number\n #'position': \"ne\" or \"nw\" or \"se\" or \"sw\"\n #margin: number of pixels or [x margin, y margin]\n #backgroundColor: null or color\n #backgroundOpacity: number between 0 and 1\n #container: null or jQuery object/DOM element/jQuery expression\n #sorted: null/false, true, \"ascending\", \"descending\", \"reverse\", or a comparator\n }\n };\n\n\n out_data = {\n 'success': True,\n 'souscriptions': serie_list,\n 'options': options,\n 'ca':ca,\n 'nb_commandes':nb_commandes,\n 'nb_souscriptions':nb_souscriptions\n }\n return out_data", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se 
ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def addDadosPessoais(self, documento, nome, sexo, dataNascId, enderecoId, celular = None, fixo = None):\r\n if sexo == 1:\r\n sexo = 'TRUE'\r\n else:\r\n sexo = 'FALSE'\r\n try:\r\n self.cursor.execute(\"INSERT INTO DADOS_PESSOAIS(DOCUMENTO, NOME, SEXO, CELULAR, FIXO, DATA_NASC, ENDERECO) VALUES ('%s', '%s', %s, NULL, NULL, %s, %s);\" %(documento, nome, sexo, dataNascId, enderecoId))\r\n if celular is not None:\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET CELULAR = '%s' WHERE DOCUMENTO = '%s';\" %(celular, documento))\r\n if fixo is not None:\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET FIXO = '%s' WHERE DOCUMENTO = '%s';\" %(fixo, documento))\r\n return True\r\n except:\r\n return False", "def run(self):\n self.db.table('materia').insert([\n {\n 'nombre': 'Cálculo Diferencial e Integral',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'calculo-diferencial-e-integral',\n 'numeroUrl': 1891,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Estructuras de Datos',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'estructuras-de-datos',\n 'numeroUrl': 241,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Inglés Técnico 1',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'ingles-tecnico-1',\n 'numeroUrl': 665,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Teoría de la Computación 1',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'teoria-de-la-computacion-1',\n 'numeroUrl': 1865,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Programación Orientada a Objetos',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'programacion-orientada-a-objetos',\n 'numeroUrl': 246,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Ingeniería de Requerimientos',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'ingenieria-de-requerimientos',\n 'numeroUrl': 2006,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Métodos Computacionales para el Cálculo',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'métodos-computacionales-para-el-cálculo',\n 'numeroUrl': 2036,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Teoría de la Computación 2',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'teoría-de-la-computación-2',\n 'numeroUrl': 2013,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Arquitecturas y Organización de Computadoras 1',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'arquitecturas-y-organización-de-computadoras-1',\n 'numeroUrl': 2052,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Programación Concurrente',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'programación-concurrente',\n 'numeroUrl': 2059,\n 'esRecursable': False,\n }\n ])", "def __init__(self, periodo, reunion, sesion, tipo_sesion, fecha):\n self.periodo = periodo\n self.reunion = reunion\n self.sesion = sesion\n self.tipo_sesion = tipo_sesion\n self.fecha = fecha\n\n self.html_version_taquigrafica = None\n self.dialogo = None\n self.intervenciones = []\n self.intervenciones_por_diputado = {}", "def updatePeliculas():\n\txml_ciudades = \"http://api2.cinemex.com/rsvr.php?Action=GetFiltrados&IdDev=1\"\n\txml_peliculas = 
\"http://api2.cinemex.com/rsvr.php?Action=GetFiltrados&IdDev=1&ciudad=%s&byciudad=1\" #id_ciudad\n \n\tciudades = parse_ciudades( urlopen(url_movil) ) \n\t\n\t\n\tbase_url_pelicula = \"http://www.cinemex.com/cartelera/pelicula.php?vcode=%s\" #mex_vc\n\tpeliculas = {}\n \n\tpelis_obj = [] #Contiene toda la info de las peliculas, titulo, sinopsis, etc\n \n\t#Crea un diccionario con el vc y el objeto de la pelicula\n\t#De esta forma no hay peliculas repetidas\n\tfor ciudad_id in ciudades:\n\t\txml_url = xml_peliculas % ciudad_id\n\t\ttry:\n\t\t\txml = urlopen(xml_url)\n\t\texcept:\n\t\t\tlogger.debug( 'error cargando pagina %s' % xml_url)\n\t\t\tcontinue\n\t\tpelis_actual = parse_peliculas(xml)\n\t\t#Agregar las peliculas q no estan todavia\n\t\tfor peli in pelis_actual:\n\t\t\tkey = peli.get('mex_vc', '')\n\t\t\tif key not in peliculas: peliculas[key] = peli\n \n\tfor k, v in peliculas.items():\n\t\turl = base_url_pelicula % k\n\t\thtml = urlopen(url)\n\t\tpelis_obj.append(scrape_pelicula(html, v))\n \n\tfor peli in pelis_obj:\n\t\tcreatePelicula(peli)", "def comunicacion():\n global dsgraph\n global mss_cnt\n\n #Extraemos el mensaje y creamos un grafo con el\n message= request.args['content']\n gm = Graph()\n gm.parse(data=message)\n\n msgdic = get_message_properties(gm)\n\n # Comprobamos que sea un mensaje FIPA ACL\n if msgdic is None:\n # Si no es, respondemos que no hemos entendido el mensaje\n gr = build_message(Graph(), ACL['not-understood'], sender=InfoAgent.uri, msgcnt=mss_cnt)\n else:\n # Obtenemos la performativa\n perf = msgdic['performative']\n\n if perf != ACL.request:\n # Si no es un request, respondemos que no hemos entendido el mensaje\n gr = build_message(Graph(), ACL['not-understood'], sender=InfoAgent.uri, msgcnt=mss_cnt)\n else:\n #Extraemos el objeto del contenido que ha de ser una accion de la ontologia de acciones del agente\n # de registro\n\n # Averiguamos el tipo de la accion\n if 'content' in msgdic:\n content = msgdic['content']\n accion = gm.value(subject=content, predicate= RDF.type)\n\n # Aqui realizariamos lo que pide la accion\n\n #Extraiem els parametres necessaris per realitzar la busqueda\n paq = paquet[\"vacances\"]\n\n destination = gm.value(subject= paq, predicate= paquet.desti)\n departureDate = gm.value(subject= paq, predicate= paquet.dep_date)\n returnDate = gm.value(subject= paq, predicate= paquet.ret_date)\n numAdults = gm.value(subject= paq, predicate= paquet.num_adults)\n numChildren = gm.value(subject= paq, predicate= paquet.num_child)\n centric = gm.value(subject= paq, predicate = paquet.centric)\n category = gm.value(subject= paq, predicate = paquet.category)\n minStars = gm.value(subject= paq, predicate = paquet.min_stars)\n\n gh = buscar_hotels(destination, departureDate, returnDate, numAdults, numChildren, centric, category, minStars)\n\n # Por ahora simplemente retornamos un Inform-done\n gr = build_message(gh,\n ACL['inform-done'],\n sender=InfoAgent.uri,\n msgcnt=mss_cnt,\n receiver=msgdic['sender'],)\n mss_cnt += 1\n return gr.serialize(format='xml')", "def getIntervenciones():" ]
[ "0.65296155", "0.59569544", "0.5910678", "0.5854344", "0.5832947", "0.58255154", "0.5819173", "0.57754415", "0.57597715", "0.571407", "0.570511", "0.570511", "0.570511", "0.570511", "0.570511", "0.5703714", "0.5645902", "0.56388974", "0.55998516", "0.55933887", "0.55918986", "0.5575722", "0.55564696", "0.5490803", "0.5475313", "0.54649544", "0.5445691", "0.54403585", "0.54263115", "0.5415754" ]
0.649683
1
This method restores the dicts kasino.maquinas and kasino.decoracion and the ints dia and dinero; to do so we use a counter and walk through each dict, assigning to it its part of the string written in the file by means of the counter
def cargar_otras(self): stream_cargar = open ('yo_otros.txt', 'rt',encoding="utf-8") datos=stream_cargar.readlines() # print(datos) # print (len(kasino.maquinas)) lista_maquinas=[] lista_deco =[] day="" money="" contador=0 dia_o_dinero="dia" for i in datos[0]: # print(contador,i) if contador <8: lista_maquinas.append(i) contador+=1 elif contador <17: lista_deco.append(i) contador+=1 elif contador >= 17 and dia_o_dinero =="dia": if i =="D": pass elif i =="M": dia_o_dinero="dinero" else: day+=i elif contador >= 17 and dia_o_dinero == "dinero": money+=i # print("lm",lista_maquinas) # print ("ld",lista_deco) # print(day,money) contador=0 for i in kasino.maquinas: kasino.maquinas[i]=int(lista_maquinas[contador]) contador+=1 contador=0 for i in kasino.decoracion: kasino.decoracion[i]=int(lista_deco[contador]) contador+=1 kasino.dia=int( day) kasino.dinero=int(money)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _obtener_autos_cercanos(self, x, y):\n\n\t\t\"\"\"Convierte a metros\"\"\"\n\n\t\tx = vincenty((0,x), origen).meters\n\t\ty = vincenty((y,0), origen).meters\n\t\t\n\t\tconductores = mongo.db.conductores\n\t\tquery = \"if(this.posicion){if((Math.pow(this.posicion.lng-\"+str(x)+\",2)+Math.pow(this.posicion.lat-\"+str(y)+\",2)) <= \"+str(self.distanciaMaxima)+\") return this}\"\n\t\treturn conductores.find({\"estado\": \"libre\", \"$where\": query})", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... 
lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def le_infotudo(info_file):\n \n infoarq = open(info_file, 'r')\n infodata = infoarq.read()\n infoarq.close()\n \n info_temp = infodata.split('\\n')\n \n # ... lendo data de inicio da simulacao\n info_date = info_temp[5]\n info_date = info_date.split(' ')\n \n lista = list()\n for i in range(len(info_date)):\n if info_date[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_date[j]\n \n dstart = datetime(int(info_date[2]), int(info_date[1]), int(info_date[0]), int(info_date[3]))\n \n # ... lendo nt e dt\n info_timestep = info_temp[8]\n info_timestep = info_timestep.split(' ')\n \n lista = list()\n for i in range(len(info_timestep)):\n if info_timestep[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_timestep[j]\n \n nt = info_timestep[0]\n dt = info_timestep[1]\n dt = dt.split(\".\")\n dt = dt[0]\n \n # ... 
lendo nc\n info_nc = info_temp[11]\n info_nc = info_nc.split(' ')\n \n lista = list()\n for i in range(len(info_nc)):\n if info_nc[i] == '':\n lista.append(i)\n \n for j in reversed(lista):\n del info_nc[j]\n \n nc = info_nc[0]\n \n return int(nc), int(nt), int(dt), dstart", "def crear_dicionarios():\r\n valor_alfanumerico = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11, 'l': 12,\r\n 'm': 13, 'n': 14, 'ñ': 15, 'o': 16, 'p': 17, 'q': 18, 'r': 19, 's': 20, 't': 21, 'u': 22, 'v': 23, 'w': 24, 'x': 25, 'y': 26,\r\n 'z': 27, 'A': 28, 'B': 29, 'C': 30, 'D': 31, 'E': 32, 'F': 33, 'G': 34, 'H': 35, 'I': 36, 'J': 37, 'K': 38, 'L': 39, 'M': 40,\r\n 'N': 41, 'Ñ': 42, 'O': 43, 'P': 44, 'Q': 45, 'R': 46, 'S': 47, 'T': 48, 'U': 49, 'V': 50, 'W': 51, 'X': 52, 'Y': 53, 'Z': 54,\r\n 'á': 55, 'Á': 56, 'é': 57, 'É': 58, 'í': 59, 'Í': 60, 'ó': 61, 'Ó': 62, 'ú': 63, 'Ú': 64, '/': 65, '(': 66, ')': 67, '\"': 68,\r\n '=': 69, '&': 70, '%': 71, '$': 72, '#': 73, '!': 74, '¡': 75, '¿': 76, '?': 77, '*': 78, '-': 79, '+': 80, \"'\": 81, '0': 82,\r\n '1': 83, '2': 84, '3': 85, '4': 86, '5': 87, '6': 88, '7': 89, '8': 90, '9': 91, '|': 92, '°': 93, '<': 94, '>': 95, '{': 96,\r\n '}': 97, '[': 98, ']': 99, ',': 100, '.': 101, ':': 102, ';': 103, '_': 104, '^': 105, '`': 106, '~': 107, '¬': 108, ' ': 109}\r\n return valor_alfanumerico", "def conteo_numero(numero,letras=\"abcdefghijklmnñopqrstuvwxyz\"):\n dicc=conteos_mensaje(numero_a_letras(numero),letras)\n if numero==1:\n if 'z' in letras:\n dicc['z']+=1\n else:\n if 'c' in letras:\n dicc['c']+=1\n if 'e' in letras:\n dicc['e']+=1\n if 's' in letras:\n dicc['s']+=1\n return dicc", "def guardar_otras (self,maquinas,decoracion,dia,dinero):#dicc,dicc,int,int\n\n stream_guardar = open(\"yo_otros.txt\",\"wt\",encoding=\"utf-8\")\n for i in maquinas:\n stream_guardar.write(str(maquinas[i]))\n\n for i in decoracion:\n stream_guardar.write(str(decoracion[i]))\n\n stream_guardar.write(\"D\"+str(dia))\n stream_guardar.write(\"M\"+str(dinero))", "def fama (self , diccionario):\n\n decoracion_list = []\n for key , value in diccionario.items():\n a=[]\n a.append(key)\n a.append(value)\n decoracion_list.append (a)\n\n paredes_list = decoracion_list [0:3]\n suelo_list = decoracion_list [3:6]\n reforma_list = decoracion_list [6:]\n\n paredes = 1\n suelo = 1\n reforma = 1\n\n for i in range (len(paredes_list)):\n if paredes_list [i][1] == 1 :\n paredes = i+2 \n\n for i in range (len(suelo_list)):\n if suelo_list [i][1] == 1 :\n suelo = i+2\n\n for i in range (len(reforma_list)):\n if reforma_list [i][1] == 1 :\n reforma = i+2\n\n modificador_fama = 0\n\n if paredes >= 4 and suelo >= 4 and reforma >= 4 :\n modificador_fama = 45\n\n elif paredes >= 3 and suelo >= 3 and reforma >= 3 :\n modificador_fama = 33 \n\n elif paredes >= 2 and suelo >= 2 and reforma >= 2 :\n modificador_fama = 12\n\n fama = (10*paredes)+(10*suelo)+(10*reforma) + modificador_fama + kasino.modificador_fama\n\n \"\"\" FORMULA FAMA : Con esta formula se calcula la fama, que dependera de la decoracion e influira en los visitantes \n Se puede usar modificador_fama para calibrar el juego o añadir niveles de dificulad \"\"\"\n \n return fama , paredes , suelo , reforma", "def calcula(self, is_deterministico):\n # criando header da tabela\n tabela = PrettyTable([\"Rodadas\",\n \"E[T1]\",\n \"E[W1]\",\n \"E[X1]\",\n \"E[N1]\",\n \"E[Nq1]\",\n \"E[Ns1]\",\n \"E[T2]\",\n \"E[W2]\",\n \"E[X2]\",\n \"E[N2]\",\n \"E[Nq2]\",\n \"E[Ns2]\",\n \"Var[W1]\",\n \"Var[W2]\"])\n \n\n for 
index in range(1, self.n_rodadas+1):\n # calculando a esperanca das metricas da fila 1\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". E len w1: \", len(self.w1[index]))\n if len(self.w1[index]) > 0:\n self.x1_med_rodada[index] = sum(self.x1[index])/len(self.w1[index])\n self.w1_med_rodada[index] = sum(self.w1[index])/len(self.w1[index])\n self.nq1_med_rodada[index] = sum(self.nq1[index])/len(self.w1[index])\n self.ns1_med_rodada[index] = sum(self.ns1[index])/len(self.w1[index])\n self.n1_med_rodada[index] = sum(self.n1[index])/len(self.w1[index])\n self.t1_med_rodada[index] = sum(self.t1[index])/len(self.w1[index])\n\n # calculando a esperanca das metricas da fila 2\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". E len w2: \", len(self.w2[index]))\n if len(self.w2[index]) > 0:\n self.x2_med_rodada[index] = sum(self.x2[index])/len(self.w2[index])\n self.w2_med_rodada[index] = sum(self.w2[index])/len(self.w2[index])\n self.nq2_med_rodada[index] = sum(self.nq2[index])/len(self.w2[index])\n self.ns2_med_rodada[index] = sum(self.ns2[index])/len(self.w2[index])\n self.n2_med_rodada[index] = sum(self.n2[index])/len(self.w2[index])\n self.t2_med_rodada[index] = sum(self.t2[index])/len(self.w2[index])\n\n # calculo de Var[W1] e Var[W2] para exibir na tabela\n if len(self.w1[index]) == 1:\n self.var_w1_med_rodada[index] = 0\n else:\n for amostra in range(len(self.w1[index])):\n self.var_w1_med_rodada[index] += (self.w1[index][amostra] - self.w1_med_rodada[index]) ** 2\n self.var_w1_med_rodada[index] /= (len(self.w1[index]) - 1)\n\n if len(self.w2[index]) == 1:\n self.var_w2_med_rodada[index] = 0\n else:\n for amostra2 in range(len(self.w2[index])):\n self.var_w2_med_rodada[index] += (self.w2[index][amostra2] - self.w2_med_rodada[index]) ** 2\n self.var_w2_med_rodada[index] /= (len(self.w2[index]) - 1)\n\n tabela.add_row([\"rodada_\" + str(index),\n round(self.t1_med_rodada[index], 6),\n round(self.w1_med_rodada[index], 6),\n round(self.x1_med_rodada[index], 6),\n round(self.n1_med_rodada[index], 6),\n round(self.nq1_med_rodada[index], 6),\n round(self.ns1_med_rodada[index], 6),\n round(self.t2_med_rodada[index], 6),\n round(self.w2_med_rodada[index], 6),\n round(self.x2_med_rodada[index], 6),\n round(self.n2_med_rodada[index], 6),\n round(self.nq2_med_rodada[index], 6),\n round(self.ns2_med_rodada[index], 6),\n round(self.var_w1_med_rodada[index], 6),\n round(self.var_w2_med_rodada[index], 6)])\n\n # acumulando medias totais\n self.x1_med_total += self.x1_med_rodada[index]\n self.w1_med_total += self.w1_med_rodada[index]\n self.nq1_med_total += self.nq1_med_rodada[index]\n self.ns1_med_total += self.ns1_med_rodada[index]\n self.n1_med_total += self.n1_med_rodada[index]\n self.t1_med_total += self.t1_med_rodada[index]\n self.x2_med_total += self.x2_med_rodada[index]\n self.w2_med_total += self.w2_med_rodada[index]\n self.nq2_med_total += self.nq2_med_rodada[index]\n self.ns2_med_total += self.ns2_med_rodada[index]\n self.n2_med_total += self.n2_med_rodada[index]\n self.t2_med_total += self.t2_med_rodada[index]\n self.var_w1_med_total += self.var_w1_med_rodada[index]\n self.var_w2_med_total += self.var_w2_med_rodada[index]\n\n # dividindo medias acumuladas pelo total de rodadas e enfim, calculando a media total de cada metrica\n self.x1_med_total /= self.n_rodadas\n self.w1_med_total /= self.n_rodadas\n self.nq1_med_total /= self.n_rodadas\n self.ns1_med_total /= self.n_rodadas\n self.n1_med_total /= self.n_rodadas\n self.t1_med_total /= self.n_rodadas\n 
self.x2_med_total /= self.n_rodadas\n self.w2_med_total /= self.n_rodadas\n self.nq2_med_total /= self.n_rodadas\n self.ns2_med_total /= self.n_rodadas\n self.n2_med_total /= self.n_rodadas\n self.t2_med_total /= self.n_rodadas\n self.var_w1_med_total /= self.n_rodadas\n self.var_w2_med_total /= self.n_rodadas\n\n tabela.add_row([\"Media\",\n round(self.t1_med_total, 6),\n round(self.w1_med_total, 6),\n round(self.x1_med_total, 6),\n round(self.n1_med_total, 6),\n round(self.nq1_med_total, 6),\n round(self.ns1_med_total, 6),\n round(self.t2_med_total, 6),\n round(self.w2_med_total, 6),\n round(self.x2_med_total, 6),\n round(self.n2_med_total, 6),\n round(self.nq2_med_total, 6),\n round(self.ns2_med_total, 6),\n round(self.var_w1_med_total, 6),\n round(self.var_w2_med_total, 6)\n ])\n\n print(tabela, \"\\n\")\n\n if not is_deterministico:\n self.calcula_ic()", "def f_precios_masivos(p0_fini, p1_ffin, p2_gran, p3_inst, p4_oatk, p5_ginc):\n\n def f_datetime_range_fx(p0_start, p1_end, p2_inc, p3_delta):\n \"\"\"\n Parameters\n ----------\n p0_start\n p1_end\n p2_inc\n p3_delta\n Returns\n -------\n ls_resultado\n Debugging\n ---------\n \"\"\"\n\n ls_result = []\n nxt = p0_start\n\n while nxt <= p1_end:\n ls_result.append(nxt)\n if p3_delta == 'minutes':\n nxt += timedelta(minutes=p2_inc)\n elif p3_delta == 'hours':\n nxt += timedelta(hours=p2_inc)\n elif p3_delta == 'days':\n nxt += timedelta(days=p2_inc)\n\n return ls_result\n\n # inicializar api de OANDA\n\n api = API(access_token=p4_oatk)\n\n gn = {'S30': 30, 'S10': 10, 'S5': 5, 'M1': 60, 'M5': 60 * 5, 'M15': 60 * 15,\n 'M30': 60 * 30, 'H1': 60 * 60, 'H4': 60 * 60 * 4, 'H8': 60 * 60 * 8,\n 'D': 60 * 60 * 24, 'W': 60 * 60 * 24 * 7, 'M': 60 * 60 * 24 * 7 * 4}\n\n # -- para el caso donde con 1 peticion se cubran las 2 fechas\n if int((p1_ffin - p0_fini).total_seconds() / gn[p2_gran]) < 4999:\n\n # Fecha inicial y fecha final\n f1 = p0_fini.strftime('%Y-%m-%dT%H:%M:%S')\n f2 = p1_ffin.strftime('%Y-%m-%dT%H:%M:%S')\n\n # Parametros pra la peticion de precios\n params = {\"granularity\": p2_gran, \"price\": \"M\", \"dailyAlignment\": 16, \"from\": f1,\n \"to\": f2}\n\n # Ejecutar la peticion de precios\n a1_req1 = instruments.InstrumentsCandles(instrument=p3_inst, params=params)\n a1_hist = api.request(a1_req1)\n\n # Para debuging\n # print(f1 + ' y ' + f2)\n lista = list()\n\n # Acomodar las llaves\n for i in range(len(a1_hist['candles']) - 1):\n lista.append({'TimeStamp': a1_hist['candles'][i]['time'],\n 'Open': a1_hist['candles'][i]['mid']['o'],\n 'High': a1_hist['candles'][i]['mid']['h'],\n 'Low': a1_hist['candles'][i]['mid']['l'],\n 'Close': a1_hist['candles'][i]['mid']['c']})\n\n # Acomodar en un data frame\n r_df_final = pd.DataFrame(lista)\n r_df_final = r_df_final[['TimeStamp', 'Open', 'High', 'Low', 'Close']]\n r_df_final['TimeStamp'] = pd.to_datetime(r_df_final['TimeStamp'])\n r_df_final['Open'] = pd.to_numeric(r_df_final['Open'], errors='coerce')\n r_df_final['High'] = pd.to_numeric(r_df_final['High'], errors='coerce')\n r_df_final['Low'] = pd.to_numeric(r_df_final['Low'], errors='coerce')\n r_df_final['Close'] = pd.to_numeric(r_df_final['Close'], errors='coerce')\n\n return r_df_final\n\n # -- para el caso donde se construyen fechas secuenciales\n else:\n\n # hacer series de fechas e iteraciones para pedir todos los precios\n fechas = f_datetime_range_fx(p0_start=p0_fini, p1_end=p1_ffin, p2_inc=p5_ginc,\n p3_delta='minutes')\n\n # Lista para ir guardando los data frames\n lista_df = list()\n\n for n_fecha in range(0, len(fechas) - 
1):\n\n # Fecha inicial y fecha final\n f1 = fechas[n_fecha].strftime('%Y-%m-%dT%H:%M:%S')\n f2 = fechas[n_fecha + 1].strftime('%Y-%m-%dT%H:%M:%S')\n\n # Parametros pra la peticion de precios\n params = {\"granularity\": p2_gran, \"price\": \"M\", \"dailyAlignment\": 16, \"from\": f1,\n \"to\": f2}\n\n # Ejecutar la peticion de precios\n a1_req1 = instruments.InstrumentsCandles(instrument=p3_inst, params=params)\n a1_hist = api.request(a1_req1)\n\n # Para debuging\n print(f1 + ' y ' + f2)\n lista = list()\n\n # Acomodar las llaves\n for i in range(len(a1_hist['candles']) - 1):\n lista.append({'TimeStamp': a1_hist['candles'][i]['time'],\n 'Open': a1_hist['candles'][i]['mid']['o'],\n 'High': a1_hist['candles'][i]['mid']['h'],\n 'Low': a1_hist['candles'][i]['mid']['l'],\n 'Close': a1_hist['candles'][i]['mid']['c']})\n\n # Acomodar en un data frame\n pd_hist = pd.DataFrame(lista)\n pd_hist = pd_hist[['TimeStamp', 'Open', 'High', 'Low', 'Close']]\n pd_hist['TimeStamp'] = pd.to_datetime(pd_hist['TimeStamp'])\n\n # Ir guardando resultados en una lista\n lista_df.append(pd_hist)\n\n # Concatenar todas las listas\n r_df_final = pd.concat([lista_df[i] for i in range(0, len(lista_df))])\n\n # resetear index en dataframe resultante porque guarda los indices del dataframe pasado\n r_df_final = r_df_final.reset_index(drop=True)\n r_df_final['Open'] = pd.to_numeric(r_df_final['Open'], errors='coerce')\n r_df_final['High'] = pd.to_numeric(r_df_final['High'], errors='coerce')\n r_df_final['Low'] = pd.to_numeric(r_df_final['Low'], errors='coerce')\n r_df_final['Close'] = pd.to_numeric(r_df_final['Close'], errors='coerce')\n\n return r_df_final", "def getaccidentesRangoHoras(analyzer, Start_Time, End_Time): \n lst = om.values(analyzer['dateIndex'], minKey(analyzer), maxKey(analyzer))\n dicc_severidad = {\"1\":0,\"2\":0,\"3\":0,\"4\":0}\n for i in range(lt.size(lst)):\n accidentes_dia = lt.getElement(lst, i)['severityIndex'][\"table\"]\n cantidad_accidentes = lt.size(accidentes_dia) \n total_severidad = total_severidad_hora(cantidad_accidentes, accidentes_dia, Start_Time, End_Time, dicc_severidad)\n total_accidentes = 0\n for severidad in dicc_severidad:\n total_accidentes += dicc_severidad[severidad]\n for severidad in total_severidad: \n porcentaje = round(int(dicc_severidad[severidad]) / total_accidentes, 2)\n dicc_severidad[severidad] = (\"Cantidad accidentes: \" + str(dicc_severidad[severidad]), \"Porcentaje: \"+str((porcentaje * 100))) \n return dicc_severidad", "def __busca_notas(self, tipo_busca, intervalo_inicial, intervalo_final, serie, \r\n\t\t chave_acesso):\r\n info_consulta = \"11\" # padrao 1\r\n\tresposta = \" \"*231 # padrao 230\r\n\t#resposta = None \r\n\r\n status = self.dll.rRetornarInformacao_NFCe_Daruma(tipo_busca, \r\n intervalo_inicial, intervalo_final, serie, chave_acesso, \r\n\t info_consulta, resposta) \r\n\tif status !=1:\r\n\t if status == -1:\r\n\t\traise Exception(\"-1: Erro encontrado na execucao do metodo\")\r\n elif status == -2:\r\n\t\traise Exception(\"-2: Chave Invalida\")\r\n\t elif status == -3:\r\n\t\traise Exception(\"-3: Falha no schema XML.\")\r\n\t elif status == -4:\r\n\t\traise Exception(\"-4: XML fora do padrao\")\r\n\t elif status == -5:\r\n\t\traise Exception(\"-5: Erro generico\")\r\n\t elif status == -8:\r\n\t\traise Exception(\"-8: Usuario nao Autorizado\")\r\n elif status == -9:\r\n\t\traise Exception(\"-9: Usuario nao Licenciado\")\r\n\t elif status == -10:\r\n\t\traise Exception(\"-10: Documento e Ambiente nao identificados\")\r\n\t elif status == 
-13:\r\n\t\traise Exception(\"-13: Tipo de Documento nao identificado\")\r\n elif status == -14:\r\n\t\traise Exception(\"-14: Erro retornado pelo WebService.\")\r\n elif status == -52:\r\n\t\traise Exception(\"-52: Erro ao gravar em arquivo temporario\")\r\n elif status == -99:\r\n\t\traise Exception(\"-99: Parametros invalidos ou ponteiro nulo de pametros\")\r\n elif status == -99:\r\n\t\traise Exception(\"-103: Nao foram encontradas as DLLs auxiliaes\")\r\n\t else:\r\n\t\traise Exception(\"Erro ao executar o metodo Retornar Informacao.\")", "def getIntervencionesDiputados(self):\n prog_indices = re.compile('(sr.|sra.).*', re.IGNORECASE)\n prog_nombre = re.compile('(sr.|sra.).*,*(\\.-)', re.IGNORECASE)\n\n result = prog_indices.finditer(self.dialogo)\n\n indices = []\n for i in result:\n indices.append(i.span()[0])\n\n dips = []\n for indice in range(len(indices) - 1):\n inicio, final = prog_nombre.match(self.dialogo[indices[indice]:indices[indice + 1]]).span()\n\n discurso = self.dialogo[indices[indice]:indices[indice + 1]]\n\n nombre = discurso[inicio:final]\n dips.append(nombre)\n self.intervenciones.append([nombre, discurso])\n\n dips_unicos = list(set(dips))\n\n for dip in dips_unicos:\n temp_dip = []\n for entrada in self.intervenciones:\n if dip == entrada[0]:\n temp_dip.append(entrada[1])\n\n self.intervenciones_por_diputado[dip] = temp_dip", "def getCambiosQafectanCaja(self, fechaInicio, fechaFin, usuarioColaborador=\"\"):\n\tif usuarioColaborador == \"\" and fechaInicio == \"\" and fechaFin == \"\":\n\t return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\"\"\")\n elif usuarioColaborador == \"\":\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\"\"\" %(fechaInicio,fechaFin))\n else:\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\n and c.usuario_Colaborador = '%s'\"\"\" %(fechaInicio,fechaFin,usuarioColaborador))", "def calendario():\n ################VARIABLES_CALENDARIO###############\n\tyear=input(\"Dime un año \")\n\tmes=input(\"Dime un mes \")\n\n\t\"\"\"Declaramos las variables que vamos a necesitar para el calendario\"\"\"\n\tfilas=0\n\tcolumna=0\n\tcontador=1\n\tif mes==4 or mes==6 or mes==9 or mes==11:\n\t\ttop=31\n\telse:\n\t\tif mes==2:\n\t\t\tif year%4==0:\n\t\t\t\ttop=28\n\t\t\telse:30\n\t\telse:\n\t\t\ttop=32\n\n\t\"\"\"Guardamos la fecha introducida, para que el sistema la reconozca como tal\t\"\"\"\t\n\tfecha_nacimiento=datetime.datetime(year, mes, 1)\n\n\t\"\"\"guardamos en una variable el día de la semana de la fecha\"\"\"\n\tdia_semana=datetime.datetime.weekday(fecha_nacimiento)\n\n\t##########Creamos el calendario#############\n\tprint \" L M X J V S D\"\n\tfor filas in my_range(1,6,1):\n\t\tfor columna in my_range(1,7,1):\n\t\t\tif not contador>=top:\n\t\t\t\tif filas==1: #Creamos la 1 semana\n\t\t\t\t\tif columna<=dia_semana:\n\t\t\t\t\t\tprint\" \", \n\t\t\t\t\t\t#Si el mes no empieza en lunes escribira 3 
especios y el contador no subira\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint \"\", contador,\n\t\t\t\t\t\tcontador=contador+1\t\n\t\t\t\t\t\t\"\"\"En la primera semana, empezara a escribir el dia que \n\t\t\t\t\t\ttoque. Para que quede encuadrado y bonito muestra un \n\t\t\t\t\t\tespacio y el contador, este augmentara en este caso\"\"\"\n\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tif contador<=9:\n\t\t\t\t\t\tprint \"\",contador,\n\t\t\t\t\t\tcontador=contador+1\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint contador,\n\t\t\t\t\t\tcontador=contador+1\t\n\t\t\t\t\t\"\"\" Si no nos encontramos en la 1 fila, escribira los dias\n\t\t\t\t\t normalmente para que quede más visual si el numero es de\n\t\t\t\t\t una cifra tambien escribira un espacio \"\"\"\n\t\tprint\"\"", "def calcular_ocupacion():\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT COUNT(*)\n\t\t\t\tFROM sansanito\n\t\t\t\tWHERE legendary=0\"\"\")\n\tnormales = cur.fetchall()\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT COUNT(*)\n\t\t\t\tFROM sansanito\n\t\t\t\tWHERE legendary=1\"\"\")\n\tlegendarios = cur.fetchall()\n\t# Calcula la ocupacion como cant_normales * 1 + cant_legendarios * 5\n\tocupado = normales[0][0] + 5 * legendarios[0][0]\n\treturn ocupado", "def dico(pays):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"country\"].isin(\r\n [pays])].sort_values([\"year\"], ascending=False)\r\n resultat = {}\r\n resultat[\"country\"] = str(df.iloc[0][1])\r\n resultat[\"year\"] = int(df.iloc[0][2])\r\n resultat[\"value\"] = float(df.iloc[0][4])\r\n return resultat", "def pacMare(date, estac):\n monthList = [\"JAN\", \"FEV\", \"MAR\", \"ABR\", \"MAI\", \"JUN\", \"JUL\",\n \"AGO\", \"SET\", \"OUT\", \"NOV\", \"DEZ\"]\n an = date.year\n Mesl = date.month\n strmes = monthList[Mesl-1]\n di = date.day\n data1 = \"%s/%s/%s\" %(di, Mesl, an)\n\n DT = 1\n HI = -3\n d0 = 1\n\n estacoes = Estacao()\n constantes = Constantes()\n cadastro = Cadastro()\n combinacoes = Combinacoes()\n\n f = estacoes.data['name'].index(estac)\n Cod = estacoes.data['ID'][f]\n LA1 = estacoes.data['latG'][f]\n LA2 = estacoes.data['latM'][f]\n LO1 = estacoes.data['lonG'][f]\n LO2 = estacoes.data['lonM'][f]\n nc = estacoes.data['ncomp'][f]\n NM = estacoes.data['nm'][f]\n fu = estacoes.data['fuso'][f]\n ca = estacoes.data['carta'][f]\n hemlat = estacoes.data['hemlat'][f]\n hemlon = estacoes.data['hemlon'][f]\n \n infoList = []\n lat = base10Tobase60(lat=base60Tobase10(LA1, LA2, hemlat))\n lon = base10Tobase60(lon=base60Tobase10(LO1, LO2, hemlon))\n latSTR = u\"Lat: %s\" % lat\n lonSTR = u\"Lon: %s\" % lon\n ncSTR = u\"Componentes: %s\" %(nc)\n nmSTR = u\"Nível Médio: %s cm\" %(int(NM))\n fuSTR = u\"Fuso: - %sh\" %(int(fu))\n caSTR = u\"Número Carta: %s\" %(ca)\n\n infoList.append(latSTR)\n infoList.append(lonSTR)\n infoList.append(ncSTR)\n infoList.append(nmSTR)\n infoList.append(fuSTR)\n infoList.append(caSTR)\n\n f = constantes.data['ID'].index(Cod)\n ai = constantes.data['const'][ f:f+nc ]\n h = constantes.data['amp'][ f:f+nc ]\n G = constantes.data['phase'][ f:f+nc ]\n HH = h[:]\n GG = G[:]\n\n MK, constID = [],[]\n for k in range(nc):\n f = cadastro.data['const'].index(ai[k])\n MK.append(cadastro.data['M'][f])\n constID.append(cadastro.data['cod'][f])\n MK = str2int(MK)\n constID = str2int(constID)\n\n BB, CC = [],[]\n for k in range(nc):\n f = combinacoes.data['ID'].index(constID[k])\n aux = combinacoes.data['subs'][ f: f+MK[k] ]\n aux = str2float(aux)\n BB.append(aux)\n aux = combinacoes.data['comb'][ f: f+MK[k] ]\n aux = str2float(aux)\n CC.append(aux)\n\n cdat = open(web2pyPath + 
\"modules/data/Vdata.txt\")\n V = []\n for line in cdat.readlines():\n line2 = line.strip('\\r\\n').split(',')\n line2 = str2float(line2)\n V.append(line2)\n\n D = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n n = 30\n\n # calculo dos elementos astronomicos\n MB = float(an % 4)\n MC = float(an % 100)\n MD = float(an % 400)\n dd = float(di)\n\n if MB == 0 and MC != 0 or MD == 0:\n D[2] = 29\n\n i1 = float(an / 100)\n i2 = i1 - 19\n if i2 != 0:\n t1 = i2\n j1 = abs(i2)\n c3 = j1 / i2\n t2 = t1 * t1 * c3\n c1 = int(j1 * 0.75 + 0.5) * c3\n else:\n t1 = 0.\n t2 = 0.\n c1 = 0.\n\n s0 = 277.0224 + 307.8831 * t1 - 0.0011 * t2 - 13.1764 * c1\n h0 = 280.1895 + 0.7689 * t1 + 0.0003 * t2 - 0.9856 * c1\n p0 = 334.3853 + 109.034 * t1 - 0.0103 * t2 - 0.1114 * c1\n nl = 100.7902 + 134.142 * t1 - 0.0021 * t2 - 0.053 * c1\n P1 = 281.2208 + 1.7192 * t1 + 0.00045 * t2 - 0.000047 * c1\n\n for i in range(Mesl):\n di = float(di + D[i])\n\n # bug de 2001\n if an <= 2000:\n di = di - 1 \n\n IA = i1 * 100\n BI = an - IA\n\n AI = int((BI - 1) * 0.25); AI = float(AI)\n if MD == 0: AI = AI + 1\n AD = AI + di\n N2 = n * DT * 0.5\n AV = N2\n SN = AV / 10000\n b = [None]\n b.append( s0 + 129.38481 * BI + 13.1764 * AD )\n b.append( h0 - 0.23872 * BI + 0.98565 * AD )\n b.append( p0 + 40.66249 * BI + 0.1114 * AD )\n b.append(None)\n b.append( nl + 19.32818 * BI + 0.05295 * AD )\n b.append( P1 + 0.01718 * BI + 0.000047 * AD )\n b[0] = b[2] - b[1]\n b[4] = 90.\n b.append( b[3] + N2 * 0.00464183 )\n b.append( b[5] + N2 * 0.00220641 )\n b.append( b[6] + N2 * 0.00000196 )\n\n a = [ [0.,1.,0.], [0.,2.,0.], [0.,3.,0.], [0.,0.,2.], [0.,1.,2.], [1.,0.,-1.], \n [2.,-1.,-1.], [2.,-1.,0.], [2.,-1.,1.], [2.,0.,0.], [2.,1.,0.], \n [2.,2.,0.], [2.,3.,0.] ]\n\n b[0] = b[0] + HI * 14.49205211\n b[1] = b[1] + HI * 0.54902653\n b[2] = b[2] + HI * 0.0410686\n b[3] = b[3] + HI * 0.00464183\n b[5] = b[5] + HI * 0.00220641\n b[6] = b[6] + HI * 0.00000196\n\n z, Q = [], []\n for i in range(13):\n s = 0.\n for J in range(3):\n s = s + a[i][J] * b[J + 7]\n \n XX = s * 0.017453\n z.append(np.cos(XX))\n Q.append(np.sin(XX))\n\n W = []\n for i in range(37):\n WQ = 0.\n for J in range(5):\n WQ = WQ + V[i][J] * b[J]\n \n if i == 13 or i == 30:\n W.append( WQ + b[9] )\n elif i == 17 or i == 32:\n W.append( WQ - b[9] )\n else:\n W.append(WQ)\n\n F, U = [], []\n for k in range(38):\n F.append(None) # apenas para facilitar a copia do codigo em VB\n U.append(None) # depois, ambos serao popped-up\n z.insert(0, None) # idem\n Q.insert(0, None) # idem\n\n F[1] = 1\n F[2] = 1\n F[3] = 1 - 0.0307 * z[1] + 0.0007 * z[2] - 0.0534 * z[10] - 0.0218 * z[11] - 0.0059 * z[12]\n F[4] = 1 + 0.4142 * z[1] + 0.0377 * z[2] - 0.0008 * z[3] - 0.0028 * z[8] + 0.0431 * z[10] - 0.0023 * z[11]\n F[5] = 1 + 0.4141 * z[1] + 0.0384 * z[2] - 0.003 * z[7] - 0.003 * z[9] + 0.0179 * z[10] - 0.004 * z[12] - 0.0017 * z[13]\n F[6] = 1 + 0.1885 * z[1] - 0.0063 * z[2] - 0.0063 * z[12]\n F[7] = 1 + 0.1884 * z[1] - 0.0061 * z[2] - 0.0087 * z[10]\n F[8] = 1 + 0.1884 * z[1] - 0.0057 * z[2] + 0.0007 * z[6] - 0.0028 * z[10] - 0.0039 * z[12] - 0.0007 * z[13]\n F[9] = 1 + 0.1881 * z[1] - 0.0058 * z[2] - 0.0576 * z[10] + 0.0175 * z[11]\n F[10] = 1 + 0.1885 * z[1] - 0.0058 * z[2] + 0.0001 * z[8] - 0.0054 * z[10] - 0.001 * z[11]\n F[11] = 1 - 0.2454 * z[1] - 0.0142 * z[2] + 0.0445 * z[10]\n F[12] = 1 + 0.1714 * z[1] - 0.0054 * z[2] + 0.3596 * z[10] + 0.0664 * z[11] - 0.0057 * z[12]\n F[13] = 1 + 0.1905 * z[1]\n F[14] = 1 - 0.0078 * z[1]\n F[15] = 1 - 0.0112 * z[1] + 0.0007 * z[2] - 0.0004 * z[4] 
- 0.0015 * z[10] - 0.0003 * z[11]\n F[16] = 1\n F[17] = 1 + 0.1158 * z[1] - 0.0029 * z[2] + 0.0001 * z[11]\n F[18] = 1 + 0.019 * z[1]\n F[19] = 1 - 0.0384 * z[1] - 0.0185 * z[2] + 0.0132 * z[4] + 0.0105 * z[8] + 0.0344 * z[10]\n F[20] = 1 + 0.1676 * z[1] + 0.03 * z[11]\n F[21] = 1 + 0.1685 * z[1] - 0.0047 * z[2] - 0.0152 * z[10] - 0.0098 * z[11] - 0.0057 * z[12]\n F[22] = 1 + 0.6398 * z[1] + 0.1342 * z[2] + 0.008500001 * z[3] + 0.0296 * z[8] + 0.1496 * z[10] - 0.0037 * z[11]\n F[23] = 1 - 0.0337 * z[1]\n F[24] = 1 - 0.0374 * z[1] - 0.061 * z[12]\n F[25] = 1 - 0.0375 * z[1]\n F[26] = 1 - 0.0373 * z[1] + 0.0004 * z[2] + 0.0007 * z[6] - 0.0039 * z[12]\n F[27] = 1 - 0.0373 * z[1] + 0.0042 * z[10] - 0.0036 * z[11]\n F[28] = 1 - 0.0373 * z[1] + 0.0004 * z[2] + 0.0005 * z[10] - 0.0001 * z[11]\n F[29] = 1 - 0.0448 * z[1]\n F[30] = 1 - 0.0367 * z[1] + 0.0047 * z[8] - 0.2505 * z[10] - 0.1102 * z[11] - 0.0156 * z[12]\n F[31] = 1\n F[32] = 1 - 0.0022 * z[1]\n F[33] = 1 - 0.2535 * z[4] + 0.0141 * z[5]\n F[34] = 1 + 0.2852 * z[1] + 0.0324 * z[2]\n F[35] = 1 + 0.4389 * z[1] + 0.0487 * z[2] + 0.0487 * z[10] + 0.065 * z[11]\n F[36] = 1 + 0.4168 * z[1] + 0.0466 * z[2] - 0.078 * z[10]\n F[37] = 1 - 0.0564 * z[1]\n\n U[1] = 0\n U[2] = 0\n U[3] = 0.0007 * Q[1] - 0.0008 * Q[2] - 0.0534 * Q[10] - 0.0218 * Q[11] - 0.0059 * Q[12]\n U[4] = 0.4142 * Q[1] + 0.0377 * Q[2] - 0.0008 * Q[3] + 0.0027 * Q[8] - 0.0432 * Q[10] + 0.0022 * Q[11]\n U[5] = 0.4142 * Q[1] + 0.0384 * Q[2] + 0.003 * Q[7] + 0.003 * Q[9] - 0.018 * Q[10] - 0.004 * Q[12] - 0.0017 * Q[13]\n U[6] = -0.1885 * Q[1] + 0.0062 * Q[2] + 0.0062 * Q[12]\n U[7] = -0.1884 * Q[1] + 0.006 * Q[2] - 0.0087 * Q[10]\n U[8] = -0.1884 * Q[1] + 0.0057 * Q[2] - 0.0008 * Q[6] - 0.0028 * Q[10] + 0.0039 * Q[12] + 0.0007 * Q[13]\n U[9] = -0.1882 * Q[1] + 0.0057 * Q[2] - 0.0576 * Q[10] + 0.0175 * Q[11]\n U[10] = -0.1885 * Q[1] + 0.0057 * Q[2] + 0.0001 * Q[8] - 0.0064 * Q[10] - 0.001 * Q[11]\n U[11] = -0.1886 * Q[1] - 0.0142 * Q[2] - 0.0446 * Q[10]\n U[12] = -0.2294 * Q[1] - 0.3596 * Q[10] - 0.0665 * Q[11] + 0.0057 * Q[12]\n U[13] = 0.246 * Q[1]\n U[14] = 0.0077 * Q[1]\n U[15] = 0.0111 * Q[1] - 0.0008 * Q[2] - 0.0004 * Q[4] - 0.0015 * Q[10] - 0.0003 * Q[11]\n U[16] = 0\n U[17] = 0.1554 * Q[1] - 0.003 * Q[2] - 0.0002 * Q[11]\n U[18] = 0.019 * Q[1]\n U[19] = -0.0384 * Q[1] - 0.0185 * Q[2] - 0.0132 * Q[4] - 0.0106 * Q[8] - 0.0344 * Q[10]\n U[20] = 0.231 * Q[1] - 0.03 * Q[11]\n U[21] = 0.2274 * Q[1] - 0.0047 * Q[2] - 0.0152 * Q[10] - 0.0098 * Q[11] - 0.0057 * Q[12]\n U[22] = 0.6398 * Q[1] + 0.1342 * Q[2] - 0.0296 * Q[8] - 0.1497 * Q[10] + 0.0037 * Q[11]\n U[23] = 0.0373 * Q[1]\n U[24] = 0.0373 * Q[1] + 0.006 * Q[12]\n U[25] = 0.0373 * Q[1] - 0.0005 * Q[2] - 0.0008 * Q[6] + 0.0039 * Q[12]\n U[26] = 0.0373 * Q[1] - 0.0005 * Q[2] - 0.0008 * Q[6] + 0.0039 * Q[12]\n U[27] = 0.0373 * Q[1] + 0.0042 * Q[10] + 0.0036 * Q[11]\n U[28] = 0.0373 * Q[1] - 0.0005 * Q[2] + 0.0005 * Q[9] + 0.0001 * Q[11]\n U[29] = 0.0487 * Q[1]\n U[30] = 0.0366 * Q[1] + 0.0047 * Q[8] - 0.2505 * Q[9] - 0.1102 * Q[11]\n U[31] = 0\n U[32] = -0.0022 * Q[1]\n U[33] = -0.2535 * Q[4] + 0.0141 * Q[5]\n U[34] = 0.3108 * Q[1] + 0.0324 * Q[2]\n U[35] = 0.4389 * Q[1] + 0.0487 * Q[2] - 0.0488 * Q[9] - 0.065 * Q[11]\n U[36] = 0.4542 * Q[1] + 0.0466 * Q[2] - 0.0078 * Q[10]\n U[37] = 0.0563 * Q[1]\n\n z.pop(0)\n Q.pop(0)\n F.pop(0)\n U.pop(0)\n AV = n * DT * 0.5\n\n for i in range(37):\n XX = F[i]\n YY = U[i]\n F[i] = np.sqrt( XX ** 2 + YY ** 2 )\n U[i] = W[i] + np.arctan(YY / XX) * 57.29578\n U[i] = U[i] - int(U[i] / 360) * 360\n if 
U[i] < 0: U[i] = U[i] + 360\n\n\n # calculo das alturas\n HC, GC = [],[]\n for k in range(110):\n HC.append(0)\n GC.append(0)\n\n for i in range(nc):\n s = 0.\n WQ = 0.\n T = 1.\n\n for J in range(MK[i]):\n jj = int(BB[i][J])\n kk = CC[i][J]\n T = T * F[jj-1] ** abs(kk)\n s = s + U[jj-1] * kk\n WQ = WQ + V[jj-1][5] * kk\n ZQ = s\n \n h[i] = T * h[i]\n s = s - G[i]\n if s < 0: s = s + 360.\n G[i] = s\n try: \n W[i] = WQ * DT\n except IndexError:\n W.append( WQ * DT )\n HC[i] = T * HC[i]\n ZQ = ZQ - GC[i]\n if ZQ < 0: ZQ = ZQ + 360.\n GC[i] = ZQ\n\n x, Y2, y = [],[],[]\n MM = 0\n for i in range(n):\n s = 0.\n ZQ = 0.\n\n for j in range(nc):\n AA = G[j] * 0.017453\n s = s + h[j] * np.cos(AA)\n G[j] = G[j] + W[j]\n AC = GC[j] * 0.017453\n ZQ = ZQ + HC[j] * np.cos(AC)\n GC[j] = GC[j] + W[j]\n\n x.append(s + NM)\n Y2.append(x[i])\n y.append(ZQ + MM)\n\n x = np.array(x, dtype=np.float32)\n x = x/100.\n h = x[3:-3]\n hours = np.arange(24)\n years, months, days = 0*hours+an, 0*hours+Mesl, 0*hours+int(dd)\n time = []\n for year, month, day, hour in zip(years, months, days, hours):\n time.append( dt.datetime(year, month, day, hour) )\n\n time = mpldates.date2num(time)\n time2 = np.linspace(time[0], time[-1], 500)\n\n interp = interp1d(time, h, kind='cubic')\n h2 = interp(time2)\n\n dh = np.gradient(h2)\n dhSign = dh > 0\n # gathering pairs\n pairs = []\n for k in range(len(dh)-1):\n pairs.append([dhSign[k], dhSign[k+1]])\n\n f = []\n for k in range(len(pairs)):\n if pairs[k] == [True, False] or pairs[k] == [False, True]:\n f.append(k)\n\n datas = mpldates.num2date(time2[f])\n hora = []\n for data in datas:\n hora.append(\"%02i:%02i\" %(data.hour, data.minute))\n altura = h2[f]\n altura = ['%.1f' % a for a in altura]\n\n return infoList, hora, altura, time2, h2", "def calculate_correction(filedic):\n lanczos_cera = xr.open_mfdataset(filedic['lanczos(CERA)'], combine='by_coords')\n lanczos_noaa = xr.open_mfdataset(filedic['lanczos(20CR)'], combine='by_coords')\n return lanczos_noaa.drop('number').squeeze() - lanczos_cera.drop('number').squeeze()", "def construct_occurrence_dico(data) :\n print('Constructing occurence dictionnaries...')\n\n p_kw_dico = dict()\n kw_p_dico = dict()\n full_stem_dico = {}\n for patent in data :\n patent_id = patent['id']\n #[keywords,stem_dico] = extract_keywords(patent[1]+\". \"+patent[2],patent_id)\n [keywords,stem_dico] = extract_keywords(patent['title']+\". 
\"+patent['abstract'],patent_id)\n #print(keywords)\n\n for k in keywords :\n # add to p_kw dico\n if k in kw_p_dico :\n kw_p_dico[k].append(patent_id)\n else :\n kw_p_dico[k]= [patent_id]\n #\n if patent_id in p_kw_dico :\n p_kw_dico[patent_id].append(k)\n else :\n p_kw_dico[patent_id] = [k]\n\n for k in stem_dico.keys():\n if k in full_stem_dico :\n full_stem_dico[k]=full_stem_dico[k].union(stem_dico[k])\n else :\n full_stem_dico[k] = stem_dico[k]\n\n return([p_kw_dico,kw_p_dico,full_stem_dico])", "def cima(pila):\n return pila.datos[pila.tope]", "def convertirDic(botones):\n dic_aux = {}\n for clave,valor in botones.items():\n dic_aux[tuple(map(int,clave.split(\",\")))] = valor\n return dic_aux", "def cknf():\r\n i2 = question_amount_3.get()\r\n check = (only_int(i2))\r\n if not check:\r\n num_check(820, 270, 3)\r\n else:\r\n for it in range(5):\r\n variat(it)\r\n name = 'СKНФ:'\r\n input_file_docx(name, name)\r\n for p in range(int(question_amount_3.get())):\r\n df = pd.DataFrame(np.array([[0, 0, 0],\r\n [0, 0, 1],\r\n [0, 1, 0],\r\n [0, 1, 1],\r\n [1, 0, 0],\r\n [1, 0, 1],\r\n [1, 1, 0],\r\n [1, 1, 1]]), columns=['x', 'y', 'z'])\r\n df[\"F(x, y, z)\"] = np.random.randint(0, 2, size=len(df))\r\n task = f'Постойте СКНФ для следующей таблицы: \\n {df}'\r\n r = (df.loc[df['F(x, y, z)'] == 0, ['x', 'y', 'z']].astype(bool)).astype('int8')\r\n res = (r.apply(lambda r: '({}{} v {}{} v {}{})'.format('!' * r['x'], 'x', '!' * r['y'],\r\n 'y', '!' * r['z'], 'z'), axis=1).str.cat(sep=' ^ '))\r\n answer = task + f'\\n Ответ: {res}'\r\n input_file_docx(task, answer)\r\n lbvi = Label(window1, font=(\"Arial Bold\", 14), text=\"Выполнено \")\r\n lbvi.place(x=800, y=270)", "def fecha_cadena(anno, mes, dia):\n dia_ = str(dia).strip()\n mes_ = str(mes).strip()\n anno_ = str(anno).strip()\n # día\n if len(dia_) < 2:\n dia_ = \"0\" + dia_\n # mes\n if len(mes_) < 2:\n mes_ = \"0\" + mes_\n # año\n for i in range(len(anno_), 4):\n anno_ = \"0\" + anno_\n return anno_ + mes_ + dia_", "def analyse_donnees(self, mere, foetus, pere, log):\n concordance_mf = 0\n concordance_pf = None\n if len(pere) != 0:\n concordance_pf = 0\n log = log + \"Père détecté.................................\\n\"\n log = log + \"\\n\\nVérification concordance des ADNs entre père et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in pere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n pere[Alleles].concordance_pere_foetus = \"OUI\"\n concordance_pf = concordance_pf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n pere[Alleles].concordance_pere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[\n Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"\\n\\nVérification concordance des ADNs entre mère et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in mere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n foetus[Alleles].concordance_mere_foetus = \"OUI\"\n concordance_mf = concordance_mf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n foetus[Alleles].concordance_mere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log 
= log + \"Vérification concordance des ADns terminée..................................\\n\\n\\n\"\n if concordance_mf != len(foetus):\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n log = log + \"Concordance des ADNs PAS OK....................\\n\"\n log = log + \"Erreur dans l'échantillon...................\\n\"\n log = log + \"Revérifier s'il vous plaît.............\\n\"\n return resultats, conclusion, log\n else:\n log = log + \"Traitement des 15 autres marqueurs..............................\\n\"\n for nbre_lignes in range(1, len(mere)):\n log = log + \"Traitement du marqueur \" + str(foetus[nbre_lignes].marqueur) + \"..........\\n\"\n pic = foetus[nbre_lignes].foetus_pics()\n log = log + \"Calcul du nombre d'allèles pour le foetus......................\\n\"\n log = log + \"Nombre d'allèles pour le foetus : \" + str(pic) + \".........\\n\"\n log = log + \"Vérification de l'homozygotie de la mère......................\\n\"\n mere[nbre_lignes].homozygotie()\n log = log + \"Mère homozygote : \" + str(mere[nbre_lignes].homozygote) + \"...............\\n\"\n log = log + \"Vérification mère et foetus mêmes allèles......................\\n\"\n foetus[nbre_lignes].allele_semblable(mere[nbre_lignes])\n log = log + \"Code de retour vérification allèles semblables: \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Initialisation du taux de contamination pour calcul à venir...............\\n\"\n foetus[nbre_lignes].taux = 0.0\n log = log + \"Taux initialisé.................................\\n\"\n log = log + \"Si code informatif de retour allèles semblables différent de 2, vérification écho.............\\n\"\n log = log + \"Si écho, affection code informatif 3...............\\n\"\n if foetus[nbre_lignes].informatif != 2:\n log = log + \"Vérification si écho......................\\n\"\n mere[nbre_lignes].echo(foetus[nbre_lignes])\n log = log + \"Code retour vérification écho : \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Début chaîne de traitement...........................\\n\"\n if pic == 3:\n log = log + \"Trois allèles détectés......................\\n\"\n foetus[nbre_lignes].contamination_heterozygote(mere[nbre_lignes])\n log = log + \"Marqueur informatif, affectation du code contamination 1..............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Calcul taux de contamination du marqueur..........\\n\"\n foetus[nbre_lignes].contamination = 2\n log = log + \"Calcul terminé....................\\n\"\n elif mere[nbre_lignes].homozygote:\n log = log + \"Mère homozygote.......................\\n\"\n log = log + \"Marqueur non informatif, affectation du code informatif 0............\\n\"\n foetus[nbre_lignes].informatif = 0\n elif pic == 2:\n log = log + \"Deux allèles détectés..............\\n\"\n if foetus[nbre_lignes].informatif == 2:\n log = log + \"Si mêmes allèles, vérification homozygote contaminé...............\\n\"\n foetus[nbre_lignes].verif_homozygote_contamine(self)\n if foetus[nbre_lignes].contamination == 1:\n log = log + \"Homozygote contaminé identifié.....................\\n\"\n log = log + \"Calcul du taux de contamination....................\\n\"\n foetus[nbre_lignes].homozygote_contamine(self)\n log = log + \"Calcul du taux de contamination effectué...........\\n\"\n else:\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code calcul écho différent de 3..................\\n\"\n log = log + \"Marqueur informatif, affectation du code 
informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n else:\n log = log + \"Un seul allèle détecté............\\n\"\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code informatif différent de 3...........\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n log = log + \"\\n\\n\"\n log = log + \"Calcul échantillon contaminé ou non......\\n\"\n log = log + \"Marqueur contaminé si >\" + str(self.seuil_taux_conta) + \".......\\n\"\n log = log + \"Echantillon contaminé si plus de \" + str(\n self.seuil_nbre_marqueurs) + \"marqueurs contaminés...\\n\"\n self.conclusion_echantillon(foetus)\n log = log + \"Calcul échantillon terminé.....\\n\"\n log = log + \"Fin de traitement...........\\n\"\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n return resultats, conclusion, log", "def per_capi(country):\r\n df = ouvrir_fichier()\r\n df = df.loc[df['country'].isin([country])]\r\n df = df[(df[\r\n 'emissions'] == 'Emissions per capita (metric tons of carbon dioxide)'\r\n )]\r\n resultat = {}\r\n longeur = len(df)\r\n for i in range(longeur):\r\n resultat[int(df.iloc[i][2])] = float(df.iloc[i][4])\r\n\r\n return resultat", "def valida_iin(ccred):\n\n # Vai ser usado o tuplo que contem todas as informacoes sobre os diferentes tipos de cartao definido nas linhas de codigo iniciais.\n # Sao acedidas as informacoes no indice 0 (Rede Emissora), 2 (Digitos iniciais IIN) e 3 (Numero de Digitos).\n \n # Percorremos o tuplo com as informacoes sobre os cartoes. Se a cadeia de caracteres introduzida comecar por alguma das cadeias no indice 2 de t_cartoes e tiver o comprimento especificicado no indice 3, devolvemos a rede emissora a qual corresponde essas 2 condicoes. \n\n \n comp = str(len(ccred))\n \n for e in t_cartoes:\n \n if comeca_por_um(ccred,e[2]) and comp in e[3]: \n return e[0]\n \n return ''", "def densidad(qe):\r\n global x,rhoe,rhoi,dx,nparticulas,npuntos_malla,pared_izquierda,pared_derecha\r\n \r\n j1 = sp.dtype(sp.int32) # Asegura que la variable permanezca entera\r\n j2 = sp.dtype(sp.int32) \r\n \r\n # Factor de ponderacion de carga \r\n re = qe/dx \r\n # Densidad electronica \r\n rhoe = sp.zeros(npuntos_malla+1) \r\n # Mapa de cargas sobre la malla\r\n for i in range(nparticulas):\r\n xa = x[i]/dx # xparticula/dx\r\n j1 = int(xa) # indices de la malla fija xmalla/dx\r\n j2 = j1 + 1 # Siguiente punto en la malla\r\n f2 = xa - j1 # |xmalla - xparticula|/dx\r\n f1 = 1.0 - f2\r\n rhoe[j1] = rhoe[j1] + re*f1\r\n rhoe[j2] = rhoe[j2] + re*f2\r\n\r\n # Condiciones de frontera periodica\r\n rhoe[0] += rhoe[npuntos_malla]\r\n rhoe[npuntos_malla] = rhoe[0]\r\n \r\n # Se agrega una densidad de iones neutral\r\n rhoi = rho0\r\n\r\n return True" ]
[ "0.6076252", "0.58189213", "0.58189213", "0.58189213", "0.58189213", "0.58189213", "0.5760438", "0.572302", "0.569719", "0.5684234", "0.5659719", "0.562336", "0.55975646", "0.55528826", "0.5529479", "0.5520378", "0.5477187", "0.54736406", "0.54670554", "0.54157156", "0.5395477", "0.53891087", "0.5351938", "0.5335156", "0.5335122", "0.52992976", "0.5294983", "0.52926606", "0.5270152", "0.5262885" ]
0.6132804
0
Constructor method. The `pos` parameter expects the genomic position as a 0-based index. Setting the `refr` or `alt` parameters to `.` will designate this variant as a "no call".
def __init__(self, seqid, pos, refr, alt, **kwargs):
    self._seqid = seqid
    self._pos = pos
    self._refr = refr
    self._alt = alt
    self.info = dict()
    for key, value in kwargs.items():
        self.info[key] = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, pos):\r\n self.pos = pos", "def __init__(self, pos, score=0):\n self.__pos = pos\n self.__score = score", "def __init__(self, pos, vel=None, frame=None):\n\n if isinstance(pos, coord.Galactocentric):\n pos = pos.data\n\n if not isinstance(pos, coord.BaseRepresentation):\n # assume Cartesian if not specified\n if not hasattr(pos, 'unit'):\n pos = pos * u.one\n\n # 3D coordinates get special treatment\n ndim = pos.shape[0]\n if ndim == 3:\n # TODO: HACK: until this stuff is in astropy core\n if isinstance(pos, coord.BaseRepresentation):\n kw = [(k, getattr(pos, k)) for k in pos.components]\n pos = getattr(coord, pos.__class__.__name__)(**kw)\n\n else:\n pos = coord.CartesianRepresentation(pos)\n\n else:\n pos = rep_nd.NDCartesianRepresentation(pos)\n\n else:\n ndim = 3\n\n if vel is None:\n if 's' not in pos.differentials:\n raise TypeError(\"You must specify velocity data when creating \"\n \"a {0} object.\".format(self.__class__.__name__))\n else:\n vel = pos.differentials.get('s', None)\n\n if not isinstance(vel, coord.BaseDifferential):\n # assume representation is same as pos if not specified\n if not hasattr(vel, 'unit'):\n vel = vel * u.one\n\n if ndim == 3:\n name = pos.__class__.get_name()\n Diff = coord.representation.DIFFERENTIAL_CLASSES[name]\n vel = Diff(*vel)\n else:\n Diff = rep_nd.NDCartesianDifferential\n vel = Diff(vel)\n\n # make sure shape is the same\n if pos.shape != vel.shape:\n raise ValueError(\"Position and velocity must have the same shape \"\n f\"{pos.shape} vs. {vel.shape}\")\n\n from ..potential.frame import FrameBase\n if frame is not None and not isinstance(frame, FrameBase):\n raise TypeError(\"Input reference frame must be a FrameBase \"\n \"subclass instance.\")\n\n self.pos = pos\n self.vel = vel\n self.frame = frame\n self.ndim = ndim", "def __init__(self,\n qpos: Optional[np.ndarray] = None,\n qvel: Optional[np.ndarray] = None,\n qacc: Optional[np.ndarray] = None):\n self.qpos = qpos\n self.qvel = qvel\n self.qacc = qacc", "def __init__(\n self,\n locus_tag: str,\n location: FeatureLocation,\n reading_frame: int,\n reference_sequence: Seq,\n name: str = None,\n anticodon: str = None\n ):\n\n super().__init__('tRNA', location=location, reference_sequence=reference_sequence, name=name)\n self.reading_frame = reading_frame\n self.locus_tag = locus_tag\n self.anticodon = anticodon\n self.gene = None", "def __init__(self, pos, direction, right):\n self.position = pos\n self.direction = direction\n self.right = right", "def __init__(self, pos=(0,0), dire = 0):\r\n self.initX = self.x = pos[0]\r\n self.initY = self.y = pos[1]\r\n self.initDir = self.dir = math.radians(dire)", "def __init__(\r\n self,\r\n unique_id,\r\n model,\r\n pos,\r\n ):\r\n super().__init__(unique_id, model)\r\n self.pos = np.array(pos)", "def __init__(self, pos, length, direction, board_size):\n self._pos = pos\n self._x_pos, self._y_pos = self._pos\n self._len = length\n self._dir = direction\n self._bs = board_size\n self._is_hit = False\n self._hit_coors = []\n self._coordinates = self.coordinates()", "def __init__(self, pos, radius=0):\n super().__init__(pos, radius)", "def __init__(self, *args):\n if len(args) == 1:\n position = args[0]\n if len(position) != 2:\n raise PositionError\n self._position = args\n elif len(args) == 2:\n self._position = args\n else:\n raise PositionError", "def __init__(self, position):\n self.position = position\n self.direction = 'U'\n self.length = 0", "def __init__(self, offset=(0, 0), spacing=10.0, side='left', length=1.15, 
**kwargs):\n super().__init__(offset)\n\n self._spacing = spacing\n if side == 'left':\n self._angle = 90\n elif side == 'right':\n self._angle = -90\n else:\n raise ValueError('Side must be left or right.')\n self._length = length\n self._gc = kwargs", "def __init__(self, pos: Vec2d, limb_lengths: list[int]) -> None:\n\t\tself.pos = Vec2d(pos)\n\t\tself.bones = self.__generate_bone_structure(limb_lengths)\n\t\tself.__target = self.__calculate_end_effector_pos().copy()", "def __init__(self, start_pos, end_pos, direction):\n self.s_pos = start_pos\n self.e_pos = end_pos\n self.dir = direction", "def __init__(self, pos, radius):\n super().__init__(pos, radius)", "def __init__(self, pos, radius):\n self.pos = pos\n self.radius = radius", "def _init(self, position):\n\t\tself._position = position", "def create(self, pos):\n self.pos = pos", "def __init__(self):\n self.position = 0", "def __init__(self,x_pos, y_pos, velocity, kind, fillcolor = 'red'):\n self._velocity = velocity\n self._kind = kind\n super().__init__(x = x_pos, y=y_pos, width = BOLT_WIDTH, \\\n height = BOLT_HEIGHT, fillcolor=fillcolor)", "def __init__(\n self,\n locus_tag: str,\n gene_type: str,\n location: Union[FeatureLocation, CompoundLocation],\n name: str,\n reference_sequence: Seq,\n cog: str = None,\n y_ome: str = None,\n essential: bool = False,\n replication_strand: str = None,\n origin_distance: int = None,\n terminus_distance: int = None\n ):\n\n super().__init__('gene', location=location, reference_sequence=reference_sequence, name=name)\n self.reading_frame = get_reading_frame(self.location, len(reference_sequence))\n\n # if the gene is a coding sequence, it should have a multiple of 3 length; sequence is set by super init\n if gene_type == 'CDS' and len(self.location) % 3 != 0:\n raise ValueError(locus_tag + ': sequence should have multiple of 3 length if gene is coding')\n\n self.locus_tag = locus_tag\n self.gene_type = gene_type\n self.cog = cog\n self.y_ome = y_ome\n self.essential = essential\n self.replication_strand = replication_strand\n self.origin_distance = origin_distance\n self.terminus_distance = terminus_distance\n \n # only set by add_regulon_db_gene_ids\n self.id = None\n\n # only set after calculate_and_add_cai is run\n self.cai = None\n\n # only set after the appropriate linking functions are run\n self.protein = None\n self.trna = None\n self.transcription_units = []\n self.attenuators = []\n self.riboswitches = []\n self.shine_dalgarno = None\n self.i_modulons = []", "def __init__(self, indices, protein=None, dna_strand=None):\r\n if protein is None and dna_strand is None:\r\n raise ValueError(\"Pattern DNA \" + str(indices) + \" is Error, because the value cannot be obtained.\")\r\n elif protein is not None and dna_strand is not None:\r\n raise ValueError(\"Pattern DNA \" + str(indices) + \" is Error, because the value cannot be selected.\")\r\n\r\n self._indices = indices\r\n\r\n if protein is not None:\r\n self._t_strand = self._obtain_t_strand(protein)\r\n self._c_strand = self._obtain_c_strand(self._t_strand)\r\n else:\r\n self._t_strand = copy.deepcopy(dna_strand)\r\n self._c_strand = self._obtain_c_strand(dna_strand)", "def __init__(self, chromosome, strand, full_position=None, position_before=None, position_after=None, immutable=False):\n # need to make instance mutable to be able to set anything, due to how __setattr__ is decorated\n self.make_mutable_REMEMBER_CLEANUP_FIRST() \n # now start setting attributes\n self.chromosome = chromosome\n self.strand = strand\n # parse 
full_position if provided\n if full_position is not None:\n if (position_before is not None) or (position_after is not None):\n raise ValueError(\"If providing full_position, cannot also provide position_before/position_after!\")\n self.position_before, self.position_after = self._parse_full_position(full_position)\n # otherwise use position_before and/or position_after\n else:\n if position_before is None and position_after is None:\n raise ValueError(\"Can't create an Insertion_position object with no known position values!\")\n try:\n self.position_before = None if position_before is None else int(position_before)\n self.position_after = None if position_after is None else int(position_after)\n except TypeError: \n raise ValueError(\"position_before/position_after must be int-castable or None!\")\n if immutable: self.make_immutable()", "def __init__(self):\n\n\t\tself.position = np.array([0, 0])", "def __init__(self, cls: type, pos_options: typing.Optional[list] = None,\n kw_options: typing.Optional[typing.Dict[str, typing.Any]] = None):\n\n self.cls = cls\n if pos_options is None:\n self.pos_options = []\n else:\n self.pos_options = pos_options\n\n if kw_options is None:\n self.kw_options = {}\n else:\n self.kw_options = kw_options\n\n # Try construct object once\n self.get_next()", "def __init__(self,\n seq,\n aligned_index,\n unaligned_index):\n \n self.seq=seq\n self.aligned_index=aligned_index\n self.unaligned_index=unaligned_index\n self.numeric_seq=convert_to_numeric(self.seq)\n self.upstream_regions=[]\n self.downstream_regions=[]\n self.labels=[]\n self.match_count=0\n self.percent_match=0\n self.non_specific_hits=0\n self.non_specific_percent=0\n \n self.std_index = False\n self.f_std_index = None\n self.r_std_index = None", "def __init__(self, genotypes, sample_pos, node_pos, edges, scale_snps=True): \n super().__init__(genotypes=genotypes,\n sample_pos=sample_pos,\n node_pos=node_pos,\n edges=edges,\n scale_snps=scale_snps)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(observationRPY, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.position is None:\n self.position = pcl_segment.msg.positionRPY()\n if self.is_Known is None:\n self.is_Known = False\n else:\n self.position = pcl_segment.msg.positionRPY()\n self.is_Known = False", "def __init__(self, **kwds):\n raise NotImplementedError" ]
[ "0.66734743", "0.6133717", "0.60993713", "0.60905766", "0.60079235", "0.60035133", "0.5972953", "0.59705895", "0.59635913", "0.59585696", "0.5953488", "0.59305876", "0.5828635", "0.5826504", "0.5818104", "0.57789505", "0.57692695", "0.5743065", "0.57181144", "0.57134575", "0.57084835", "0.56331897", "0.5620225", "0.5602323", "0.55986387", "0.5533526", "0.5521456", "0.5511773", "0.54872787", "0.5477865" ]
0.65443873
1
Getter method for the variant window.
The "variant window" (abbreviated `VW` in VCF output) is the sequence interval in the proband contig that encompasses all kmers overlapping the variant.
GCCTAGTTAGCTAACGTCCCGATCACTGTGTCACTGC
          .....A
           ....A.
            ...A..
             ..A...
              .A....
               A.....
               |           < position of variant
          [         ]      < variant window, interval (inclusive) encompassing all 6mers that overlap the variant
def window(self):
    return self.attribute('VW')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CuN17vol(n,h,w,V):\n Vshell = 2*ZCE_GaAs(n,V)*(h-ZCE_GaAs(n,V)) + w*ZCE_GaAs(n,V)\n Vcore = (w-2*ZCE_GaAs(n,V))*(h-ZCE_GaAs(n,V))\n return Vshell*1e14,Vcore*1e14", "def get_asymwindow(self):\n asymwindow = sum(\n [\n np.concatenate(\n [\n np.zeros(self.win_length-(i*self.hop_length)),\n self.window[:i*self.hop_length] ** 2\n ], axis=0\n ) for i in range(1, self.r_overlap)\n ]\n )\n\n return asymwindow", "def window(n, /, wintype='boxcar'):\n if wintype == 'welch':\n raise ValueError('Welch window needs 2-tuple of (name, beta).')\n elif wintype == 'kaiser':\n raise ValueError('Kaiser window needs 2-tuple of (name, beta).')\n elif wintype == 'gaussian':\n raise ValueError('Gaussian window needs 2-tuple of (name, stdev).')\n else:\n w = signal.get_window(wintype, n)\n # if normalize:\n # w /= np.sum(w)\n return w", "def size_with_window(self):\n return self.container['size_with_window']", "def _getWindow(self, signal, sr, window, aggregation):\n if aggregation == \"rmse\":\n return np.sqrt(np.mean(np.square(signal), axis=0))\n\n elif aggregation == \"sum\":\n return np.sum(signal, axis=0)\n\n elif aggregation == \"replayGain\":\n return standard.ReplayGain(sampleRate=sr)(signal)\n\n return signal", "def __repr__(self):\n name = 'sw'\n return \"{}({}, {})\".format(\n name,\n list(self),\n self.window_size)", "def get_window_size(self):\n raise NotImplementedError", "def dwindow(window):\r\n \r\n h=window\r\n nh=len(h)\r\n lh=(nh-1)/2\r\n stepheight=(h[0]+h[-1])/2.\r\n ramp=float((h[-1]-h[0]))/nh\r\n h2=np.zeros(nh+2)\r\n h2[1:nh+1]=h-stepheight-ramp*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n dwin=(h2[2:nh+2]-h2[0:nh])/2.+ramp\r\n dwin[0]=dwin[0]+stepheight\r\n dwin[-1]=dwin[-1]-stepheight\r\n \r\n return dwin", "def v_win(self, diff, draw_margin):\n x = diff - draw_margin\n denom = self.cdf(x)\n return (self.pdf(x) / denom) if denom else -x", "def get_w(self):\n raise NotImplementedError", "def span(self):\n if self.vcount == 0:\n return None\n else:\n return self.vmax-self.vmin", "def get_optimal_window(mutation_position_relative, seq_len_wo_special, model_window):\n half_model_window = model_window // 2\n if seq_len_wo_special <= model_window:\n return [0,seq_len_wo_special]\n elif mutation_position_relative < half_model_window:\n return [0,model_window]\n elif mutation_position_relative >= seq_len_wo_special - half_model_window:\n return [seq_len_wo_special - model_window, seq_len_wo_special]\n else:\n return [max(0,mutation_position_relative-half_model_window), min(seq_len_wo_special,mutation_position_relative+half_model_window)]", "def get_w(self):\n return self.w", "def _get_window_width(self):", "def number_windows(self):\n if self.is_power_onoff():\n return 0\n elif int(self['x1_size']) > 0 and int(self['x2_size']) > 0:\n return 2\n elif int(self['x1_size']) > 0:\n return 1\n else:\n raise Exception, 'Could not determine number of windows'", "def window(self) -> pulumi.Input['AssetModelMetricWindowArgs']:\n return pulumi.get(self, \"window\")", "def width_v_v_v(model: SingleRhNeutrinoModel, genv: Generation):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n w = parameters.GF**2 * mx**5 / (768 * np.pi**3) * u**2\n pre = 2 if genv == model.gen else 1.0\n return pre * w", "def _window(self, get_lims=False):\n\t\timg_h, img_w = self.od_model.img_shape\n\t\th_llim = 0\n\t\tw_llim = img_w // 3\n\t\th_ulim = img_h - (img_h // 4)\n\t\tw_ulim = 1- wllim\n\n\t\tif get_lims:\n\t\t\treturn (h_llim, h_ulim), (w_llim, w_ulim)\n\n\t\twindow = slice(h_llim, h_ulim), slice(w_llim, 
w_ulim)\n\t\treturn window", "def get_window_name(cls, quad):\t\t\n\t\treturn ast.literal_eval(str(cls.get_address_value(quad.result)))", "def get_window(wintype: str, block_samples: int) -> tf.Tensor:\n if wintype == 'vorbis':\n window = tf.signal.vorbis_window(block_samples, dtype=tf.float32)\n elif wintype == 'kaiser-bessel-derived':\n window = tf.signal.kaiser_bessel_derived_window(block_samples,\n dtype=tf.float32)\n elif wintype == 'rectangular':\n window = 1.0/tf.sqrt(2.0) * tf.ones((block_samples,))\n else:\n raise ValueError(f'Window type {wintype} unknown.')\n return window", "def window_function(self):\n return self._wndfnc, self._wndfnc_norm", "def window(windowX, windowY, occurrency):\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0", "def get_vwind(self):\n return self.read_register(4100, 1, 3)", "def windows(self,windowSize):\n for i in range(0,len(self)-windowSize):\n yield (i,i+windowSize)", "def find_windowsize(data):\n time = [i[0] for i in data]\n voltage = [i[1] for i in data]\n\n if len(time) != len(voltage):\n total_index_data = len(voltage)\n else:\n total_index_data = min(len(time), len(voltage))\n\n windowsize = round(total_index_data / 6)\n\n return windowsize", "def dynamic_voting_window(x, lower):\n\n if x < 3:\n return lower\n elif x < 8:\n return lower + 0.2 * (x - 2)\n elif x < 13:\n return lower + 1 + 0.4 * (x - 7)\n elif x < 16:\n return lower + 3 + (x - 12) * (0.5 + (0.1 * (x - 13)))\n else:\n return 9", "def w(self):\n return self._w", "def getwinsize(self):", "def window(k, w, x0):\n return 2.*w * sinc(k*w) * (np.cos(k*x0) + 1.j*np.sin(k*x0))", "def wm(self):\n return self.position" ]
[ "0.5497812", "0.5490596", "0.54333067", "0.53656536", "0.53286386", "0.518327", "0.5145856", "0.51408875", "0.51281923", "0.51159835", "0.5093824", "0.50667363", "0.50662166", "0.5062015", "0.5056756", "0.5056118", "0.50559324", "0.5042922", "0.5027012", "0.5025038", "0.50142014", "0.5011057", "0.4995039", "0.49904755", "0.49851933", "0.49287984", "0.49283797", "0.49216345", "0.49128786", "0.4912458" ]
0.67229086
0
Wrap the `kevlar call` procedure as a generator function.
Input is the following.
- an iterable containing one or more target sequences from the reference genome, stored as khmer or screed sequence records
- an iterable containing one or more contigs assembled by kevlar, stored as khmer or screed sequence records
- alignment match score (integer)
- alignment mismatch penalty (integer)
- alignment gap open penalty (integer)
- alignment gap extension penalty (integer)
The function yields tuples of target sequence name, query sequence name, and alignment CIGAR string
def call(targetlist, querylist, match=1, mismatch=2, gapopen=5, gapextend=0, ksize=31):
    for query in sorted(querylist, reverse=True, key=len):
        bestcigar = None
        bestscore = None
        besttarget = None
        bestorientation = None
        for target in sorted(targetlist, key=lambda record: record.name):
            cigar, score, strand = align_both_strands(
                target.sequence, query.sequence, match, mismatch, gapopen, gapextend
            )
            if bestscore is None or score > bestscore:
                bestscore = score
                bestcigar = cigar
                besttarget = target
                bestorientation = strand
        if bestorientation == -1:
            query.sequence = kevlar.revcom(query.sequence)
        for varcall in make_call(besttarget, query, bestcigar, ksize):
            yield varcall
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_call_cli(targetfile, queryfile, cigar, capsys):\n target = data_file(targetfile)\n query = data_file(queryfile)\n args = kevlar.cli.parser().parse_args(['call', query, target])\n kevlar.call.main(args)\n\n out, err = capsys.readouterr()\n print(out)\n cigars = list()\n for line in out.strip().split('\\n'):\n cigarmatch = re.search('CG=([^;\\n]+)', line)\n if cigarmatch:\n cigar = cigarmatch.group(1)\n cigars.append(cigar)\n assert cigar in cigars", "def __call__(self, seq_path, result_path=None, log_path=None):\r\n raise NotImplementedError(\"Aligner is an abstract class\")", "def __call__(self, seq_path, result_path=None, log_path=None):\r\n # return list of the chimeric sequences\r\n return self.getResult(seq_path)", "def haplotype_caller(gatk, xmx, reference, bams, cores, out_directory, ploidy, bed_file=None):\n gvcfs = []\n bam_pairs = get_bam_pairs(bams)\n commands = []\n try:\n os.mkdir(out_directory)\n except OSError:\n pass\n for sample, bams in bam_pairs.items():\n output = os.path.join(out_directory, os.path.basename(sample + '.g.vcf'))\n command = HAPLOTYPE_CALLER.format(xmx, gatk, reference, output, ploidy)\n command = command + ' -I ' + ' -I '.join(bams) \n command = command + ' -bamout ' + output + \".bam\"\n if bed_file is not None:\n command = command + \" -L \" + bed_file\n commands.append(command)\n print command\n gvcfs.append(output)\n queue_jobs(commands, \"haplotypeCaller\", cores)\n return gvcfs", "def test_call_ssc_two_proximal_snvs():\n qfile = data_file('ssc107.contig.augfasta.gz')\n tfile = data_file('ssc107.gdna.fa.gz')\n\n qinstream = kevlar.parse_augmented_fastx(kevlar.open(qfile, 'r'))\n query = [record for record in qinstream][0]\n target = [record for record in khmer.ReadParser(tfile)][0]\n\n variants = make_call(target, query, '25D263M25D', 31)\n assert len(variants) == 2", "def call(args) :\n from caller import bam_call\n bam_call(args)", "def main():\n parser = argparse.ArgumentParser(description='MergeGVCFs and genotype them using the GATK')\n parser.add_argument('-g', '--gatk', dest='gatk', help=\"Location of the GATK\", required=True)\n parser.add_argument('-x', '--xmx', dest='xmx', help=\"Memory to use with JAVA\", required=True)\n parser.add_argument('-c', '--cores', dest='cores', help=\"Number of cores to use\")\n parser.add_argument('-o', '--output', dest='output', \n help='Final output from the haplotype caller')\n parser.add_argument('-r', '--reference', dest='reference', \n help='Reference FASTA file')\n parser.add_argument('-b','--bed', dest='bed_file',\n help=\"Bed file for limiting the GATK\")\n parser.add_argument('-p', '--ploidy', dest='ploidy', \n help=\"Sample ploidy\", default=2)\n parser.add_argument('-d', '--out_directory', dest='directory', help='Output director')\n parser.add_argument('bams', nargs=\"*\", help='gVCF variant call files output from the GATK')\n args = parser.parse_args()\n args.cores = int(args.cores)\n args.xmx = args.xmx.strip('\"')\n print args.bams\n genovcfs = haplotype_caller(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n bams=args.bams, reference=args.reference,\n out_directory=args.directory, ploidy=args.ploidy, bed_file=args.bed_file)\n outputs = merge_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n gvcfs=genovcfs, reference=args.reference)\n genotype_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n inputs=outputs, output=args.output, reference=args.reference,bed_file=args.bed_file)\n #haplotype_single(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n # inputs=args.gvcfs, 
reference=args.reference)", "def __call__(self, words, offset):\n for b in self._backends:\n for alt, l in b(words, offset): # yield from\n yield alt, l", "def test_call_wrapped_function(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {'0': 'R27DLI_4812',\r\n '1': 'U1PLI_7889',\r\n '2': 'W3Cecum_4858',\r\n '3': 'R27DLI_3243',\r\n }\r\n app = GenericRepSetPicker(params={'Algorithm': 'most_abundant',\r\n 'ChoiceF': make_most_abundant, 'ChoiceFRequiresSeqs': True})\r\n obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)\r\n self.assertEqual(obs, exp)", "def __init__(\n self,\n models,\n tgt_dict,\n tgt_dict_mt,\n beam_size=1,\n beam_size_mt=1,\n max_len_a=0,\n max_len_b=200,\n max_len_a_mt=0,\n max_len_b_mt=200,\n max_len=0,\n min_len=1,\n normalize_scores=True,\n len_penalty=1.0,\n len_penalty_mt=1.0,\n unk_penalty=0.0,\n temperature=1.0,\n match_source_len=False,\n no_repeat_ngram_size=0,\n eos=None,\n eos_mt=None,\n symbols_to_strip_from_output=None,\n lm_model=None,\n lm_weight=1.0,\n ):\n super().__init__()\n\n from examples.speech_to_speech.unity.sequence_generator import SequenceGenerator\n\n self.generator = SequenceGenerator(\n models,\n tgt_dict,\n beam_size=beam_size,\n max_len_a=max_len_a,\n max_len_b=max_len_b,\n max_len=max_len,\n min_len=min_len,\n normalize_scores=normalize_scores,\n len_penalty=len_penalty,\n unk_penalty=unk_penalty,\n temperature=temperature,\n match_source_len=match_source_len,\n no_repeat_ngram_size=no_repeat_ngram_size,\n search_strategy=search.BeamSearch(tgt_dict),\n eos=eos,\n symbols_to_strip_from_output=symbols_to_strip_from_output,\n lm_model=lm_model,\n lm_weight=lm_weight,\n )\n self.eos = self.generator.eos\n\n self.generator_mt = SequenceGenerator(\n models,\n tgt_dict_mt,\n beam_size=beam_size_mt,\n max_len_a=max_len_a_mt,\n max_len_b=max_len_b_mt,\n max_len=max_len,\n min_len=min_len,\n normalize_scores=normalize_scores,\n len_penalty=len_penalty_mt,\n unk_penalty=unk_penalty,\n temperature=temperature,\n match_source_len=match_source_len,\n no_repeat_ngram_size=no_repeat_ngram_size,\n search_strategy=search.BeamSearch(tgt_dict_mt),\n eos=eos_mt,\n symbols_to_strip_from_output=symbols_to_strip_from_output,\n )", "def _yield_result_files(self, tpl, **kwargs):\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"ONP\", \"PacBio\"):\n suffix = \"_long\"\n else:\n suffix = \"\"\n yield from expand(\n tpl,\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n **kwargs\n )", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=__description__)\n\n # Positionals\n parser.add_argument(\"fasta_file\",help=\"fasta file to be turned into kmers\")\n\n # Options\n parser.add_argument(\"-o\",\"--outbase\",help=\"base name for output files\",action=\"store\",type=str,default=None)\n parser.add_argument(\"-k\",\"--kmersize\",help=\"kmer size\",action=\"store\",type=int,default=12)\n parser.add_argument(\"-s\",\"--seqperfile\",help=\"number of sequences per output file\",action=\"store\",\n type=int,default=50000)\n parser.add_argument(\"-n\",\"--numkmers\",\n help=\"Number of kmers to make, starting from most to least common. 
If -1, make all possible.\",\n type=int,default=1000000)\n\n args = parser.parse_args(argv)\n\n if args.outbase is None:\n out_base = args.fasta_file\n else:\n out_base = args.outbase\n\n parse_proteome(args.fasta_file,kmer_size=args.kmersize,out_base=out_base,\n seq_per_file=args.seqperfile,num_to_write=args.numkmers)", "def gene_aligner(fq1_files, smp_name, args, fq2_files=None):\n project_path = init_rnaseq_project(args['path_out'], analysis_type=1)\n gene_align_path = project_path['gene']\n\n ## qc-report\n qc_path = os.path.join(gene_align_path['report'], 'qc')\n # QC_reporter(fq1_files, qc_path).run()\n\n ## update args\n args['fq1'] = fq1_files\n args['fq2'] = fq2_files\n args['path_out'] = gene_align_path['mapping']\n args['smp_name'] = smp_name\n args['align_to_te'] = False\n\n ## run alignment\n map_bam_list = Alignment(**args).run()\n\n ## filt map_genome\n map_bam = []\n for i in map_bam_list:\n for k in i:\n if k.endswith('map_' + args['genome'] + '.bam'):\n map_bam.append(k)\n\n # # create bigWig files\n # for bam in map_bam:\n # bam2bigwig(\n # bam=bam, \n # genome=args['genome'], \n # path_out=gene_align_path['bigWig'],\n # strandness=args['s'], \n # binsize=args['bin_size'],\n # overwrite=args['overwrite']) \n\n return map_bam", "def main():\n # Get command line options\n args = get_args()\n\n # Set substitution matrix:\n if args.exchange_matrix == \"pam250\":\n exchangeMatrix = pam250\n elif args.exchange_matrix == \"blosum62\":\n exchangeMatrix = blosum62\n else:\n exchangeMatrix = identity\n\n # Read sequences from fasta file, and catch error reading file\n try:\n sequences = readSequences(open(args.fasta))\n except OSError as e:\n print(\"ERROR: cannot open or read fasta input file:\", e.filename)\n\n for seq in sequences:\n print(seq)\n\n # Call alignment routine(s):\n if args.align_global:\n alignment, score_matrix = do_global_alignment(\n sequences, exchangeMatrix, args.gap_penalty)\n elif args.align_local:\n alignment, score_matrix = do_local_alignment(\n sequences, exchangeMatrix, args.gap_penalty)\n elif args.align_semiglobal:\n alignment, score_matrix = do_semiglobal_alignment(\n sequences, exchangeMatrix, args.gap_penalty)\n else:\n sys.exit(\"BUG! this should not happen.\")\n \n\n # Print the result to files\n if args.alignment: \n print_alignment_to_file(alignment, args.alignment)\n if args.score_matrix:\n print_matrix_to_file(score_matrix, args.score_matrix)\n \n # Print the result on screen\n if args.print_on_screen:\n print_matrix_on_screen(alignment)\n print_matrix_on_screen(score_matrix)", "def main():\n arg_parser = argparse.ArgumentParser(description=\"\"\"\n This utility will take a SAM alignment file from paired end reads \n and filter the original read FASTQ files do those reads without\n high-likelihood alignments to human.\n For gzipped alignments, consider using pipes: \n gunzip -c ref.fna.gz | strip_mt_ebv.py | gzip > ref.nomtebv.fna.gz\n \"\"\")\n\n arg_parser.add_argument(\n '--alnfile', '-A',\n type=argparse.FileType('r'),\n help='Alignment File. Can be stdin. 
For gzip, consider pipes',\n default=sys.stdin\n )\n arg_parser.add_argument(\n '--r1in', '-1',\n required=True,\n help='Input fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2in', '-2',\n required=True,\n help='Input fastq file for R2'\n )\n arg_parser.add_argument(\n '--r1out', '-o1',\n required=True,\n help='Output fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2out', '-o2',\n required=True,\n help='Output fastq file for R2'\n )\n arg_parser.add_argument(\n '--mapq',\n default=30,\n type=int,\n help='Minimum mapq required to be considered a valid read'\n )\n arg_parser.add_argument(\n '--cov_min',\n type=float,\n default=0.9\n )\n\n args = arg_parser.parse_args()\n\n passed_ids = get_passing_ids(\n args.alnfile,\n args.mapq,\n args.cov_min,\n )\n\n filter_fastq(\n passed_ids,\n args.r1in,\n args.r2in,\n args.r1out,\n args.r2out\n )", "def main(): \n \n # parse command line arguments\n parser = argparse.ArgumentParser(description='Runs variant calling on pileup file and stores in vfc file')\n parser.add_argument('--use-read-quality', default=False, action='store_true',\n help='tells the algorithm to estimate p from read qualities')\n parser.add_argument('--call-less-positions', default=False, action='store_true',\n help='tells the program to call less positions (not whole pileup file)')\n parser.add_argument('--input-file', default='merged-normal.pileup', type=str,\n help='path to input file in pileup format')\n parser.add_argument('--output-file', default='Make name from input name', type=str,\n help='name for the output vcf file. If not given, will be created from input file name')\n parser.add_argument('--p', default='0.99', type=float,\n help='probability estimate of one nucleotide read being correct, used by vc algorithm')\n parser.add_argument('--positions-to-call', default='10000', type=int,\n help='how many positions to call if call-less-positions set to true')\n args = parser.parse_args()\n if args.output_file == 'Make name from input name':\n args.output_file = args.input_file + '.vcf'\n \n variant_caller = VariantCaller()\n sample = 'SAMPLE1'\n \n # creates vcf file\n create_vcf_start = time.time()\n vcf = create_vcf_file(args.output_file, sample)\n create_vcf_end = time.time()\n print('Vcf header created. Elapsed time: {}'.format(create_vcf_end - create_vcf_start))\n\n main_loop_start = time.time()\n position_count = 0\n variant_caller_time = 0\n positions_with_variants = 0\n write_vcf_time = 0\n\n for pileup_line in pileup_reader(args.input_file):\n # calls variant for each pileup line\n variant_caller_start = time.time()\n variant_caller.call_variant(pileup_line, args.p, args.use_read_quality)\n if pileup_line['alts'] != '.':\n positions_with_variants += 1\n variant_caller_time += time.time() - variant_caller_start\n\n # writes line in VCF file\n write_vcf_start = time.time()\n write_vcf_line(pileup_line, vcf, sample)\n write_vcf_time = time.time() - write_vcf_start\n\n position_count += 1\n if args.call_less_positions and (position_count >= args.positions_to_call):\n break\n \n main_loop_end = time.time()\n total_running_time = main_loop_end - main_loop_start\n\n print('Processed {} positions. 
Found variants at {} positions.'.format(position_count, positions_with_variants))\n\n print('Total running time is {}'.format(total_running_time))\n print('Pileup reader: {}'.format(total_running_time - variant_caller_time - write_vcf_time))\n print('Variant calling: {}'.format(variant_caller_time))\n print('Vcf writing: {}'.format(write_vcf_time))", "def __call__(self, *args, **kwargs):\n procedure_name = \".\".join(cypher_escape(part) for part in self.name.split(\".\"))\n arg_list = [(str(i), arg) for i, arg in enumerate(args)]\n cypher = \"CALL %s(%s)\" % (procedure_name, \", \".join(\"$\" + a[0] for a in arg_list))\n keys = kwargs.get(\"keys\")\n if keys:\n cypher += \" YIELD %s\" % \", \".join(keys)\n return self.graph.run(cypher, dict(arg_list))", "def test_call_ssc_isolated_snv(ccid, cigar, varcall):\n qfile = data_file('ssc' + ccid + '.contig.augfasta')\n tfile = data_file('ssc' + ccid + '.gdna.fa')\n\n qinstream = kevlar.parse_augmented_fastx(kevlar.open(qfile, 'r'))\n queryseqs = [record for record in qinstream]\n targetseqs = [record for record in khmer.ReadParser(tfile)]\n\n calls = list(call(targetseqs, queryseqs))\n assert len(calls) == 1\n assert str(calls[0]) == varcall", "def intronDiscovery(poolArguement):\n\n\tbamFiles, gene, chrom, start, stop, cwd = poolArguement\n\n\tprint ('processing ' + gene)\n\n\tpos = ''.join([chrom, ':', start, '-', stop])\n\n\tfor bam in bamFiles:\n\n\t\tspliceDict = {}\n\t\tgeneFilePath = (cwd + \"/\" + bam[:-4] + \"/\" + gene + \".txt\")\n\n\t\ttry:\n\t\t\texitcode, stdout, stderr = run(' '.join(['samtools view', bam, pos]))\n\t\texcept Exception as e:\n\t\t\tprint ('Exception message: ' + str(e))\n\t\t\tprint (\"Exception occured while running \\\"samtools view\\\" on \" + bam + \" for position \" + pos + \" Skipping.\")\n\t\t\tcontinue\n\n\t\tif not stdout:\n\t\t\t#print ('No introns found for ' + gene + ' at ' + pos + ' in ' + bam)\n\t\t\tcontinue\n\n\t\tfor line in stdout.splitlines():\n\n\t\t\telems = line.decode().split()\n\n\t\t\talignmentStart = int(elems[3])\n\t\t\tcigar = str(elems[5])\n\t\t\talignmentScore = int(elems[1])\n \n\t\t\tif 'N' not in cigar: \t#only get introns\n\t\t\t\tcontinue\n\n\t\t\tif (alignmentScore >= 256): \t#only primary alignments\n\t\t\t\tcontinue\n\n\t\t\tif not ((alignmentStart > int(start)) and (alignmentStart < int(stop))): \t#check if alignment start is after known junction start but before known junction end \n\t\t\t\tcontinue\n\n\t\t\ttry:\n\t\t\t\toffset, matchedExon, intronLength = parseCIGARForIntrons(cigar)\n\t\t\texcept Exception as e:\n\t\t\t\tprint ('Error message: ' + str(e))\n\t\t\t\tprint ('Error trying to parse CIGAR string: ' + cigar + ' with the bam file ' + bam + ' and the position: ' + pos + ' Skipping.')\n\t\t\t\tcontinue\n\n\t\t\tjunctionStart = alignmentStart + matchedExon + offset\n\t\t\tjunctionEnd = junctionStart + intronLength\n\n\t\t\t# Beryl Cummings' Code, taken from makeUniqSpliceDict()\n\t\t\t# uniqueSplice = ':'.join([chrom, str(junctionStart), str(junctionEnd)])\n\t\t\tuniqueSplice = (chrom, str(junctionStart), str(junctionEnd))\n\t\t\t\n\t\t\tif uniqueSplice not in spliceDict:\n\t\t\t\tspliceDict[uniqueSplice] = 1\n\t\t\telse:\n\t\t\t\tspliceDict[uniqueSplice] += 1\n\n\t\tdel stdout # saves ram in between samtool calls\n\n\t\tif spliceDict:\n\t\t\tprintSplices(geneFilePath, spliceDict)\n\t\t\tdel spliceDict\n\n\tprint ('finished ' + gene)", "def createBeamspotFinder(config=jobConfig, containerName = \"VxPrimaryCandidate\",suffix=\"\"):\n import AthenaCommon.CfgMgr as 
CfgMgr\n from AthenaCommon.AppMgr import ToolSvc\n from AthenaCommon.AlgSequence import AlgSequence\n topSequence = AlgSequence()\n\n # Extra options that may not be in default jobConfig\n\n if not 'MinVertexProb' in config:\n config['MinVertexProb'] = 0.01\n if not 'MaxVtxChi2' in config:\n config['MaxVtxChi2'] = 100 \n\n if not 'FixParK' in config:\n config['FixParK'] = False\n\n if not 'MaxSigmaTr' in config:\n config['MaxSigmaTr'] = 100.\n if not 'MaxVtxErrTr' in config:\n config['MaxVtxErrTr'] = 100.\n if not 'OutlierChi2Tr' in config:\n config['OutlierChi2Tr'] = 50.\n\n \n InDetBeamSpotVertex = CfgMgr.InDet__InDetBeamSpotVertex(name= 'InDetBeamSpotVertex_'+containerName+suffix,\n VertexContainer = containerName,\n VertexTypes = config['VertexTypes'],\n MinTracksPerVtx = config['MinTracksPerVtx'], \n MinVtxNum = config['MinVtxNum'],\n MaxOutlierLoops = 30,\n OutlierMaxRejection = 30,\n OutlierWidthFail= 5.1e-3, # in mm\n OutlierRhoFail = 0.8,\n DoHists = doVertexHists,\n OutputLevel = min(INFO,config['outputlevel']),\n VertexTreeName = \"Vertices_\"+containerName+suffix,\n MinVertexProb = config['MinVertexProb'],\n MaxVtxChi2 = config['MaxVtxChi2'],\n MaxSigmaTr = config['MaxSigmaTr'] ,\n MaxVtxErrTr = config['MaxVtxErrTr'] ,\n OutlierChi2Tr = config['OutlierChi2Tr']\n )\n ToolSvc += InDetBeamSpotVertex\n # Will be automatically printed as part of InDetBeamSpotFinder printout\n # print ToolSvc.InDetBeamSpotVertex\n \n # from InDetBeamSpotFinder.InDetBeamSpotFinderConf import InDet__InDetBeamSpotDbWriterTool\n InDetBeamSpotDbWriterTool = CfgMgr.InDet__InDetBeamSpotDbWriterTool(name = 'InDetBeamSpotDbWriterTool_'+containerName+suffix,\n OutputLevel = min(INFO,config['outputlevel']),\n TreeName = \"COOLBeamspot_\"+containerName+suffix,\n Tag = containerName+suffix\n )\n ToolSvc += InDetBeamSpotDbWriterTool\n print ToolSvc.InDetBeamSpotDbWriterTool\n \n #from InDetBeamSpotFinder.InDetBeamSpotFinderConf import InDet__InDetBeamSpotFinder as InDetBeamSpotFinder\n topSequence += CfgMgr.InDet__InDetBeamSpotFinder(name = 'InDetBeamSpotFinder_'+containerName+suffix,\n BeamSpotTool = InDetBeamSpotVertex,\n BeamSpotWriterTool = InDetBeamSpotDbWriterTool,\n MaxCount = config['MaxCount'],\n LumiRange = config['LumiRange'],\n LumiBlockRanges = config['LumiBlockRanges'],\n RunRange = config['RunRange'],\n EventRange = config['EventRange'],\n #ForceRunNumber = 52280,\n DoHists = doBeamspotHist,\n WriteDb = False,\n UseDefaultValues = True,\n #WriteFailed = True,\n Default_SigmaX = 30.0,\n Default_SigmaY = 30.0,\n Default_SigmaZ = 500.0,\n Default_SigmaXY = 0.0,\n OutputLevel = min(INFO,config['outputlevel']),\n BeamSpotRootName = \"Beamspots_\"+containerName+suffix\n )\n try:\n topSequence.InDetBeamSpotFinder.UseLBFromViewed = config['UseLBFromViewed']\n topSequence.InDetBeamSpotFinder.UseLBFromAccepted = config['UseLBFromAccepted']\n except:\n print 'ERROR: You are using an older version of InDetBeamSpotFinder - please update to InDetBeamSpotFinder-01-00-29 or later'\n print topSequence.InDetBeamSpotFinder", "def main():\n args = get_args()\n fasta = args.fasta\n kmer= args.overlap\n\n if kmer <= 1:\n die('-k \"{}\" must be a positive integer'.format(kmer))\n\n if not os.path.isfile(fasta):\n die('\"{}\" is not a file'.format(fasta))\n\n kstart={}\n kend={}\n with open(fasta, 'r') as f:\n for record in SeqIO.parse(f, \"fasta\"):\n kstart[record.id]=(find_kmers(record.seq, kmer)[0])\n kend[record.id]=(find_kmers(record.seq, kmer)[-1])\n \n for endk, endv in kend.items():\n for startk, startv in 
kstart.items():\n if endv in startv:\n if endk is not startk:\n print(endk, startk)", "def gene_rnaseq(args):\n log.info('running for genes')\n\n group = 'gene' # !!!!\n\n ###########################\n ## sense strand analysis ##\n ###########################\n ## control, args['c1']\n ctl_args = args.copy()\n ctl_args['align_to_te'] = False # required !!!!\n ctl_args['extra_index'] = None # required !!!!\n ctl_args['path_out'] = os.path.join(args['path_out'], args['C'])\n ctl_bam = gene_aligner(args['c1'], args['C'], ctl_args, args['c2'])\n ## count reads\n ctl_count = os.path.join(args['path_out'], args['C'], group, 'count', 'count.sens.txt')\n run_featureCounts(\n gtf=args['gtf'],\n bam_files=ctl_bam,\n out=ctl_count,\n strandness=args['s'],\n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## treatment, args['t1']\n tre_args = args.copy()\n tre_args['align_to_te'] = False # required !!!!\n tre_args['extra_index'] = None # required !!!!\n tre_args['path_out'] = os.path.join(args['path_out'], args['T'])\n tre_bam = gene_aligner(args['t1'], args['T'], tre_args, args['t2'])\n ## count reads\n tre_count = os.path.join(args['path_out'], args['T'], 'gene', 'count', 'count.sens.txt')\n run_featureCounts(\n gtf=args['gtf'], \n bam_files=tre_bam, \n out=tre_count, \n strandness=args['s'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## de analysis using DESeq2\n de_path = os.path.join(args['path_out'], args['C'] + '.vs.' + args['T'])\n deseq2_exe(\n control=ctl_count, \n treatment=tre_count, \n path_out=de_path, \n genome=args['genome'], \n nameA=args['C'], \n nameB=args['T'], \n group=group,\n path_suffix='sense')\n\n ###############################\n ## antisense strand analysis ##\n ###############################\n # determine the strandness\n if args['s'] == 2:\n args['anti_strand'] = 1\n elif args['s'] == 1:\n args['anti_strand'] = 2\n else:\n args['anti_strand'] = 0\n\n ## count reads\n ctl_count = os.path.join(args['path_out'], args['C'], group, 'count', 'count.anti.txt')\n run_featureCounts(\n gtf=args['gtf'], \n bam_files=ctl_bam, \n out=ctl_count, \n strandness=args['anti_strand'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## count reads\n tre_count = os.path.join(args['path_out'], args['T'], group, 'count', 'count.anti.txt')\n run_featureCounts(\n gtf=args['gtf'], \n bam_files=tre_bam, \n out=tre_count, \n strandness=args['anti_strand'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## de analysis using DESeq2\n de_path = os.path.join(args['path_out'], args['C'] + '.vs.' 
+ args['T'])\n deseq2_exe(\n control=ctl_count, \n treatment=tre_count, \n path_out=de_path, \n genome=args['genome'], \n nameA=args['C'], \n nameB=args['T'], \n group=group,\n path_suffix='antisense')", "def _generic_alignment(cline, seqrecs, preserve_order=True, **kwargs):\n # convert iterator to list, so that we can extract keys and still run the alignment\n unaligned = list(seqrecs)\n # if alignment sequences from NCBI Blast, id will include spaces\n keys = [seqrec.id.split()[0] for seqrec in unaligned]\n # execute alignment\n aligned = _generic_aligner_commandline_file(cline, unaligned, **kwargs)\n if preserve_order:\n aligned = SeqIO.to_dict(aligned)\n aligned = MultipleSeqAlignment(aligned[key] for key in keys)\n # make all alignment uppercase\n return MultipleSeqAlignment([seqrec.upper() for seqrec in aligned])", "def run (self, bioseqs, *clargs):\t\t\n\t\t## Preconditions:\n\t\tassert (2 <= len (bioseqs))\n\t\t## Main:\n\t\tself._inseqs = bioseqs\n\t\tself.call_cmdline (*clargs)", "def run_sims_foreach_allele(params_foreach_allele_dirpath, output_dirpath, start_popsize, max_popsize, gens, trials, seq_error_dirpath, use_norm_approx):\n\n ################ hard_coded_parameters ###################\n #this gives the selection coefficient. These are 'neutral'\n #simulations, so this is set at 0, but if one wants to \n #simulate alleles under selection, then change this\n #accordingly\n s = 0.0\n ################ hard_coded_parameters ###################\n\n if seq_error_dirpath[-1] != '/':\n seq_error_dirpath += '/'\n if output_dirpath[-1] != '/':\n output_dirpath += '/'\n if params_foreach_allele_dirpath[-1] != '/':\n params_foreach_allele_dirpath += '/'\n if not os.path.exists(output_dirpath):\n os.makedirs(output_dirpath)\n\n #get error rate for each possible amino acid\n error_rate_dic = estimate_error(seq_error_dirpath)\n\n #now get the population size at each\n #generation\n pop_sizes = get_pop_sizes(start_popsize, max_popsize, gens)\n\n #choose the correct parameter file based upon\n #the value of the global variable 'SGE_TASK_ID'\n param_filepaths = []\n for i in os.listdir(params_foreach_allele_dirpath):\n if i == 'README' or i[0] == '.':\n continue\n param_filepaths.append(params_foreach_allele_dirpath + i)\n sge_task_id = int(os.environ['SGE_TASK_ID'])\n param_filepath = param_filepaths[sge_task_id-1]\n\n #read in parameters from the parameter\n #file\n filein = open(param_filepath, \"r\")\n filein.readline()#burn header\n line = filein.readline()[:-1].split('\\t')\n filein.close()\n gene = line[0]\n position = line[1]\n derived_allele = line[3]\n rep = line[4]\n start_allele_freq = line[5]\n ending_num_reads = line[6]\n\n #check if sufficient data in param file\n if start_allele_freq == 'NA' or ending_num_reads == 'NA':\n output_filepath = '%s%s_%s_%s_%s' % (output_dirpath, gene, derived_allele, position, rep)\n fileout = open(output_filepath, \"w\")\n fileout.write(\"No simulations because start allele freq and/or ending number of reads was 'NA'.\\nnull\\n\")\n fileout.close()\n return\n else:\n start_allele_freq = float(start_allele_freq)\n ending_num_reads = float(ending_num_reads)\n #check if the ending number of reads is zero\n if ending_num_reads == 0.0:\n output_filepath = '%s%s_%s_%s_%s' % (output_dirpath, gene, derived_allele, position, rep)\n fileout = open(output_filepath, \"w\")\n fileout.write(\"No simulations because the ending number of reads was 0 so the ending allele frequency is undefined.\\nnull\\n\")\n fileout.close()\n return\n\n #this is the mutation 
rate that the given allele will mutate\n #to no longer be that allele\n error_rate_from = error_rate_dic[derived_allele][0]\n #this is the mutation rate where other alleles will mutate\n #to become the given allele\n error_rate_to = error_rate_dic[derived_allele][1]\n\n #define the output filepath based on allele identity\n output_filepath = '%s%s_%s_%s_%s' % (output_dirpath, gene, derived_allele, position, rep)\n\n #run simulation!\n run_sims(output_filepath, pop_sizes, trials, error_rate_from, error_rate_to, start_allele_freq, ending_num_reads, use_norm_approx, s)\n\n return", "def enrichKeywords(self, result):\n\n # TODO: Implement function\n pass", "def run(self):\n contig_file = self.data.contigfiles[0]\n reads = self.data.readfiles\n\n ## Index contigs using IS algorithm\n prefix = os.path.join(self.outpath, 'bt2')\n cmd_args = [self.build_bin, '-f', contig_file, prefix]\n self.arast_popen(cmd_args, overrides=False)\n\n ## Align reads\n samfile = os.path.join(self.outpath, 'align.sam')\n cmd_args = [self.executable, '-x', prefix, '-S', samfile,\n '-p', self.process_threads_allowed]\n if len(reads) == 2:\n cmd_args += ['-1', reads[0], '-2', reads[1]]\n elif len(reads) == 1:\n cmd_args += ['-U', reads[0]]\n else:\n raise Exception('Bowtie plugin error')\n self.arast_popen(cmd_args, overrides=False)\n\n if not os.path.exists(samfile):\n raise Exception('Unable to complete alignment')\n return {'alignment': samfile}", "def matchmaker(samfile, semaphore=None):\n #reader = DictReader(samfile)\n labels = ['qname', 'flag', 'rname', 'pos', 'mapq', 'cigar', 'rnext', 'pnext',\n 'tlen', 'seq', 'qual']\n cached_rows = {}\n for line in samfile:\n if line.startswith('@'):\n continue # skip header line\n if 'HCV' not in line:\n continue # skip reads that mapped to another reference\n\n items = line.strip('\\n').split('\\t')\n row = dict(zip(labels, items[:11]))\n qname = row['qname']\n old_row = cached_rows.pop(qname, None)\n if old_row is None:\n cached_rows[qname] = row\n else:\n if semaphore is not None:\n semaphore.acquire()\n # current row should be the second read of the pair\n yield old_row, row", "def testGetSequence():\r\n\t\r\n\t#a few of hand-tested genome positions\r\n\ttest_data = [\t('1',500,520,'GTCTGACCTGAGGAGAACTGT'),\r\n\t\t\t\t\t('2',500,520,'CCCGACCCCGACCCCGACCCA'),\r\n\t\t\t\t\t('3',50000,50020,'TCTTCTTTTATGAAAAAGGAT'),\r\n\t\t\t\t\t('4',50000,50020,'AGAGCCCTGCAATTTGAAGAT'),\r\n\t\t\t\t\t('5',100000,100020,'AATGTTCACCAGTATATTTTA'),\r\n\t\t\t\t\t('X',100000,100020,'TAGGTCTCATTGAGGACAGAT'),\r\n\t\t\t\t\t('Y',100000,100020,'TAGGTCTCATTGAGGACAGAT')]\r\n\t\t\t\t\t\r\n\tfor this_check in test_data:\r\n\t\tyield CheckGetSequence, this_check", "def main(argv=None):\n\n if not argv:\n argv = sys.argv\n\n # setup command line parser\n parser = E.ArgumentParser(description=__doc__)\n\n parser.add_argument(\"--version\", action='version', version=\"1.0\")\n\n parser.add_argument(\"-m\", \"--merge-pairs\", dest=\"merge_pairs\",\n action=\"store_true\",\n help=\"merge paired-ended reads and output interval \"\n \"for entire fragment. \")\n\n parser.add_argument(\"--max-insert-size\", dest=\"max_insert_size\", type=int,\n help=\"only merge paired-end reads if they are less than \"\n \"# bases apart. \"\n \" 0 turns off this filter. \")\n\n parser.add_argument(\"--min-insert-size\", dest=\"min_insert_size\", type=int,\n help=\"only merge paired-end reads if they are at \"\n \"least # bases apart. \"\n \" 0 turns off this filter. 
\")\n\n parser.add_argument(\"--bed-format\", dest=\"bed_format\", type=str,\n choices=('3', '4', '5', '6'),\n help=\"bed format to output. \")\n\n parser.set_defaults(\n region=None,\n call_peaks=None,\n merge_pairs=None,\n min_insert_size=0,\n max_insert_size=0,\n bed_format='6',\n )\n\n (args, unknown) = E.start(parser, argv=argv, unknowns=True)\n\n if len(unknown) == 0:\n unknown.append(\"-\")\n\n samfile = pysam.AlignmentFile(unknown[0], \"rb\")\n\n args.bed_format = int(args.bed_format)\n\n if args.merge_pairs is not None:\n counter = merge_pairs(samfile,\n args.stdout,\n min_insert_size=args.min_insert_size,\n max_insert_size=args.max_insert_size,\n bed_format=args.bed_format)\n\n E.info(\"category\\tcounts\\n%s\\n\" % counter.asTable())\n\n else:\n # use until_eof. Files from stdin have no index\n it = samfile.fetch(until_eof=True)\n\n # more comfortable cigar parsing will\n # come with the next pysam release\n BAM_CMATCH = 0\n BAM_CDEL = 2\n BAM_CREF_SKIP = 3\n take = (BAM_CMATCH, BAM_CDEL, BAM_CREF_SKIP)\n outfile = args.stdout\n\n for read in it:\n if read.is_unmapped:\n continue\n\n t = 0\n for op, l in read.cigar:\n if op in take:\n t += l\n\n if read.is_reverse:\n strand = \"-\"\n else:\n strand = \"+\"\n outfile.write(\"%s\\t%d\\t%d\\t%s\\t%d\\t%c\\n\" %\n (read.reference_name,\n read.pos,\n read.pos + t,\n read.qname,\n read.mapq,\n strand))\n\n E.stop()" ]
[ "0.55534565", "0.49242163", "0.48804748", "0.48293254", "0.4763826", "0.47369397", "0.46600562", "0.4650148", "0.45753592", "0.4518727", "0.45137888", "0.4498816", "0.44679454", "0.44503778", "0.44378328", "0.4428958", "0.44179434", "0.440283", "0.4389591", "0.43709272", "0.43639746", "0.43605042", "0.43510962", "0.43364745", "0.43050486", "0.43045908", "0.4292884", "0.42903593", "0.42783946", "0.4276091" ]
0.61551183
0
Return a new instance of the Network class.
def new_network():
    new_names = Names()
    new_devices = Devices(new_names)
    return Network(new_names, new_devices)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def create_network(self, *, name: t.Optional[str] = None) -> Network:\n network = Network(self, name=name)\n self._networks.add(network)\n return network", "def create_network(self):\n from dallinger.networks import Star\n\n return Star(max_size=2)", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def getInstance():\n return net()", "def _create_network(self, name):\n network = self.network(self.num_actions, self.quantile_embedding_dim,\n name=name)\n return network", "def get_network(self, red=r):\n bbid_dict = {'bbid': self.bbid,\n 'min_lat': self.min_lat,\n 'max_lat': self.max_lat,\n 'min_lon': self.min_lon,\n 'max_lon': self.max_lon\n }\n return Network(bbid=bbid_dict, r=red)", "def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def create_network(address=None, **options):\n return NetworkDefinition(address, **options)", "def gen_network(self):\n di = nx.DiGraph()\n di.add_edges_from(self.network_edges())\n di.add_nodes_from(self.network_nodes())\n self.network = di\n self.highlight_cycles()\n return self", "def _build_network(self):\n pass", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def get_network(self):\n\n # Find which nodes are input and which are output. 
We may want to store\n # this info somewhere else (like in the genome)\n\n inputs = []\n outputs = []\n bias = []\n edges = []\n node_num = dict() #Map from node_id to zero index node number\n\n for i, node in enumerate(self.node_genes):\n # Create mapping\n node_num[node.node_id] = i\n\n # Store input and output node_numbers\n if node.node_type is INPUT:\n inputs.append(i)\n elif node.node_type is OUTPUT:\n outputs.append(i)\n elif node.node_type is BIAS:\n bias.append(i)\n\n # Create edge list.\n for link in self.link_genes:\n if link.enabled:\n edges.append((node_num[link.to_node.node_id],\n node_num[link.from_node.node_id], link.weight))\n\n\n # Build an adjacency matrix for the network\n n = len(node_num)\n adj_matrix = np.zeros((n, n))\n try:\n for e in edges:\n adj_matrix[e[:2]] = e[2]\n except:\n global GENOME\n GENOME = self\n print([node.node_id for node in self.node_genes])\n print()\n print('len(node_genes)', len(self.node_genes))\n print('edge', e)\n print('adj.shape', adj_matrix.shape)\n sys.exit()\n\n return Network(adj_matrix, inputs, outputs, bias)", "def clone(self) -> \"Graph\":\n return Graph(seed=self.seed,\n layout=self.layout,\n community_n=self.community_n,\n community_size_mean=self.community_size_mean,\n community_size_std=self.community_size_std,\n community_p_in=self.community_p_in,\n community_p_out=self.community_p_out,\n considered_immune_threshold=self.considered_immune_threshold)", "def create_network():\n net = ln.models.TinyYolo(CLASSES, CONF_THRESH, NMS_THRESH)\n\n net.load(args.weight)\n net.eval()\n net.postprocess.append(ln.data.transform.TensorToBrambox(NETWORK_SIZE, LABELS))\n net = net.to(device)\n return net", "def net(self):\n if self._net is None:\n self._net = Net(name=self.name)\n return self._net", "def test_create_network():\n _network = Network()", "def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)", "def __init__(self, network: Network):\n self.graph = network.graph", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * 
self.act_range)(out)\n return Model(inp, out)", "def __deepcopy__(self, memodict={}):\n nodes = [deepcopy(n) for n in self.nodes]\n return Network(nodes)", "def new(cls):\n return cls()", "def New():\n Self = $classname()\n Self._initialize_()\n Self._update_()\n return Self", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def network_with_devices():\n new_names = Names()\n new_devices = Devices(new_names)\n new_network = Network(new_names, new_devices)\n\n [SW1_ID, SW2_ID, OR1_ID] = new_names.lookup([\"Sw1\", \"Sw2\", \"Or1\"])\n\n # Add devices\n new_devices.make_device(SW1_ID, new_devices.SWITCH, 0)\n new_devices.make_device(SW2_ID, new_devices.SWITCH, 0)\n new_devices.make_device(OR1_ID, new_devices.OR, 2)\n\n return new_network", "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network", "def __init__(self, network=None, additional_info=None): # noqa: E501 # noqa: E501\n self._network = None\n self._additional_info = None\n self.discriminator = None\n self.network = network\n self.additional_info = additional_info" ]
[ "0.76890683", "0.7565279", "0.72739667", "0.72504044", "0.72160345", "0.70732576", "0.69950795", "0.6969925", "0.690757", "0.68565255", "0.67459494", "0.6560776", "0.65384907", "0.6508981", "0.6470703", "0.6434769", "0.64018965", "0.63492566", "0.63455343", "0.6334854", "0.63262504", "0.6294186", "0.6258903", "0.6232228", "0.6215313", "0.62059283", "0.61820537", "0.6155272", "0.61416316", "0.612815" ]
0.82193184
0
Return a Network class instance with three devices in the network.
def network_with_devices():
    new_names = Names()
    new_devices = Devices(new_names)
    new_network = Network(new_names, new_devices)
    [SW1_ID, SW2_ID, OR1_ID] = new_names.lookup(["Sw1", "Sw2", "Or1"])
    # Add devices
    new_devices.make_device(SW1_ID, new_devices.SWITCH, 0)
    new_devices.make_device(SW2_ID, new_devices.SWITCH, 0)
    new_devices.make_device(OR1_ID, new_devices.OR, 2)
    return new_network
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def get_network(self):\n\n # Find which nodes are input and which are output. We may want to store\n # this info somewhere else (like in the genome)\n\n inputs = []\n outputs = []\n bias = []\n edges = []\n node_num = dict() #Map from node_id to zero index node number\n\n for i, node in enumerate(self.node_genes):\n # Create mapping\n node_num[node.node_id] = i\n\n # Store input and output node_numbers\n if node.node_type is INPUT:\n inputs.append(i)\n elif node.node_type is OUTPUT:\n outputs.append(i)\n elif node.node_type is BIAS:\n bias.append(i)\n\n # Create edge list.\n for link in self.link_genes:\n if link.enabled:\n edges.append((node_num[link.to_node.node_id],\n node_num[link.from_node.node_id], link.weight))\n\n\n # Build an adjacency matrix for the network\n n = len(node_num)\n adj_matrix = np.zeros((n, n))\n try:\n for e in edges:\n adj_matrix[e[:2]] = e[2]\n except:\n global GENOME\n GENOME = self\n print([node.node_id for node in self.node_genes])\n print()\n print('len(node_genes)', len(self.node_genes))\n print('edge', e)\n print('adj.shape', adj_matrix.shape)\n sys.exit()\n\n return Network(adj_matrix, inputs, outputs, bias)", "def _connection_maker(\n self,\n first_device,\n first_port,\n second_device,\n second_port):\n if first_port is None:\n return self.network.make_connection(\n first_device.id, None,\n second_device.id, second_port.id)\n else:\n return self.network.make_connection(\n first_device.id, first_port.id,\n second_device.id, second_port.id)", "def _get_network(self, kind, router=True, vlans=True, vlan_ids=True):\r\n network = {}\r\n macs = self.get('%s_mac' % kind)\r\n network['mac_addresses'] = macs\r\n\r\n if len(macs) == 0:\r\n return network\r\n\r\n if router:\r\n network['router'] = self.get('router', macs[0])\r\n\r\n if vlans:\r\n network['vlans'] = self.get('vlans', macs[0])\r\n\r\n if vlan_ids:\r\n network['vlan_ids'] = self.get('vlan_ids', macs[0])\r\n\r\n return network", "def copy(self):\n return MultiterminalDevice(\n self.center.copy(),\n list(i.copy() for i in self.leads),\n list(i.copy() for i in self.connections),\n )", "def networks(view):\n return \"network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def cc(self):\n return MultiterminalDevice(\n self.center.cc(),\n list(i.cc() for i in self.leads),\n list(i.conj() for i in self.connections),\n )", "def create_network(num_nodes=8, num_assets=1, channels_per_node=3, transport_class=None):\n # pylint: disable=too-many-locals\n\n # TODO: check if the loopback interfaces exists\n\n random.seed(1337)\n\n if channels_per_node > num_nodes:\n raise ValueError(\"Can't create more channels than nodes\")\n\n client_hosts = ['127.0.0.10', '127.0.0.11']\n\n # if num_nodes it is not even\n half_of_nodes = int(ceil(num_nodes / 2))\n\n # globals\n discovery = PredictiveDiscovery((\n (host, half_of_nodes, INITIAL_PORT)\n for host in client_hosts\n ))\n\n # The mock needs to be atomic since all app's will use the same instance,\n # for the real application the syncronization is done by the JSON-RPC\n # server\n blockchain_service = BlockChainServiceMock()\n\n # Each app instance is a Node in the 
network\n apps = []\n for host in client_hosts:\n for idx in range(half_of_nodes):\n port = INITIAL_PORT + idx\n\n app = mk_app(\n blockchain_service,\n discovery,\n transport_class or UDPTransport,\n port=port,\n host=host,\n )\n\n apps.append(app)\n\n for i in range(num_assets):\n asset_address = sha3('asset:%d' % i)[:20]\n blockchain_service.new_channel_manager_contract(asset_address=asset_address)\n\n asset_list = blockchain_service.asset_addresses\n assert len(asset_list) == num_assets\n\n create_network_channels(blockchain_service, asset_list, apps, channels_per_node)\n\n for app in apps:\n for asset_address in asset_list:\n app.raiden.setup_asset(asset_address, app.config['reveal_timeout'])\n\n return apps", "def get_network(self, red=r):\n bbid_dict = {'bbid': self.bbid,\n 'min_lat': self.min_lat,\n 'max_lat': self.max_lat,\n 'min_lon': self.min_lon,\n 'max_lon': self.max_lon\n }\n return Network(bbid=bbid_dict, r=red)", "def create_netatmo_connection(self) -> ty.Tuple[lnetatmo.WeatherStationData, NetatmoDomain]:\n auth = lnetatmo.ClientAuth(clientId=self.client_id,\n clientSecret=self.client_secret,\n username=self.username,\n password=self.password,\n scope='read_station')\n device_data = lnetatmo.WeatherStationData(auth)\n domain = NetatmoDomain(device_data.stations)\n\n return device_data, domain", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def create_network(self):\n from dallinger.networks import Star\n\n return Star(max_size=2)", "def network_instances(self) -> Iterator[NetworkInstance]:\n return self._get_related_instance(NetworkInstance, \"l3-network\")", "def create_network(num_subs):\n\n # Need one host for each subscriber, one for a publisher, and one for a broker\n n_hosts = num_subs + 2\n\n topo = SingleSwitchTopo(n=n_hosts)\n\n return Mininet(topo=topo, controller=OVSController)", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def test_make_connection(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n\n or1 = devices.get_device(OR1_ID)\n\n # or1 inputs are initially unconnected\n assert or1.inputs == {I1: None,\n I2: None}\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # or1 inputs should now be connected\n assert or1.inputs == {I1: (SW1_ID, None),\n I2: (SW2_ID, None)}", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def getMoreComplexThreeLayerGraph(self):\n makeLayer = self.makeLayer\n graph = self.graph\n addNodesToLayer = self.addNodesToLayer\n addPortOnSide = self.addPortOnSide\n addEdgeBetweenPorts = self.addEdgeBetweenPorts\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n\n leftLayer = makeLayer()\n middleLayer = makeLayer()\n 
rightLayer = makeLayer()\n\n leftNodes = addNodesToLayer(3, leftLayer)\n middleNodes = addNodesToLayer(2, middleLayer)\n rightNodes = addNodesToLayer(3, rightLayer)\n\n leftMiddleNodePort = addPortOnSide(leftNodes[1], PortSide.EAST)\n middleLowerNodePortEast = addPortOnSide(middleNodes[1], PortSide.EAST)\n middleUpperNodePortEast = addPortOnSide(middleNodes[0], PortSide.EAST)\n rightUpperNodePort = addPortOnSide(rightNodes[0], PortSide.WEST)\n rightMiddleNodePort = addPortOnSide(rightNodes[1], PortSide.WEST)\n\n addEdgeBetweenPorts(middleUpperNodePortEast, rightUpperNodePort)\n addEdgeBetweenPorts(middleUpperNodePortEast, rightMiddleNodePort)\n addEdgeBetweenPorts(middleUpperNodePortEast, rightMiddleNodePort)\n eastWestEdgeFromTo(middleLowerNodePortEast, rightNodes[2])\n eastWestEdgeFromTo(leftMiddleNodePort, middleNodes[0])\n eastWestEdgeFromTo(middleNodes[1], rightUpperNodePort)\n eastWestEdgeFromTo(leftMiddleNodePort, middleNodes[1])\n eastWestEdgeFromTo(leftNodes[2], middleNodes[0])\n eastWestEdgeFromTo(leftNodes[0], middleNodes[0])\n\n return graph", "def get_network(self) -> EthereumNetwork:\n return EthereumNetwork(int(self.w3.net.version))", "def init_network() -> dict:\n network = {}\n network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n network['b1'] = np.array([0.1, 0.2, 0.3])\n network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n network['b2'] = np.array([0.1, 0.2])\n network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])\n network['b3'] = np.array([0.1, 0.2])\n return network", "def get_network_devices(user, passwd, base_api_url):\n network_devices = ''\n response = connect_to_idrac(user, passwd, base_api_url)\n if response and response.json():\n network_devices_info = response.json()\n try:\n network_devices = network_devices_info[u'Members']\n except KeyError:\n network_devices = ''\n get_user_response(message='could not get network devices info')\n else:\n get_user_response(message='idrac connection status code is 401')\n\n return network_devices", "def _build_network(self):\n pass", "def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode", "def test_03(self):\n if _debug: TestIAmRouterToNetwork._debug(\"test_03\")\n\n # create a network\n tnet = TNetwork()\n\n # test device sends request\n tnet.iut.start_state.doc(\"3-1-0\") \\\n .call(tnet.iut.nse.i_am_router_to_network,\n destination=Address(\"1:*\"),\n ).doc(\"3-1-1\") \\\n .success()\n\n # network 1 sees router to networks 2 and 3\n tnet.sniffer1.start_state.doc(\"3-2-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[2, 3],\n ).doc(\"3-2-1\") \\\n .success()\n\n # network 2 sees nothing\n tnet.sniffer2.start_state.doc(\"3-3-0\") \\\n .timeout(10).doc(\"3-3-1\") \\\n .success()\n\n # network 3 sees nothing\n tnet.sniffer3.start_state.doc(\"3-4-0\") \\\n .timeout(10).doc(\"3-4-1\") \\\n .success()\n\n # run the group\n tnet.run()", "def network(self):\n return self.__network", "def macro_network():\n # fmt: off\n tpm = np.array([\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 1.0, 1.0],\n ])\n # fmt: on\n return 
Network(tpm, node_labels=LABELS[:tpm.shape[1]])", "def getInstance():\n return net()", "def build_network(config):\n network_cfg = config['network']\n\n network_name = network_cfg['name']\n\n network_params = list(inspect.signature(eval(network_name).__init__).parameters)[1:]\n\n args = [f'{param}={network_cfg[param]}' for param in network_params if network_cfg.get(param)]\n\n try:\n model = eval('{}({})'.format(network_name, ', '.join(args)))\n except:\n raise ValueError('Can\\'t load network.')\n\n return model.to(device='cuda')", "def get_devices(self):\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.network.networkdevice.1.0+xml'})\n\n\t\tresp = self.ise.get('{0}/config/networkdevice'.format(self.url_base))\n\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tjson_res = ERS._to_json(resp.text)['ns3:searchResult']\n\n\t\tif resp.status_code == 200 and int(json_res['@total']) > 1:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = [(i['@name'], i['@id'])\n\t\t\t\t\t\t\t\t for i in json_res['ns3:resources']['ns5:resource']]\n\t\t\treturn result\n\n\t\telif resp.status_code == 200 and int(json_res['@total']) == 1:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = [(json_res['ns3:resources']['ns5:resource']['@name'],\n\t\t\t\t\t\t\t\t json_res['ns3:resources']['ns5:resource']['@id'])]\n\t\t\treturn result\n\n\t\telif resp.status_code == 200 and int(json_res['@total']) == 0:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = []\n\t\t\treturn result\n\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result" ]
[ "0.62895656", "0.5824083", "0.5780455", "0.5759089", "0.5730831", "0.5706215", "0.57014054", "0.5680786", "0.56487197", "0.56329834", "0.55932325", "0.5591472", "0.55858415", "0.55215573", "0.5447525", "0.5435627", "0.5412156", "0.5403432", "0.5399039", "0.537979", "0.53570205", "0.535587", "0.53442913", "0.52786094", "0.52759033", "0.52685356", "0.52645594", "0.52600753", "0.522795", "0.5224965" ]
0.7487247
0
Test if execute_network returns the correct output for XOR gates.
def test_execute_xor(new_network): network = new_network devices = network.devices names = devices.names [SW1_ID, SW2_ID, XOR1_ID, I1, I2] = names.lookup( ["Sw1", "Sw2", "Xor1", "I1", "I2"]) # Make devices devices.make_device(XOR1_ID, devices.XOR) devices.make_device(SW1_ID, devices.SWITCH, 0) devices.make_device(SW2_ID, devices.SWITCH, 0) # Make connections network.make_connection(SW1_ID, None, XOR1_ID, I1) network.make_connection(SW2_ID, None, XOR1_ID, I2) network.execute_network() assert new_network.get_output_signal(XOR1_ID, None) == devices.LOW # Set Sw1 to HIGH devices.set_switch(SW1_ID, devices.HIGH) network.execute_network() assert network.get_output_signal(XOR1_ID, None) == devices.HIGH # Set Sw2 to HIGH devices.set_switch(SW2_ID, devices.HIGH) network.execute_network() assert network.get_output_signal(XOR1_ID, None) == devices.LOW
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_execute_non_xor_gates(new_network, gate_id, switch_outputs,\n gate_output, gate_kind):\n network = new_network\n devices = network.devices\n names = devices.names\n\n [AND1_ID, OR1_ID, NAND1_ID, NOR1_ID, SW1_ID, SW2_ID, SW3_ID, I1, I2,\n I3] = names.lookup([\"And1\", \"Or1\", \"Nand1\", \"Nor1\", \"Sw1\", \"Sw2\", \"Sw3\",\n \"I1\", \"I2\", \"I3\"])\n\n LOW = devices.LOW\n HIGH = devices.HIGH\n\n # Make devices\n gate_id = eval(gate_id)\n gate_kind = eval(gate_kind)\n devices.make_device(gate_id, gate_kind, 3)\n devices.make_device(SW1_ID, devices.SWITCH, 0)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n devices.make_device(SW3_ID, devices.SWITCH, 0)\n\n # Make connections\n network.make_connection(SW1_ID, None, gate_id, I1)\n network.make_connection(SW2_ID, None, gate_id, I2)\n network.make_connection(SW3_ID, None, gate_id, I3)\n\n # Set switches\n switches = [SW1_ID, SW2_ID, SW3_ID]\n for i, switch_output in enumerate(switch_outputs):\n devices.set_switch(switches[i], eval(switch_output))\n\n network.execute_network()\n assert network.get_output_signal(gate_id, None) == eval(gate_output)", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def eval_genome(genome, config):\n\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n error = 4.0\n for xi, xo in zip(xor_inputs, xor_outputs):\n output = net.activate(xi)\n error -= (output[0] - xo[0]) ** 2\n return error", "def test_execute_non_gates(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n LOW = devices.LOW\n HIGH = devices.HIGH\n\n # Make different devices\n [SW1_ID, SW2_ID, SW3_ID, CL_ID, D_ID] = names.lookup([\"Sw1\", \"Sw2\", \"Sw3\",\n \"Clock1\", \"D1\"])\n devices.make_device(SW1_ID, devices.SWITCH, 1)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n devices.make_device(SW3_ID, devices.SWITCH, 0)\n devices.make_device(CL_ID, devices.CLOCK, 1)\n devices.make_device(D_ID, devices.D_TYPE)\n\n # Make connections\n network.make_connection(SW1_ID, None, D_ID, devices.DATA_ID)\n network.make_connection(CL_ID, None, D_ID, devices.CLK_ID)\n network.make_connection(SW2_ID, None, D_ID, devices.SET_ID)\n network.make_connection(SW3_ID, None, D_ID, devices.CLEAR_ID)\n\n # Get device outputs, the expression is in a string here so that it\n # can be re-evaluated again after executing devices\n sw1_output = \"network.get_output_signal(SW1_ID, None)\"\n sw2_output = \"network.get_output_signal(SW2_ID, None)\"\n sw3_output = \"network.get_output_signal(SW3_ID, None)\"\n clock_output = \"network.get_output_signal(CL_ID, None)\"\n dtype_Q = \"network.get_output_signal(D_ID, devices.Q_ID)\"\n dtype_QBAR = \"network.get_output_signal(D_ID, devices.QBAR_ID)\"\n\n # Execute devices until the clock is LOW at the start of its\n # period\n clock_device = devices.get_device(CL_ID)\n network.execute_network()\n while clock_device.clock_counter != 1 or eval(clock_output) != LOW:\n network.execute_network()\n\n # The clock is not rising yet, Q could be (randomly) HIGH or LOW\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output)] == [HIGH, LOW, LOW, LOW]\n\n assert eval(dtype_Q) in [HIGH, LOW]\n assert eval(dtype_QBAR) == network.invert_signal(eval(dtype_Q))\n\n network.execute_network() # the clock has risen\n # While sw1(DATA) is high, Q has now changed to HIGH\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n HIGH, LOW, 
LOW, HIGH, HIGH, LOW]\n\n devices.set_switch(SW1_ID, LOW) # Sw1 is connected to DATA\n devices.set_switch(SW2_ID, HIGH) # Sw2 is connected to SET\n network.execute_network() # the clock is not rising yet\n network.execute_network() # the clock has risen\n # Even if sw1(DATA) is LOW, and the clock is rising,\n # sw2(SET) is HIGH, so Q is HIGH\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n LOW, HIGH, LOW, HIGH, HIGH, LOW]\n\n devices.set_switch(SW1_ID, HIGH) # Sw1 is connected to DATA\n devices.set_switch(SW2_ID, LOW) # Sw2 is connected to SET\n devices.set_switch(SW3_ID, HIGH) # Sw3 is connected to CLEAR\n network.execute_network() # the clock is not rising yet\n network.execute_network() # the clock has risen\n # Even if sw1(DATA) is HIGH, and the clock is rising,\n # sw3(CLEAR) is HIGH, so Q is LOW\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n HIGH, LOW, HIGH, HIGH, LOW, HIGH]", "def test_bit_xor(self):\n value = bytearray([1])\n ops = [bitwise_operations.bit_xor(self.test_bin_ones, 0, 8, 1, value, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 1 + [1] * 4)\n assert bins[self.test_bin_ones] == expected_result", "def tests_truth():\n circ_m = ccxtest(4)\n print(circ_m)\n circ_m = crootnxtest(4)\n print(circ_m)\n circ_m = oracletest(4)\n print(circ_m)\n circ_m = ccx_otest(4)\n print(circ_m)", "def fit_xor(network,complexity=False):\n samples = 4\n xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]\n xor_outputs = [0, 1, 1, 0]\n\n\n sum_square_error = 0.0\n for inputs, expected in zip(xor_inputs, xor_outputs):\n # Serial activation propagates the inputs through the entire network.\n output = network.activate(inputs)\n sum_square_error += (output[0] - expected) ** 2\n\n # When the output matches expected for all inputs, fitness will reach\n # its maximum value of 1.0.\n fitness = 4 - sum_square_error\n\n if complexity:\n # Add to the fitness if the inputs are connected to the outputs\n G = DiGraph(network.A.T)\n if has_path(G,0,3) and has_path(G,1,3):\n fitness += con_weight\n\n # Add to fitness if there are more than 3 edges\n if (network.A != 0).sum() > 3:\n fitness += .10\n\n return fitness", "def verify_output(self, output):\n return output == self.output", "def test_network(neural_network, test_data):\n total_trials = 0\n correct_trials = 0\n output_values = [np.argmax(neural_network.calculate_output(vector[1])) for vector in test_data]\n expected_values = list(zip(*test_data))[0]\n for expected, recieved in zip(expected_values,output_values):\n total_trials += 1\n if expected == recieved:\n correct_trials+=1\n return correct_trials/total_trials", "def test_oscillating_network(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n [NOR1, I1] = names.lookup([\"Nor1\", \"I1\"])\n # Make NOR gate\n devices.make_device(NOR1, devices.NOR, 1)\n\n # Connect the NOR gate to itself\n network.make_connection(NOR1, None, NOR1, I1)\n\n assert not network.execute_network()", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n 
self.assertEqual(result, 140)", "def test_get_connected_output(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n # Inputs are unconnected, get_connected_output should return None\n assert network.get_connected_output(OR1_ID, I1) is None\n assert network.get_connected_output(OR1_ID, I2) is None\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n assert network.get_connected_output(OR1_ID, I1) == (SW1_ID, None)\n assert network.get_connected_output(OR1_ID, I2) == (SW2_ID, None)\n\n # Not a valid port for Sw1, get_connected_output should return None\n assert network.get_connected_output(SW1_ID, I2) is None", "def test_cnot():\n\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n X q0\n CNOT q0 q1\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.0, 0.0, 0.0, 1.0]).all()", "def xor_network():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 0],\n [1, 0, 1],\n [0, 1, 1],\n [0, 0, 0],\n ])\n cm = np.array([\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def test_others(self):\n outputAssert = self.buildingTests([\"Hola me gust@ programar en ICC 1.03\"])\n self.assertTrue((outputAssert[0][4] == outputAssert[1][4] and outputAssert[0][5] == outputAssert[1][5]) ^ (outputAssert[0][4] == outputAssert[1][5]) , f\"El resultado debería ser: \\\"{outputAssert[1][5]}\\\"\")", "def test_check_network(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n\n # Inputs are unconnected, check_network() should return False\n assert not network.check_network()\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # Inputs are now connected, check_network() should return True\n assert network.check_network()", "def graph_helper(device, output,input,target):\n output = output.clone().squeeze()\n corrects = torch.zeros(output.shape[0])\n for i in range(output.shape[0]): # goes through each iteration\n outputi = output[i]\n golden_label = convert_to_bits(device, outputi, input)\n target = target.view(target.size(0), -1)\n corrects[i] += torch.amin(golden_label == target, dim=[0]).sum().item() # counts the number that are the same i.e. 
correct predictions\n correct = corrects.cpu().detach().numpy()\n return correct", "def neural_result(self, input):\n n_output = self.network.activate(input)\n if n_output >= 0.5:\n return 2\n else:\n return 1", "def evaluate(net, data_loader):\n correct = 0\n total = 0\n net.reset()\n for data in tqdm(data_loader):\n inputs, output = data\n mask, score = gate_activation(net, inputs.view(-1))\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n print()\n print()\n print(\"mask\", mask)\n print(\"score\", score)\n print(\"xo\", xo)\n total += 1\n correct += ((xo > 0.5) == output[0].item())\n\n return float(correct)/total", "def test_XOR():\n\tk, outputs = 2, [0,1,1,0]\n\n\ttrue_pi0s = set(['00','11'])\n\ttrue_pi1s = set(['01','10'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('11',[],[[0,1]]),('00',[],[[0,1]])]\n\ttrue_ts1s = [('10',[[0,1]],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. %s != %s' % (ts1s,true_ts1s))", "def test_bit_xor_bad_arg(self):\n value = 1\n ops = [bitwise_operations.bit_xor(\"bad_name\", 0, 8, 1, value, None)]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(self.test_key, ops)", "def test_bit_xor_multiple_bytes(self):\n value = bytearray([8] * 5)\n ops = [bitwise_operations.bit_xor(self.five_255_bin, 0, 40, 5, value, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([247] * 5)\n assert bins[self.five_255_bin] == expected_result", "def test_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n register q2[2]\n register q3[3]\n X q2\n CNOT q2 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array)\n assert isclose(result, [1.0, 0.0, 1.0, 0.0]).all()", "def test_result(self):\n result = compute()\n self.assertEqual(result, '4782')\n print(\"eulpy25Test passed\")", "def test_network(bpn, test_data):\n DisplayNetwork.display_green(\"[INFO] Started to test the network\")\n output = bpn.Run(np.array(test_data))\n return output", "def test_bit_xor_multiple_bytes_value_unchanged(self):\n value = bytearray([0])\n ops = [bitwise_operations.bit_xor(self.test_bin_zeroes, 7, 8, 1, value, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 5)\n assert bins[self.test_bin_zeroes] == expected_result", "def get_connection():\n input_layer = layers.SigmoidLayer(2, weight=default_weight.copy())\n output_layer = layers.OutputLayer(1)\n return input_layer > output_layer", "def check_input_matches_expected_output(in_, out):\n ...", "def test_normal_goes_normal(self):\n eq_(self.msg, output(self.msg,\"OUTPUT\"))", "def xor_fault(a, b, out, fault):\n if (a != b) == out:\n return fault == 0\n else:\n return fault == 1" ]
[ "0.7108362", "0.64456266", "0.62483436", "0.6179121", "0.5954281", "0.5924266", "0.579673", "0.5786133", "0.5775739", "0.5742536", "0.57371473", "0.57168096", "0.56857705", "0.5650925", "0.56489795", "0.55563605", "0.54900867", "0.5481334", "0.5473376", "0.5467943", "0.54622275", "0.5449864", "0.5443937", "0.543103", "0.5414737", "0.53792274", "0.5370164", "0.536373", "0.53494996", "0.5346792" ]
0.75833476
0
Test if execute_network returns the correct output for non-XOR gates.
def test_execute_non_xor_gates(new_network, gate_id, switch_outputs, gate_output, gate_kind): network = new_network devices = network.devices names = devices.names [AND1_ID, OR1_ID, NAND1_ID, NOR1_ID, SW1_ID, SW2_ID, SW3_ID, I1, I2, I3] = names.lookup(["And1", "Or1", "Nand1", "Nor1", "Sw1", "Sw2", "Sw3", "I1", "I2", "I3"]) LOW = devices.LOW HIGH = devices.HIGH # Make devices gate_id = eval(gate_id) gate_kind = eval(gate_kind) devices.make_device(gate_id, gate_kind, 3) devices.make_device(SW1_ID, devices.SWITCH, 0) devices.make_device(SW2_ID, devices.SWITCH, 0) devices.make_device(SW3_ID, devices.SWITCH, 0) # Make connections network.make_connection(SW1_ID, None, gate_id, I1) network.make_connection(SW2_ID, None, gate_id, I2) network.make_connection(SW3_ID, None, gate_id, I3) # Set switches switches = [SW1_ID, SW2_ID, SW3_ID] for i, switch_output in enumerate(switch_outputs): devices.set_switch(switches[i], eval(switch_output)) network.execute_network() assert network.get_output_signal(gate_id, None) == eval(gate_output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_execute_xor(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, XOR1_ID, I1, I2] = names.lookup(\n [\"Sw1\", \"Sw2\", \"Xor1\", \"I1\", \"I2\"])\n\n # Make devices\n devices.make_device(XOR1_ID, devices.XOR)\n devices.make_device(SW1_ID, devices.SWITCH, 0)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n\n # Make connections\n network.make_connection(SW1_ID, None, XOR1_ID, I1)\n network.make_connection(SW2_ID, None, XOR1_ID, I2)\n\n network.execute_network()\n assert new_network.get_output_signal(XOR1_ID, None) == devices.LOW\n\n # Set Sw1 to HIGH\n devices.set_switch(SW1_ID, devices.HIGH)\n network.execute_network()\n assert network.get_output_signal(XOR1_ID, None) == devices.HIGH\n\n # Set Sw2 to HIGH\n devices.set_switch(SW2_ID, devices.HIGH)\n network.execute_network()\n assert network.get_output_signal(XOR1_ID, None) == devices.LOW", "def test_execute_non_gates(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n LOW = devices.LOW\n HIGH = devices.HIGH\n\n # Make different devices\n [SW1_ID, SW2_ID, SW3_ID, CL_ID, D_ID] = names.lookup([\"Sw1\", \"Sw2\", \"Sw3\",\n \"Clock1\", \"D1\"])\n devices.make_device(SW1_ID, devices.SWITCH, 1)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n devices.make_device(SW3_ID, devices.SWITCH, 0)\n devices.make_device(CL_ID, devices.CLOCK, 1)\n devices.make_device(D_ID, devices.D_TYPE)\n\n # Make connections\n network.make_connection(SW1_ID, None, D_ID, devices.DATA_ID)\n network.make_connection(CL_ID, None, D_ID, devices.CLK_ID)\n network.make_connection(SW2_ID, None, D_ID, devices.SET_ID)\n network.make_connection(SW3_ID, None, D_ID, devices.CLEAR_ID)\n\n # Get device outputs, the expression is in a string here so that it\n # can be re-evaluated again after executing devices\n sw1_output = \"network.get_output_signal(SW1_ID, None)\"\n sw2_output = \"network.get_output_signal(SW2_ID, None)\"\n sw3_output = \"network.get_output_signal(SW3_ID, None)\"\n clock_output = \"network.get_output_signal(CL_ID, None)\"\n dtype_Q = \"network.get_output_signal(D_ID, devices.Q_ID)\"\n dtype_QBAR = \"network.get_output_signal(D_ID, devices.QBAR_ID)\"\n\n # Execute devices until the clock is LOW at the start of its\n # period\n clock_device = devices.get_device(CL_ID)\n network.execute_network()\n while clock_device.clock_counter != 1 or eval(clock_output) != LOW:\n network.execute_network()\n\n # The clock is not rising yet, Q could be (randomly) HIGH or LOW\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output)] == [HIGH, LOW, LOW, LOW]\n\n assert eval(dtype_Q) in [HIGH, LOW]\n assert eval(dtype_QBAR) == network.invert_signal(eval(dtype_Q))\n\n network.execute_network() # the clock has risen\n # While sw1(DATA) is high, Q has now changed to HIGH\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n HIGH, LOW, LOW, HIGH, HIGH, LOW]\n\n devices.set_switch(SW1_ID, LOW) # Sw1 is connected to DATA\n devices.set_switch(SW2_ID, HIGH) # Sw2 is connected to SET\n network.execute_network() # the clock is not rising yet\n network.execute_network() # the clock has risen\n # Even if sw1(DATA) is LOW, and the clock is rising,\n # sw2(SET) is HIGH, so Q is HIGH\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n LOW, HIGH, LOW, HIGH, HIGH, LOW]\n\n devices.set_switch(SW1_ID, HIGH) # Sw1 is 
connected to DATA\n devices.set_switch(SW2_ID, LOW) # Sw2 is connected to SET\n devices.set_switch(SW3_ID, HIGH) # Sw3 is connected to CLEAR\n network.execute_network() # the clock is not rising yet\n network.execute_network() # the clock has risen\n # Even if sw1(DATA) is HIGH, and the clock is rising,\n # sw3(CLEAR) is HIGH, so Q is LOW\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n HIGH, LOW, HIGH, HIGH, LOW, HIGH]", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def eval_genome(genome, config):\n\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n error = 4.0\n for xi, xo in zip(xor_inputs, xor_outputs):\n output = net.activate(xi)\n error -= (output[0] - xo[0]) ** 2\n return error", "def test_cnot():\n\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n X q0\n CNOT q0 q1\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.0, 0.0, 0.0, 1.0]).all()", "def test_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n register q2[2]\n register q3[3]\n X q2\n CNOT q2 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array)\n assert isclose(result, [1.0, 0.0, 1.0, 0.0]).all()", "def test_oscillating_network(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n [NOR1, I1] = names.lookup([\"Nor1\", \"I1\"])\n # Make NOR gate\n devices.make_device(NOR1, devices.NOR, 1)\n\n # Connect the NOR gate to itself\n network.make_connection(NOR1, None, NOR1, I1)\n\n assert not network.execute_network()", "def tests_truth():\n circ_m = ccxtest(4)\n print(circ_m)\n circ_m = crootnxtest(4)\n print(circ_m)\n circ_m = oracletest(4)\n print(circ_m)\n circ_m = ccx_otest(4)\n print(circ_m)", "def verify_output(self, output):\n return output == self.output", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def test_network(neural_network, test_data):\n total_trials = 0\n correct_trials = 0\n output_values = [np.argmax(neural_network.calculate_output(vector[1])) for vector in test_data]\n expected_values = list(zip(*test_data))[0]\n for expected, recieved in zip(expected_values,output_values):\n total_trials += 1\n if expected == recieved:\n correct_trials+=1\n return correct_trials/total_trials", "def test_get_connected_output(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n # Inputs are unconnected, get_connected_output should return None\n assert network.get_connected_output(OR1_ID, I1) is None\n assert network.get_connected_output(OR1_ID, I2) is None\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n assert network.get_connected_output(OR1_ID, I1) == (SW1_ID, None)\n assert network.get_connected_output(OR1_ID, I2) == (SW2_ID, None)\n\n # Not a valid port for Sw1, get_connected_output should return None\n assert network.get_connected_output(SW1_ID, I2) is None", "def 
fit_xor(network,complexity=False):\n samples = 4\n xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]\n xor_outputs = [0, 1, 1, 0]\n\n\n sum_square_error = 0.0\n for inputs, expected in zip(xor_inputs, xor_outputs):\n # Serial activation propagates the inputs through the entire network.\n output = network.activate(inputs)\n sum_square_error += (output[0] - expected) ** 2\n\n # When the output matches expected for all inputs, fitness will reach\n # its maximum value of 1.0.\n fitness = 4 - sum_square_error\n\n if complexity:\n # Add to the fitness if the inputs are connected to the outputs\n G = DiGraph(network.A.T)\n if has_path(G,0,3) and has_path(G,1,3):\n fitness += con_weight\n\n # Add to fitness if there are more than 3 edges\n if (network.A != 0).sum() > 3:\n fitness += .10\n\n return fitness", "def test_norn(task, result):\n if type(result) != str:\n c_print(f\"*** {task.host}: ERROR running Nornir task ***\")", "def evaluate(net, data_loader):\n correct = 0\n total = 0\n net.reset()\n for data in tqdm(data_loader):\n inputs, output = data\n mask, score = gate_activation(net, inputs.view(-1))\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n print()\n print()\n print(\"mask\", mask)\n print(\"score\", score)\n print(\"xo\", xo)\n total += 1\n correct += ((xo > 0.5) == output[0].item())\n\n return float(correct)/total", "def test_not_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ne\"},\n )", "def solve(self, network):\n # Convert to a network if it is not.\n if not isinstance(network, NeuralNetwork):\n network = NeuralNetwork(network)\n \n steps, _, _ = self._loop(network, max_steps=100000)\n if steps < 100000:\n print((\"Failed 100k test with %d\" % steps))\n return 0\n successes = 0\n points = np.array([-0.9, -0.5, 0.0, 0.5, 0.9])\n # The 1.35 and 0.15 were taken from the neat-python implementation.\n for x in points * self.h:\n for theta in points * self.r:\n for dx in points * 1.35:\n for dtheta0 in points * 0.15:\n state = (x, dx, np.array([theta, 0.0]), np.array([dtheta0, 0.0]))\n steps, states, _ = self._loop(network, initial=state, max_steps=1000)\n if steps >= 1000:\n successes += 1\n # return random.random() < 0.5\n return int(successes > 100)", "def test_evaluate_ne_expression(self):\n value = self.evaluate_common(\"2M ne 3M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"2D ne 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"2F ne 2D\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"2 ne 2L\")\n self.assertTrue(value.value is False, \"Expected False\")\n try:\n value = self.evaluate_common(\"2 ne '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"'2' ne '2'\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\n \"datetime'2013-08-30T18:49' ne datetime'2013-08-30T18:49'\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\n \"datetime'2013-08-30T18:49' ne datetime'2013-08-30T18:49:01'\")\n self.assertTrue(value.value is True, 
\"Expected True\")\n value = self.evaluate_common(\n \"datetimeoffset'2013-08-30T18:49:00Z' ne \"\n \"datetimeoffset'2013-08-30T19:49:00+01:00'\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\n \"datetimeoffset'2013-08-30T18:49:00Z' ne \"\n \"datetimeoffset'2013-08-30T18:49:00+01:00'\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\n \"guid'b3afeebc-9658-4699-9d9c-1df551fd6814' ne \"\n \"guid'b3afeebc-9658-4699-9d9c-1df551fd6814'\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\n \"guid'b3afeebc-9658-4699-9d9c-1df551fd6814' ne \"\n \"guid'3fa6109e-f09c-4c5e-a5f3-6cf38d35c9b5'\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"X'DEADBEEF' ne binary'deadbeef'\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"X'DEAD' ne binary'BEEF'\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"2 ne null\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"null ne null\")\n self.assertTrue(value.value is False, \"Expected False\")", "def test_check_network(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n\n # Inputs are unconnected, check_network() should return False\n assert not network.check_network()\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # Inputs are now connected, check_network() should return True\n assert network.check_network()", "def test_vncdr(backend, nqubits, noise, full_output, insertion_gate, readout):\n if backend.name == \"tensorflow\":\n import tensorflow as tf\n\n tf.config.threading.set_inter_op_parallelism_threads = 1\n tf.config.threading.set_intra_op_parallelism_threads = 1\n else:\n backend.set_threads(1)\n # Define the circuit\n c = get_circuit(nqubits)\n # Define the observable\n obs = np.prod([Z(i) for i in range(nqubits)])\n obs = SymbolicHamiltonian(obs, backend=backend)\n # Noise-free expected value\n exact = obs.expectation(backend.execute_circuit(c).state())\n # Noisy expected value without mitigation\n if \"calibration_matrix\" in readout.keys() or \"ncircuits\" in readout.keys():\n if nqubits == 1:\n p = cal_matrix_1q\n elif nqubits == 3:\n p = cal_matrix_3q\n # noise.add(ReadoutError(probabilities=p),gate=gates.M)\n state = backend.execute_circuit(noise.apply(c), nshots=10000)\n noisy = state.expectation_from_samples(obs)\n # Mitigated expected value\n estimate = vnCDR(\n circuit=c,\n observable=obs,\n backend=backend,\n noise_levels=range(3),\n noise_model=noise,\n nshots=10000,\n n_training_samples=20,\n insertion_gate=insertion_gate,\n full_output=full_output,\n readout=readout,\n )\n if full_output:\n estimate = estimate[0]\n assert np.abs(exact - estimate) <= np.abs(exact - noisy)", "def convert_logical_not(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0)\n g.add_node(op.output(\"Out\")[0], out)", "def graph_helper(device, output,input,target):\n output = output.clone().squeeze()\n corrects = torch.zeros(output.shape[0])\n for i in range(output.shape[0]): # goes through each iteration\n outputi = output[i]\n golden_label = convert_to_bits(device, 
outputi, input)\n target = target.view(target.size(0), -1)\n corrects[i] += torch.amin(golden_label == target, dim=[0]).sum().item() # counts the number that are the same i.e. correct predictions\n correct = corrects.cpu().detach().numpy()\n return correct", "def test_not_equal_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::ne\"},\n )", "def test_others(self):\n outputAssert = self.buildingTests([\"Hola me gust@ programar en ICC 1.03\"])\n self.assertTrue((outputAssert[0][4] == outputAssert[1][4] and outputAssert[0][5] == outputAssert[1][5]) ^ (outputAssert[0][4] == outputAssert[1][5]) , f\"El resultado debería ser: \\\"{outputAssert[1][5]}\\\"\")", "def neural_network(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0", "def test_XOR():\n\tk, outputs = 2, [0,1,1,0]\n\n\ttrue_pi0s = set(['00','11'])\n\ttrue_pi1s = set(['01','10'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('11',[],[[0,1]]),('00',[],[[0,1]])]\n\ttrue_ts1s = [('10',[[0,1]],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. %s != %s' % (ts1s,true_ts1s))", "def neural_result(self, input):\n n_output = self.network.activate(input)\n if n_output >= 0.5:\n return 2\n else:\n return 1", "def cmd_net_contest():\n\n print(\"DNS: %s\" % contest.check_dns())\n print(\"FTP: %s\" % contest.check_ftp())\n print(\"SSH: %s\" % contest.check_ssh())\n print(\"HTTP: %s\" % contest.check_http())\n print(\"HTTPS: %s\" % contest.check_https())", "def test_z_remote_command(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport subprocess\n\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"check_nrpe\"])\n\t\t\tif (str(\"/check_nrpe\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\t\ttry:\n\t\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"ssh\"])\n\t\t\t\tif (str(\"/ssh\") in str(theOutputtext)):\n\t\t\t\t\ttheResult = True\n\t\t\texcept Exception:\n\t\t\t\ttheResult = False\n\t\tassert theResult", "def xor_network():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 0],\n [1, 0, 1],\n [0, 1, 1],\n [0, 0, 0],\n ])\n cm = np.array([\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])" ]
[ "0.7244407", "0.6555086", "0.60672414", "0.5958014", "0.5866374", "0.58132195", "0.57049644", "0.5648179", "0.5560035", "0.55518097", "0.55496335", "0.5549202", "0.5501493", "0.55004686", "0.5457115", "0.5452267", "0.5450071", "0.5425552", "0.5411698", "0.5404994", "0.54039335", "0.53640485", "0.53387254", "0.53343415", "0.5323857", "0.53171325", "0.53095984", "0.5304395", "0.53014135", "0.5277694" ]
0.723775
1
Test if execute_network returns the correct output for non-gate devices. Tests switches, D-types and clocks.
def test_execute_non_gates(new_network): network = new_network devices = network.devices names = devices.names LOW = devices.LOW HIGH = devices.HIGH # Make different devices [SW1_ID, SW2_ID, SW3_ID, CL_ID, D_ID] = names.lookup(["Sw1", "Sw2", "Sw3", "Clock1", "D1"]) devices.make_device(SW1_ID, devices.SWITCH, 1) devices.make_device(SW2_ID, devices.SWITCH, 0) devices.make_device(SW3_ID, devices.SWITCH, 0) devices.make_device(CL_ID, devices.CLOCK, 1) devices.make_device(D_ID, devices.D_TYPE) # Make connections network.make_connection(SW1_ID, None, D_ID, devices.DATA_ID) network.make_connection(CL_ID, None, D_ID, devices.CLK_ID) network.make_connection(SW2_ID, None, D_ID, devices.SET_ID) network.make_connection(SW3_ID, None, D_ID, devices.CLEAR_ID) # Get device outputs, the expression is in a string here so that it # can be re-evaluated again after executing devices sw1_output = "network.get_output_signal(SW1_ID, None)" sw2_output = "network.get_output_signal(SW2_ID, None)" sw3_output = "network.get_output_signal(SW3_ID, None)" clock_output = "network.get_output_signal(CL_ID, None)" dtype_Q = "network.get_output_signal(D_ID, devices.Q_ID)" dtype_QBAR = "network.get_output_signal(D_ID, devices.QBAR_ID)" # Execute devices until the clock is LOW at the start of its # period clock_device = devices.get_device(CL_ID) network.execute_network() while clock_device.clock_counter != 1 or eval(clock_output) != LOW: network.execute_network() # The clock is not rising yet, Q could be (randomly) HIGH or LOW assert [eval(sw1_output), eval(sw2_output), eval(sw3_output), eval(clock_output)] == [HIGH, LOW, LOW, LOW] assert eval(dtype_Q) in [HIGH, LOW] assert eval(dtype_QBAR) == network.invert_signal(eval(dtype_Q)) network.execute_network() # the clock has risen # While sw1(DATA) is high, Q has now changed to HIGH assert [eval(sw1_output), eval(sw2_output), eval(sw3_output), eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [ HIGH, LOW, LOW, HIGH, HIGH, LOW] devices.set_switch(SW1_ID, LOW) # Sw1 is connected to DATA devices.set_switch(SW2_ID, HIGH) # Sw2 is connected to SET network.execute_network() # the clock is not rising yet network.execute_network() # the clock has risen # Even if sw1(DATA) is LOW, and the clock is rising, # sw2(SET) is HIGH, so Q is HIGH assert [eval(sw1_output), eval(sw2_output), eval(sw3_output), eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [ LOW, HIGH, LOW, HIGH, HIGH, LOW] devices.set_switch(SW1_ID, HIGH) # Sw1 is connected to DATA devices.set_switch(SW2_ID, LOW) # Sw2 is connected to SET devices.set_switch(SW3_ID, HIGH) # Sw3 is connected to CLEAR network.execute_network() # the clock is not rising yet network.execute_network() # the clock has risen # Even if sw1(DATA) is HIGH, and the clock is rising, # sw3(CLEAR) is HIGH, so Q is LOW assert [eval(sw1_output), eval(sw2_output), eval(sw3_output), eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [ HIGH, LOW, HIGH, HIGH, LOW, HIGH]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_execute_non_xor_gates(new_network, gate_id, switch_outputs,\n gate_output, gate_kind):\n network = new_network\n devices = network.devices\n names = devices.names\n\n [AND1_ID, OR1_ID, NAND1_ID, NOR1_ID, SW1_ID, SW2_ID, SW3_ID, I1, I2,\n I3] = names.lookup([\"And1\", \"Or1\", \"Nand1\", \"Nor1\", \"Sw1\", \"Sw2\", \"Sw3\",\n \"I1\", \"I2\", \"I3\"])\n\n LOW = devices.LOW\n HIGH = devices.HIGH\n\n # Make devices\n gate_id = eval(gate_id)\n gate_kind = eval(gate_kind)\n devices.make_device(gate_id, gate_kind, 3)\n devices.make_device(SW1_ID, devices.SWITCH, 0)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n devices.make_device(SW3_ID, devices.SWITCH, 0)\n\n # Make connections\n network.make_connection(SW1_ID, None, gate_id, I1)\n network.make_connection(SW2_ID, None, gate_id, I2)\n network.make_connection(SW3_ID, None, gate_id, I3)\n\n # Set switches\n switches = [SW1_ID, SW2_ID, SW3_ID]\n for i, switch_output in enumerate(switch_outputs):\n devices.set_switch(switches[i], eval(switch_output))\n\n network.execute_network()\n assert network.get_output_signal(gate_id, None) == eval(gate_output)", "def test_get_network(self):\n pass", "def cmd_net_contest():\n\n print(\"DNS: %s\" % contest.check_dns())\n print(\"FTP: %s\" % contest.check_ftp())\n print(\"SSH: %s\" % contest.check_ssh())\n print(\"HTTP: %s\" % contest.check_http())\n print(\"HTTPS: %s\" % contest.check_https())", "def test_get_connected_output(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n # Inputs are unconnected, get_connected_output should return None\n assert network.get_connected_output(OR1_ID, I1) is None\n assert network.get_connected_output(OR1_ID, I2) is None\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n assert network.get_connected_output(OR1_ID, I1) == (SW1_ID, None)\n assert network.get_connected_output(OR1_ID, I2) == (SW2_ID, None)\n\n # Not a valid port for Sw1, get_connected_output should return None\n assert network.get_connected_output(SW1_ID, I2) is None", "def test_check_network(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n\n # Inputs are unconnected, check_network() should return False\n assert not network.check_network()\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # Inputs are now connected, check_network() should return True\n assert network.check_network()", "def test_verify_list_of_devices_in_my_network():", "def test_execute_xor(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, XOR1_ID, I1, I2] = names.lookup(\n [\"Sw1\", \"Sw2\", \"Xor1\", \"I1\", \"I2\"])\n\n # Make devices\n devices.make_device(XOR1_ID, devices.XOR)\n devices.make_device(SW1_ID, devices.SWITCH, 0)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n\n # Make connections\n network.make_connection(SW1_ID, None, XOR1_ID, I1)\n network.make_connection(SW2_ID, None, XOR1_ID, I2)\n\n network.execute_network()\n assert new_network.get_output_signal(XOR1_ID, None) == devices.LOW\n\n # Set Sw1 to HIGH\n devices.set_switch(SW1_ID, devices.HIGH)\n network.execute_network()\n assert 
network.get_output_signal(XOR1_ID, None) == devices.HIGH\n\n # Set Sw2 to HIGH\n devices.set_switch(SW2_ID, devices.HIGH)\n network.execute_network()\n assert network.get_output_signal(XOR1_ID, None) == devices.LOW", "def test_will_get_nwis_return_response():\n\n expected = 200\n response = hf.get_nwis('01585200', 'dv', '2001-01-01', '2001-01-02')\n actual = response.status_code\n assert expected == actual\n print('NWIS is up and running!')", "def test_get_networks(self):\n pass", "def test_send_network(self) :\n symbol = 'A' \n oProtocol = Protocol(symbol,mode=\"client\",debug=self.debug)\n command = \"N200\"\n message = oProtocol.send(command)\n #if message['status'] is False :\n #print(\"\\n*** ERROR : test_send_network : {}\".format(message['notify']))\n\n #Pour enregistrer les traces d'appels de fonctions dans le fichier log/client_calltrack_sorted.txt\n client_tracker_print()\n self.assertTrue( (message['status'] is not True) )", "def test_udp_alt_rectype_and_iteration():\n cmd = [\n \"python\",\n \"dnsck/dnsck.py\",\n \"-s\",\n \"8.8.8.8\",\n \"google.com\",\n \"-t\",\n \"soa\",\n \"-i\",\n \"2\",\n ]\n process = subprocess.run(cmd, shell=False, check=True)\n assert process.returncode == 0", "def test_norn(task, result):\n if type(result) != str:\n c_print(f\"*** {task.host}: ERROR running Nornir task ***\")", "def test_verify_connection_to_a_device():", "def check_network(config_name, urls = ''):\n\n logging.info(\"calling obsolete network diagnotic. Use '-interactive' instead\")\n\n config = config_namespace.ConfigNameSpace({})\n config.ExecFile(config_name)\n # get relevant parameters from config file:\n dns_servers = string.split(config.namespace['BOT_DNS_SERVERS'], ',')\n\n if Check_Gateway(config.namespace['EXTERNAL_DEFAULT_ROUTE']) != 0:\n return 1\n\n good_dns_servers = 0\n for s in dns_servers:\n if Check_DNS(s) != 4: # all other errors are non-fatal\n good_dns_servers = good_dns_servers + 1\n # if no DNS servers are up, we give up:\n if good_dns_servers == 0:\n return 1\n\n # First check the SMTP server\n logging.info(\"testing SMTP server %s\" % config.namespace['SMTP_SERVER'] )\n Check_SMTP(config.namespace['SMTP_SERVER'],\n config.namespace['EXTERNAL_CRAWL_IP'])\n\n # what about NTP:\n logging.info(\"testing NTP server %s\" % config.namespace['NTP_SERVERS'])\n for s in config.namespace['NTP_SERVERS']:\n Check_NTP(s)\n\n # SYSLOG server:\n logging.info(\"testing SYSLOG server %s\" % config.namespace['SYSLOG_SERVER'] )\n Check_SYSLOG(config.namespace['SYSLOG_SERVER'])\n\n # OK, now walk over all collections and try to get starturls\n for u in urls:\n check_url(u, dns_servers)\n\n return 0", "def test_verify_state_of_a_device():", "def test_single_scan_while_pno(self):\n self.log.info(\"Check connection through PNO for reference network\")\n current_network = self.dut.droid.wifiGetConnectionInfo()\n self.log.info(\"Current network: {}\".format(current_network))\n asserts.assert_true('network_id' in current_network, NETWORK_ID_ERROR)\n asserts.assert_true(current_network['network_id'] >= 0, NETWORK_ERROR)\n self.log.info(\"Kicking PNO for reference network\")\n self.attenuators[ATTENUATOR].set_atten(90)\n time.sleep(10) #wait for PNO to be kicked\n self.log.info(\"Starting single scan while PNO\")\n self.wifi_scanner_single_scan(self.default_scan_setting)\n self.attenuators[ATTENUATOR].set_atten(0)\n self.log.info(\"Check connection through PNO for reference network\")\n time.sleep(30) #wait for connection through PNO\n current_network = 
self.dut.droid.wifiGetConnectionInfo()\n self.log.info(\"Current network: {}\".format(current_network))\n asserts.assert_true('network_id' in current_network, NETWORK_ID_ERROR)\n asserts.assert_true(current_network['network_id'] >= 0, NETWORK_ERROR)\n time.sleep(10) #wait for IP to be assigned\n asserts.assert_true(\n wutils.validate_connection(self.dut, self.ping_addr),\n \"Error, No internet connection for current network\")\n wutils.wifi_forget_network(self.dut,\n self.reference_networks[0][\"2g\"][\"SSID\"])", "def test_using_mirror_output_type():\n\n def check_correct_type(index):\n # Force a race condition\n if index == 0:\n sleep(0.1)\n if index % 2 == 0:\n with _using_mirror_output_type():\n sleep(0.5)\n return cuml.global_settings.output_type == \"mirror\"\n else:\n output_type = test_output_types_str[index]\n with using_output_type(output_type):\n sleep(0.5)\n return cuml.global_settings.output_type == output_type\n\n results = [\n delayed(check_correct_type)(index)\n for index in range(len(test_output_types_str))\n ]\n\n assert (delayed(all)(results)).compute()", "def test_network(bpn, test_data):\n DisplayNetwork.display_green(\"[INFO] Started to test the network\")\n output = bpn.Run(np.array(test_data))\n return output", "def test_run_nicv(self):\n\n traces, keys, plain = FileLoader.main(CONST_DEFAULT_TRACES_FILE,\n CONST_DEFAULT_KEYS_FILE,\n CONST_DEFAULT_PLAIN_FILE)\n\n result = NICV.run(traces, keys, plain)\n\n self.assertTrue(1398 in result)", "def display_nornir_results(nr_result):\n print(menu('Interface Update'))\n for host in nr_result:\n if not nr_result[host].failed:\n if nr_result[host].changed:\n print(Fore.YELLOW + f'{host}: True')\n else:\n print(Fore.GREEN + f'{host}: False')\n else:\n print(Fore.RED + f'{host}: FAILED')", "def test_net_ping(self):\n\n self.assertEquals(self.scanner.ping(type='net'), True)\n\n # Test timeout\n self.assertRaises(\n ScanError,\n self.scanner.ping,\n {'type': 'net', 'timeout': 1.0e-16})", "def test_udp_alt_rectype():\n cmd = [\n \"python\",\n \"dnsck/dnsck.py\",\n \"-s\",\n \"8.8.8.8\",\n \"google.com\",\n \"-t\",\n \"txt\",\n \"-i\",\n \"1\",\n ]\n process = subprocess.run(cmd, shell=False, check=True)\n assert process.returncode == 0", "def validate_network(options):\n\n # Start marker for time measure\n start = time.time()\n\n #------------#\n # INPUTS #\n #------------#\n\n # Our input network 1\n input_network_file = options.network_file\n type_id = options.type_id\n network_format = options.network_format\n name_input_network = 'INPUT'\n\n # HIPPIE network\n hippie_file = '/home/quim/Databases/hippie/HIPPIE-current.mitab.txt'\n hippie_type_id = 'geneID' # It can be geneID or UniprotEntry\n output_hippie_file = '/home/quim/data/networks/HIPPIE/HIPPIE.{}.multifields'.format(hippie_type_id)\n output_hippie_newID_file = '/home/quim/data/networks/HIPPIE/HIPPIE.{}.multifields'.format(type_id)\n hippie_network_format = 'multi-fields'\n\n # ConsensusPathDB network\n ConsensusPathDB_file = '/home/quim/data/networks/ConsensusPathDB/ConsensusPathDB_human_PPI'\n output_ConsensusPath_file = '/home/quim/data/networks/ConsensusPathDB/ConsensusPathDB_human_PPI.multifields'\n output_ConsensusPath_newID_file = '/home/quim/data/networks/ConsensusPathDB/ConsensusPathDB_human_PPI.{}.multifields'.format(type_id)\n consensus_network_format = 'multi-fields'\n\n # I2D network\n I2D_file = '/home/quim/data/networks/I2D/i2d.2_9.Public.HUMAN.tab'\n output_I2D_file = '/home/quim/data/networks/I2D/i2d.2_9.Public.HUMAN.multifields'\n output_I2D_newID_file 
= '/home/quim/data/networks/I2D/i2d.2_9.Public.HUMAN.{}.multifields'.format(type_id)\n I2D_network_format = 'multi-fields'\n\n\n #------------------------#\n # DEFINE OUR NETWORK #\n #------------------------#\n\n # Define the input network\n network = NA.Network(input_network_file, type_id, network_format)\n\n print('{} network'.format(name_input_network))\n print('Number of edges: {}'.format(len(network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(network.get_nodes())))\n\n\n #---------------------------#\n # DEFINE HIPPIE NETWORK #\n #---------------------------#\n\n if not fileExist(output_hippie_file):\n hippie_instance = VA.HippieParser(hippie_file)\n hippie_instance.parse()\n hippie_network = hippie_instance.write_network_file(output_hippie_file, hippie_network_format, hippie_type_id)\n else:\n hippie_network = NA.Network(output_hippie_file, hippie_type_id, hippie_network_format)\n\n # Translate HIPPIE to 'type_id'\n if type_id.lower() != hippie_type_id.lower():\n if not fileExist(output_hippie_newID_file):\n hippie_network = VA.translate_network_from_BIANA(hippie_network, hippie_type_id, type_id, output_hippie_newID_file)\n else:\n hippie_network = NA.Network(output_hippie_newID_file, type_id, hippie_network_format)\n\n print('HIPPIE network')\n print('Number of edges: {}'.format(len(hippie_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(hippie_network.get_nodes())))\n\n\n #------------------------------------#\n # DEFINE CONSENSUSPATHDB NETWORK #\n #------------------------------------#\n\n if not fileExist(output_ConsensusPath_file):\n consensus_instance = VA.ConsensusPathDBParser(ConsensusPathDB_file)\n consensus_network_uniprot = consensus_instance.parse(output_ConsensusPath_file, consensus_network_format)\n else:\n consensus_network_uniprot = NA.Network(output_ConsensusPath_file, 'uniprotentry', consensus_network_format)\n\n # Translate ConsensusPathDB to 'type_id'\n if type_id.lower() != 'uniprotentry':\n if not fileExist(output_ConsensusPath_newID_file):\n consensus_network = VA.translate_network_from_BIANA(consensus_network_uniprot, 'uniprotentry', type_id, output_ConsensusPath_newID_file)\n else:\n consensus_network = NA.Network(output_ConsensusPath_newID_file, type_id, consensus_network_format)\n else:\n consensus_network = consensus_network_uniprot\n\n print('ConsensusPath (uniprotentry) network')\n print('Number of edges: {}'.format(len(consensus_network_uniprot.get_edges())))\n print('Number of nodes: {}\\n'.format(len(consensus_network_uniprot.get_nodes())))\n\n print('ConsensusPath network')\n print('Number of edges: {}'.format(len(consensus_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(consensus_network.get_nodes())))\n\n\n #------------------------#\n # DEFINE I2D NETWORK #\n #------------------------#\n\n if not fileExist(output_I2D_file):\n I2D_instance = VA.I2DParser(I2D_file)\n I2D_network_uniprot = I2D_instance.parse(output_I2D_file, I2D_network_format)\n else:\n I2D_network_uniprot = NA.Network(output_I2D_file, 'uniprotaccession', I2D_network_format)\n\n # Translate I2D to 'type_id'\n if type_id.lower() != 'uniprotaccession':\n if not fileExist(output_I2D_newID_file):\n I2D_network = VA.translate_network_from_BIANA(I2D_network_uniprot, 'uniprotaccession', type_id, output_I2D_newID_file)\n else:\n I2D_network = NA.Network(output_I2D_newID_file, type_id, I2D_network_format)\n else:\n I2D_network = I2D_network_uniprot\n\n print('I2D (uniprotaccession) network')\n print('Number of edges: 
{}'.format(len(I2D_network_uniprot.get_edges())))\n print('Number of nodes: {}\\n'.format(len(I2D_network_uniprot.get_nodes())))\n\n print('I2D network')\n print('Number of edges: {}'.format(len(I2D_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(I2D_network.get_nodes())))\n\n\n #----------------------------------#\n # CHECK OVERLAP OF NODES/EDGES #\n #----------------------------------#\n\n print_summary_overlap(network, hippie_network, name_input_network, 'HIPPIE')\n print_summary_overlap(network, consensus_network, name_input_network, 'CONSENSUSPATHDB')\n print_summary_overlap(network, I2D_network, name_input_network, 'I2D')\n\n # print_summary_overlap(hippie_network, consensus_network, 'HIPPIE', 'CONSENSUSPATHDB')\n # print_summary_overlap(hippie_network, I2D_network, 'HIPPIE', 'I2D')\n # print_summary_overlap(consensus_network, I2D_network, 'CONSENSUSPATHDB', 'I2D')\n\n\n # End marker for time\n end = time.time()\n print('\\nTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\\n'.format(end - start, (end - start) / 60))", "def main():\n return run_network_interface_check()", "def _checknet():\n exit_code = os.system('ping -c 1 www.baidu.com 1>/dev/null 2>&1')\n return exit_code", "def test_netstat():\n ret = (\n \" Proto Local Address Foreign Address State PID\\n\"\n \" TCP 127.0.0.1:1434 0.0.0.0:0 LISTENING 1728\\n\"\n \" UDP 127.0.0.1:1900 *:* 4240\"\n )\n mock = MagicMock(return_value=ret)\n with patch.dict(win_network.__salt__, {\"cmd.run\": mock}):\n assert win_network.netstat() == [\n {\n \"local-address\": \"127.0.0.1:1434\",\n \"program\": \"1728\",\n \"proto\": \"TCP\",\n \"remote-address\": \"0.0.0.0:0\",\n \"state\": \"LISTENING\",\n },\n {\n \"local-address\": \"127.0.0.1:1900\",\n \"program\": \"4240\",\n \"proto\": \"UDP\",\n \"remote-address\": \"*:*\",\n \"state\": None,\n },\n ]", "def test_verify_state_of_a_device_when_disconnected_from_the_device():", "async def network_select():\n msg = \"\"\"\\\nWelcome to Proof Wallet,\nthe dedicated PSBT multisig UI for Bitcoin Core.\n\nChoose a network:\n(1) Mainnet\n(2) Testnet\n(3) Regtest\n\"\"\"\n return await ux_show_story(msg, ['1','2','3','q'])", "def connect_to_wifi_network(SSID,Passphrase,security_mode):\r\n read_outputDict={}\r\n read_outputDict[\"status\"]=''\r\n #default path to connect wifi\r\n script_path=\"/usr/local/autotest/cros/scripts\"\r\n # os.chdir() is used to change dir to wifi script path\r\n change_dir = os.chdir(script_path)\r\n #cmd is used to connect to SSID with/without passphrase\r\n connect_cmd=\"./wifi connect \"+ SSID +\" \"+Passphrase +\" \"+ security_mode +\" >\" + \"status.txt\"\r\n #Popen then cmd and get th output to validate whether is connected or not\r\n get_output=subprocess.Popen(connect_cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True) \r\n \"\"\"if get_output.stderr:\r\n raise error.TestFail(\"Failed to connect to network\",SSID)\r\n else:\r\n print(\"Error \",get_output.stderr.readlines()) \"\"\" \r\n time.sleep(Delay_time)", "def test_udp_alt_iteration():\n cmd = [\"python\", \"dnsck/dnsck.py\", \"-s\", \"8.8.8.8\", \"google.com\", \"-i\", \"1\"]\n process = subprocess.run(cmd, shell=False, check=True)\n assert process.returncode == 0" ]
[ "0.64358634", "0.6286758", "0.6224356", "0.6207054", "0.6135582", "0.6115254", "0.6070223", "0.6026147", "0.6020904", "0.591166", "0.58820087", "0.58704305", "0.58060014", "0.57573897", "0.57547915", "0.5727722", "0.56890357", "0.56707734", "0.56616163", "0.561187", "0.56103134", "0.55883694", "0.5573987", "0.5570962", "0.5569847", "0.55654174", "0.55628824", "0.555556", "0.5545129", "0.5541755" ]
0.7138289
0
Renders a Markdown-formatted string as HTML.
def markdown_to_html(s): return markdown(s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_markdown(text):\n return clean_markdown_html(markdown(force_str(text), **MARKDOWN_KWARGS))", "def render_markdown(text):\n return markdown(text, **MARKDOWN_KWARGS)", "def mdhtml_to_html(data_str):\n mdrenderer = mistune.Renderer()\n markdown = mistune.Markdown(renderer=mdrenderer)\n return markdown(data_str)", "def render_content(self):\n return mark_safe(markdown(self.content))", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def htmlForMarkdown(md):\n return mdProcessor.convert(md)", "def markdown(value):\n return Markup(md(value))", "def to_markdown(html_string, safe_tags=None, safe_attrs=None):\n # out = StringIO()\n # for f in parse_fragments(html_string, safe_tags=None, safe_attrs=None):\n # handlers.process_tag_events(f, out)\n # return normalize(out.getvalue())\n return handlers.render(*parse_fragments(html_string, safe_tags))", "def markdown_text(context, text, autoescape=True):\n includes = ''\n if 'render_markdown_src' not in context:\n # The first markdown in the template - include JS.\n context['render_markdown_src'] = True\n # Ask Webpack for the location of the asset.\n includes = '\\n'.join(webpack_utils.get_as_tags('common-render-markdown'))\n result_html = html.format_html(\n '<span class=\"markdown\">{}</span>{}',\n text,\n safestring.mark_safe(includes),\n )\n return result_html", "def markdown(s):\n md = markdown_module.Markdown(MARKDOWN_EXTENSIONS, safe_mode='remove')\n return mark_safe(md.convert(s))", "def markdown(text, *args, **kwargs):\n md = StMarkdown(*args, **kwargs)\n return md.convert(text)", "def markdown(self, text):\n\n # Remove rel attributes as they are not supported by html2markdown\n text = re.sub(r' rel=\".+?\">', \">\", text)\n\n # Convert html to markdown\n text = html2markdown.convert(text)\n\n # Decode [<>&] characters\n text = text.replace(\"&lt;\", \"<\").replace(\"&gt;\", \">\").replace(\"&amp;\", \"&\")\n\n # Wrap as Rich Markdown\n return Markdown(text)", "def html(self):\n html = markdown.markdown('\\n'.join(self.body))\n if self.style:\n return premailer.transform('<style>\\n' + self.style +\n '\\n</style>\\n' + html)\n return html", "def render(self):\n self._render_text = self.content.replace('\\n', '<br>') # deal with new line\n return render_str(\"post.html\", p = self)", "def render(text):\n if text is None:\n return None\n\n # We use a non-standard math extension to Markdown which is delimited\n # by either `$$` or `\\( some maths \\)`. 
The escaped brackets are\n # naturally converted into literal brackets in Markdown, so to preserve\n # them we'll double escape them.\n text = text.replace(\"\\\\(\", \"\\\\\\\\(\").replace(\"\\\\)\", \"\\\\\\\\)\")\n\n return _get_cleaner().clean((RENDER_MARKDOWN(text)))", "def md2html():\n # Get the markdown text from the request\n data = json.loads(request.data)\n md_text = data['md_text']\n\n # Convert the markdown text to html format\n html_text = convert(md_text)\n\n # Generate response and send it to the front end\n response_dict = {\"html_text\": html_text}\n response = jsonify(response_dict)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response", "def render_markdown_raw(self, text: uplink.Body):\n pass", "def html_manual_format(string):\n return html_div(string, \"manualfmt\")", "def __html__(self, file_path:str) -> str:\n with open(f\"{file_path}\", \"r\") as mdfile: # Parse markdown file\n text = mdfile.read()\n html = self.md.convert(text) # Convert the markdown content text to hmtl\n return html", "def markdown_render_conditional(text, rich_text):\n if rich_text:\n return render_markdown(text)\n else:\n return escape(text)", "def markdown(text):\n text = gfm(text)\n text = markdown_lib.markdown(text)\n return text", "def output_to_html(string_data):\n raise NotImplementedError(\"This function is not yet Implemented!\")", "def render_note(note: str) -> str:\n note = emojize(note)\n note = markdown(note, extensions=['nl2br'])\n return note", "def markdown(text, **kwargs):\n import markdown\n return markdown.markdown(text, **kwargs)", "def _parse_markdown(self):\n renderer = MyRenderer()\n md = mistune.Markdown(renderer=renderer)\n md.render(self._markdown_text)\n self._bash_commands = renderer._bash_commands", "def render(self):\n\n self._render_text = self.content.replace('\\n', '<br>')\n return render_str('post_template.html', post=self, user=self.user)", "def htmlise(s):\n return '<div><pre class=\"tablecell\">' + html.escape(s) + '</pre></div>'", "def render(self, value, context=None):\n if self.raw_html is not None:\n return format_html(self.raw_html)\n else:\n return ''", "def render_md(filepath, file):\n try:\n with open(path.join(str(filepath[0]), str(file))) as fd:\n mkdwn = fd.read()\n html = markdown(mkdwn)\n except Exception:\n html = None\n\n return html", "def render_html(self):\n return self.template.render(content=self.content, **self.styles)" ]
[ "0.7279775", "0.7157913", "0.69744706", "0.6868016", "0.6853341", "0.6828946", "0.67198783", "0.6678728", "0.65148413", "0.6497659", "0.64447343", "0.6409846", "0.63674206", "0.62986237", "0.6293463", "0.6277161", "0.6252812", "0.62270266", "0.62042814", "0.61876", "0.6187383", "0.61362565", "0.61228406", "0.61113364", "0.61065775", "0.6066084", "0.6059458", "0.6041919", "0.60334504", "0.59909874" ]
0.7307166
0
Splits a comma-delimited string.
def split_by_comma(s):
    return s.strip().split(",")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def separate_comma(s):\n return s.split(',')", "def split_by_comma_and_whitespace(s):\r\n return re.split(r'[\\s,]+', s)", "def split_by_comma_and_whitespace(a_str):\r\n return re.split(r'[\\s,]', a_str)", "def split_value(string):\n split = string.split(',')\n result = []\n\n level = 0\n buf = []\n for entry in split:\n level += entry.count('(')\n level -= entry.count(')')\n\n buf.append(entry)\n if level == 0:\n result.append(','.join(buf))\n buf = []\n return result", "def split_line(line: str) -> [str]:\n return line.strip().split(',')", "def split_into_columns(s):\n\ts = re.sub(',,,', ',0,0,', s)\n\ts = re.sub(',,', ',0,', s)\n\treturn s.split(',')", "def split(a):\r\n compos = [-1] # compos stores the positions of the relevant commas in the argument string\r\n compos.extend(t[2][1] for t in generate_tokens(StringIO(a).readline) if t[1] == ',')\r\n compos.append(len(a))\r\n return [ a[compos[i]+1:compos[i+1]] for i in xrange(len(compos)-1)]", "def commaSplitter(str):\n # leave this here as a reminder of what I should do to make the argument parsing more robust\n\n # if sqrt != int(sqrt):\n # msg = \"%r is not a perfect square\" % string\n # raise argparse.ArgumentTypeError(msg)\n # return value\n return str.split(',')", "def split(string):\n names = []\n index = 0\n last = 0\n for letter in string:\n if ((letter == ',') or (index == (len(string) - 1))):\n if (index == (len(string) - 1)):\n names.append(string[last:(index+1)])\n else:\n names.append(string[last:index])\n last = index+2\n index += 1\n return names", "def safe_split(string, sep=','):\n regex = re.escape(sep) + r'\\s*(?![^\\[\\]]*\\])(?![^()]*\\))'\n return re.split(regex, string)", "def _split_makeotf_options(comma_str):\n if not comma_str.startswith('-'):\n comma_str = '-' + comma_str\n return comma_str.split(',')", "def split_choices(choices_string):\n return [x.strip() for x in choices_string.split(\",\") if x.strip()]", "def split(self, text):\n\n return [x.strip() for x in text.split(\",\")]", "def split_values(self, value):\n if value:\n return [s.strip() for s in value.split(',')]\n else:\n return []", "def parse(text):\n # Make sure that there's text to be split\n if text == None:\n return text\n return text.split(',')", "def split_line(line):\n if ',' in line:\n return [a.strip() for a in line.split(',')]\n return line.split()", "def comma_list(s):\n\n return tuple(int(v) for v in s.split(\",\"))", "def split_conf_str(string):\n return list(filter(None, string.split(\",\")))", "def split_by_commas(maybe_s: str) -> tuple[str, ...]:\n if not maybe_s:\n return ()\n parts: list[str] = []\n split_by_backslash = maybe_s.split(r'\\,')\n for split_by_backslash_part in split_by_backslash:\n splitby_comma = split_by_backslash_part.split(',')\n if parts:\n parts[-1] += ',' + splitby_comma[0]\n else:\n parts.append(splitby_comma[0])\n parts.extend(splitby_comma[1:])\n return tuple(parts)", "def split(self, string):\n return (re.split('; |, |: |\"(\"|\"(\"|;|,|:| |', string))", "def __split_for_delimiter__(self, string):\n if not self.__delimiter__ == '':\n return string.split(self.__delimiter__)\n return string.split()", "def split_on_commas(string):\n items = []\n char_buffer = []\n openings = []\n for i, char in enumerate(string):\n if char == ',' and len(openings) == 0:\n items.append(\"\".join(char_buffer))\n char_buffer = []\n continue\n elif char == ' ' and len(char_buffer) == 0:\n continue\n elif char == '(' or char == '[':\n openings.append(char)\n elif char == ')':\n if openings.pop() != '(':\n raise 
Exception('Invalid bracket end \")\", col {}.'.format(i))\n elif char == ']':\n if openings.pop() != '[':\n raise Exception('Invalid bracket end \"]\", col {}.'.format(i))\n char_buffer.append(char)\n items.append(\"\".join(char_buffer))\n return items", "def _split_parameters(self, parameters):\n if not parameters:\n return []\n return [parameter.strip() for parameter in parameters.split(', ')]", "def line_split(self, line):\n\t\tline = re.sub(r\"`(.*?)'\", quote_replace, line)\n\t\tline = line.translate(None, '.:,()+*')\n\t\treturn line.split()", "def commaStringParse(string):\n dels = []\n cur = \"\"\n length = len(string)\n for c in string:\n # skip spaces outside words\n if c == \" \" and cur == \"\":\n continue\n # new delegation found\n elif c == \",\":\n dels.append(cur)\n cur = \"\"\n # last name in list\n elif string.index(c) == length - 1:\n cur += c\n dels.append(cur)\n else:\n cur += c\n return dels", "def from_csv_line(line):\r\n return line.strip().split(',')", "def split(text, delim=','):\n return [x.strip() for x in text.split(delim)]", "def split_cmdline_filter_items(string):\n filter_items = string.split(',')\n return filter_items", "def parse_csv_option(option):\n if option:\n return option.split(',')\n else:\n return []", "def parse_csv_option(option):\n if option:\n return option.split(',')\n else:\n return []" ]
[ "0.8214036", "0.7340147", "0.7091655", "0.7032393", "0.70248574", "0.69270283", "0.6926893", "0.6860048", "0.6846079", "0.6821014", "0.6803655", "0.67259437", "0.66994894", "0.66705036", "0.6661326", "0.66231436", "0.66043615", "0.65586185", "0.6540317", "0.65376675", "0.6520199", "0.6479943", "0.64524126", "0.64165837", "0.6334932", "0.6310105", "0.63014525", "0.63003063", "0.629613", "0.629613" ]
0.84413457
0
Handles Clair's `fixed_in_version`, which _may_ be URL-encoded. The API guarantee is only that the field is a string, so encoding it's slightly weaselly, but only slightly.
def maybe_urlencoded(fixed_in: str) -> str:
    try:
        d = urllib.parse.parse_qs(fixed_in)
        # There may be additional known-good keys in the future.
        return d["fixed"][0]
    except (ValueError, KeyError):
        return fixed_in
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_new_style_with_version(self):\n self.assertIsNotNone(parse_arxiv_id('1202.1234v1'))\n self.assertIsNotNone(parse_arxiv_id('1203.12345v1'))\n self.assertIsNotNone(parse_arxiv_id('1203.12345v12'))", "def check_version_str(version):\n if not version.startswith('v') and version != 'current':\n version = 'v%s' % version\n return version", "def version_in(self, version_in):\n\n self._version_in = version_in", "def test_old_style_with_version(self):\n self.assertIsNotNone(parse_arxiv_id('gr-qc/9901123v3'))", "def dump_version(input_bytes):\n return dump_from_release(input_bytes, \"version\")", "def test_get_short_version(self):\n pass", "def fixed_in(self):\n fixed_in = self.fixed_artifact()\n fix_available_in = fixed_in.version if fixed_in and fixed_in.version != 'None' else None\n\n # NOTE: semver version format indicates a range where package\n # is vulnerable (as opposed to a value where anythng < value\n # is vulnerable, and the fix itself is known to exist), so we prepend a 'not' to indicate 'fix is available, if not in semver range'\n if fixed_in and fixed_in.version_format in ['semver']:\n # Github Advisories can add the real version where there is a fix if any.\n metadata = fixed_in.fix_metadata or {}\n first_patched_version = metadata.get('first_patched_version')\n if first_patched_version:\n return first_patched_version\n\n if fix_available_in and fixed_in.fix_metadata and fixed_in.fix_metadata.get('fix_exists', False):\n fix_available_in = \"! {}\".format(fix_available_in)\n else:\n fix_available_in = None\n\n return fix_available_in", "def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)", "def urlsafe(self):\n # This is 3-4x faster than urlsafe_b64decode()\n urlsafe = base64.b64encode(self.reference().Encode())\n return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')", "def encode_ver(v):\n t = split_ver(v)\n return t[0] << 16 | t[1] << 8 | t[2]", "def unsafe_version(version):\n\n return version.replace(\"_\", \".\")", "def encoded_query_str(request):\n return updated_query_str(request)", "def _build_uri_get_version(self, version=None, no_version=False):\n if not version and no_version:\n version = None\n elif not version and not no_version:\n version = self.UNI_VERSION\n elif version and no_version:\n LOG.debug(\n 'Version has been specified along with no_version flag, '\n 'ignoring no_version flag and using version {ver}'.format(\n ver=version))\n return version", "def coerce_version(value):\n if not isinstance(value, Version):\n value = Version.from_string(value)\n return value", "def __init__(self, value: str) -> None:\n try:\n id_part, version_part = self.split('v', 1)\n self.arxiv_id = Identifier(id_part)\n self.version = int(version_part)\n except ValueError as e:\n raise ValueError(f'Not a valid version identifier: {value}') from e", "def __encode_importent_info(info):\r\n return hashlib.sha256(str(info).encode()).hexdigest()", "def test_get_lis_version_should_return_a_string(self):\n lis_version = get_lis_version()\n self.assertIsInstance(lis_version, ustr)", "def version_bytes(self) -> str:\n return pulumi.get(self, \"version_bytes\")", "def versioned(filename, version, force_version=False, full_path=True):\n if not '.' 
in filename:\n return None\n\n if USE_VERSIONING or force_version:\n dotindex = filename.rindex('.')\n filename = u'%s.%s%s' % (filename[:dotindex], version, filename[dotindex:])\n\n if full_path:\n return static(filename)\n\n return filename", "def test_normalize_percent_encoding_in_querystring():\n assert (normalize_url(\"http://example.com/?a=b%c2\") ==\n \"http://example.com/?a=b%C2\")", "def validate_short_url(self, value: str) -> str:\n url_id = self.context.get(\"url_id\") # just in update mode we have id.\n\n if url_id: # for update step old and new short_value could be same.\n try:\n old_short_url = URL.objects.get(id=url_id).short_url\n except URL.DoesNotExist:\n raise serializers.ValidationError(\"url does not exists!\")\n if old_short_url == value:\n return value\n\n if value and url_validator(value):\n raise serializers.ValidationError(\n \"custom short_url could not be URL itself.Please try for sequence of string instead of a valid URL!\"\n )\n return value", "def _strip_version(endpoint):\n if endpoint.endswith('/'):\n endpoint = endpoint[:-1]\n url_bits = endpoint.split('/')\n if re.match(r'v\\d+\\.?\\d*', url_bits[-1]):\n endpoint = '/'.join(url_bits[:-1])\n return endpoint", "def b2_url_encode(s):\n return quote(s.encode('utf-8'))", "def test_percent_encode_querystring():\n assert (normalize_url(\"http://example.com/?a=hello{}\") ==\n \"http://example.com/?a=hello%7B%7D\")", "def is_vulnerable_version(self, version_str):\n raise NotImplementedError()", "def get_friendly_of_version(self, ofproto):\n if ofproto.OFP_VERSION == 1:\n _of_version = \"1.0\"\n elif ofproto.OFP_VERSION == 4:\n _of_version = \"1.3\"\n else:\n _of_version = \"Unknown version \" + \\\n str(ofproto.OFP_VERSION)\n return _of_version", "def test_computed_url(self):\n t = self.create_request_object()\n self.assertEqual(\"metadata/libraries/Fixitol(Dev)/versions/1234\", t.url_path())", "def safe_version(version):\n\n return version.replace(\".\", \"_\")", "def convert_version_from_depsolver(semantic_version):\n return str(semantic_version)", "def test_parse_package_url_custom_prefix():\n rv = versioning.to_remote_version(\n package_name='fake',\n package_version='1.0.1-alpha.1',\n bucket='fake-bucket',\n root_prefix='foo/bar',\n )\n rv_url = versioning.parse_package_url(rv.url)\n assert rv == rv_url, 'Expect URL parsing to be consistent.'" ]
[ "0.52483535", "0.5137429", "0.51280016", "0.49726486", "0.49645323", "0.49600667", "0.4959355", "0.49344546", "0.49142966", "0.49102196", "0.48660696", "0.48568073", "0.48517373", "0.48187718", "0.47451013", "0.4744391", "0.47432333", "0.47277656", "0.47126156", "0.4712107", "0.46838167", "0.46823484", "0.4679054", "0.4644776", "0.46435127", "0.46395004", "0.462016", "0.46153572", "0.46084046", "0.45963776" ]
0.55728
0
Check whether this manifest was preempted by another worker. That would be the case if the manifest references a manifestsecuritystatus, or if the reindex threshold is no longer valid.
def should_skip_indexing(manifest_candidate):
    if getattr(manifest_candidate, "manifestsecuritystatus", None):
        return manifest_candidate.manifestsecuritystatus.last_indexed >= reindex_threshold
    return len(manifest_candidate.manifestsecuritystatus_set) > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def x_overrun(self):\n return (self.status & 0x10) != 0", "def has_receiver(self):\n return self.balance < 0", "def z_overrun(self):\n return (self.status & 0x40) != 0", "def _allow_reset(self):\r\n return (self.child_state == self.DONE and self.child_attempts < self.max_attempts)", "def issuer_liveness_check():\n global app_config\n\n if app_config[\"running\"]:\n # return True until we get a shutdown request\n return True\n\n # return True until the work queue is cleared\n return tob_connection_active()", "def checkStatus(self):\n return None", "def custom_assess_status_check(self):\n check_config_set = []\n if self.backup_target_type == \"nfs\":\n check_config_set = ['nfs-shares']\n elif self.backup_target_type == \"s3\":\n check_config_set = [\n \"tv-s3-secret-key\",\n \"tv-s3-access-key\",\n \"tv-s3-region-name\",\n \"tv-s3-bucket\",\n \"tv-s3-endpoint-url\"]\n unset_config = [c for c in check_config_set if not hookenv.config(c)]\n if unset_config:\n return \"blocked\", \"{} configuration not set\".format(\n ', '.join(unset_config))\n # For s3 support backup-target-type should be set to 'experimental-s3'\n # as s3 support is pre-production. The self.backup_target_type\n # property will do any transaltion needed.\n if self.backup_target_type not in [\"nfs\", \"s3\"]:\n return \"blocked\", \"Backup target type not supported\"\n return None, None", "def would_retransmit(self):\n return not self.my_pending_requests.is_empty()", "def CheckPrereq(self):\n assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)\n self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)\n self.cluster = self.cfg.GetClusterInfo()\n cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]\n\n self.op.disks = self._LookupDiskMods()\n\n assert self.instance is not None, \\\n \"Cannot retrieve locked instance %s\" % self.op.instance_name\n\n self.warn = []\n\n if (self.op.pnode_uuid is not None and\n self.op.pnode_uuid != self.instance.primary_node and\n not self.op.force):\n instance_info = self._GetInstanceInfo(cluster_hvparams)\n\n if instance_info.fail_msg:\n self.warn.append(\"Can't get instance runtime information: %s\" %\n instance_info.fail_msg)\n elif instance_info.payload:\n raise errors.OpPrereqError(\n \"Instance is still running on %s\" %\n self.cfg.GetNodeName(self.instance.primary_node),\n errors.ECODE_STATE)\n pnode_uuid = self.instance.primary_node\n assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)\n\n node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))\n pnode_info = self.cfg.GetNodeInfo(pnode_uuid)\n\n assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)\n group_info = self.cfg.GetNodeGroup(pnode_info.group)\n\n # dictionary with instance information after the modification\n ispec = {}\n\n self._CheckHotplug()\n\n self._PrepareNicCommunication()\n\n # disks processing\n assert not (self.op.disk_template and self.op.disks), \\\n \"Can't modify disk template and apply disk changes at the same time\"\n\n if self.op.disk_template:\n self._PreCheckDiskTemplate(pnode_info)\n\n self._PreCheckDisks(ispec)\n\n self._ProcessHVParams(node_uuids)\n be_old = self._ProcessBeParams()\n\n self._ValidateCpuParams()\n self._ProcessOsParams(node_uuids)\n self._ProcessMem(cluster_hvparams, be_old, pnode_uuid)\n\n # make self.cluster visible in the functions below\n cluster = self.cluster\n\n def _PrepareNicCreate(_, params, private):\n self._PrepareNicModification(params, private, None, None,\n {}, cluster, pnode_uuid)\n return (None, 
None)\n\n def _PrepareNicAttach(_, __, ___):\n raise errors.OpPrereqError(\"Attach operation is not supported for NICs\",\n errors.ECODE_INVAL)\n\n def _PrepareNicMod(_, nic, params, private):\n self._PrepareNicModification(params, private, nic.ip, nic.network,\n nic.nicparams, cluster, pnode_uuid)\n return None\n\n def _PrepareNicRemove(_, params, __):\n ip = params.ip\n net = params.network\n if net is not None and ip is not None:\n self.cfg.ReleaseIp(net, ip, self.proc.GetECId())\n\n def _PrepareNicDetach(_, __, ___):\n raise errors.OpPrereqError(\"Detach operation is not supported for NICs\",\n errors.ECODE_INVAL)\n\n # Verify NIC changes (operating on copy)\n nics = [nic.Copy() for nic in self.instance.nics]\n ApplyContainerMods(\"NIC\", nics, None, self.nicmod, _PrepareNicCreate,\n _PrepareNicAttach, _PrepareNicMod, _PrepareNicRemove,\n _PrepareNicDetach)\n if len(nics) > constants.MAX_NICS:\n raise errors.OpPrereqError(\"Instance has too many network interfaces\"\n \" (%d), cannot add more\" % constants.MAX_NICS,\n errors.ECODE_STATE)\n\n # Pre-compute NIC changes (necessary to use result in hooks)\n self._nic_chgdesc = []\n if self.nicmod:\n # Operate on copies as this is still in prereq\n nics = [nic.Copy() for nic in self.instance.nics]\n ApplyContainerMods(\"NIC\", nics, self._nic_chgdesc, self.nicmod,\n self._CreateNewNic, None, self._ApplyNicMods,\n self._RemoveNic, None)\n # Verify that NIC names are unique and valid\n utils.ValidateDeviceNames(\"NIC\", nics)\n self._new_nics = nics\n ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)\n else:\n self._new_nics = None\n ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)\n\n if not self.op.ignore_ipolicy:\n ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,\n group_info)\n\n # Fill ispec with backend parameters\n ispec[constants.ISPEC_SPINDLE_USE] = \\\n self.be_new.get(constants.BE_SPINDLE_USE, None)\n ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,\n None)\n\n # Copy ispec to verify parameters with min/max values separately\n if self.op.disk_template:\n count = ispec[constants.ISPEC_DISK_COUNT]\n new_disk_types = [self.op.disk_template] * count\n else:\n old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)\n add_disk_count = ispec[constants.ISPEC_DISK_COUNT] - len(old_disks)\n dev_type = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)\n if dev_type == constants.DT_DISKLESS and add_disk_count != 0:\n raise errors.ProgrammerError(\n \"Conversion from diskless instance not possible and should have\"\n \" been caught\")\n\n new_disk_types = ([d.dev_type for d in old_disks] +\n [dev_type] * add_disk_count)\n ispec_max = ispec.copy()\n ispec_max[constants.ISPEC_MEM_SIZE] = \\\n self.be_new.get(constants.BE_MAXMEM, None)\n res_max = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,\n new_disk_types)\n ispec_min = ispec.copy()\n ispec_min[constants.ISPEC_MEM_SIZE] = \\\n self.be_new.get(constants.BE_MINMEM, None)\n res_min = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,\n new_disk_types)\n\n if res_max or res_min:\n # FIXME: Improve error message by including information about whether\n # the upper or lower limit of the parameter fails the ipolicy.\n msg = (\"Instance allocation to group %s (%s) violates policy: %s\" %\n (group_info, group_info.name,\n utils.CommaJoin(set(res_max + res_min))))\n raise errors.OpPrereqError(msg, errors.ECODE_INVAL)", "def check_allow_reset(self):\r\n if not self.ready_to_reset:\r\n if self.current_task_number > 0:\r\n 
last_response_data = self.get_last_response(self.current_task_number - 1)\r\n current_response_data = self.get_current_attributes(self.current_task_number)\r\n\r\n if (current_response_data['min_score_to_attempt'] > last_response_data['score']\r\n or current_response_data['max_score_to_attempt'] < last_response_data['score']):\r\n self.state = self.DONE\r\n self.ready_to_reset = True\r\n\r\n return self.ready_to_reset", "def is_trashed(self):\n return self.has_label(TRASHED_LABEL)", "def fault(self):\n return (self.status == self.STATUS_FAULT)", "def race_condition():\n if len(allocated_pids) != len(set(allocated_pids)):\n return True\n else:\n return False", "def is_pwned(self) -> bool:\n return self.w3.balance(self.sender) > self.initial_balance", "async def _do_work_claim(self) -> bool:\n # 1. Ask the LTA DB for the next Bundle to be deleted\n # configure a RestClient to talk to the LTA DB\n lta_rc = ClientCredentialsAuth(address=self.lta_rest_url,\n token_url=self.lta_auth_openid_url,\n client_id=self.client_id,\n client_secret=self.client_secret,\n timeout=self.work_timeout_seconds,\n retries=self.work_retries)\n self.logger.info(\"Asking the LTA DB for a Bundle to check for TransferRequest being finished.\")\n pop_body = {\n \"claimant\": f\"{self.name}-{self.instance_uuid}\"\n }\n response = await lta_rc.request('POST', f'/Bundles/actions/pop?source={self.source_site}&dest={self.dest_site}&status={self.input_status}', pop_body)\n self.logger.info(f\"LTA DB responded with: {response}\")\n bundle = response[\"bundle\"]\n if not bundle:\n self.logger.info(\"LTA DB did not provide a Bundle to check. Going on vacation.\")\n return False\n # update the TransferRequest that spawned the Bundle, if necessary\n await self._update_transfer_request(lta_rc, bundle)\n # even if we processed a Bundle, take a break between Bundles\n return False", "def crashed(self):\n\n return len(self.backtrace) > 0 or (self.run_as_script and self.return_code != 0)", "def is_insufficient_permissions(self):\n return self._tag == 'insufficient_permissions'", "def _check_unstake_result(self) -> None:\n balance_score = self.icx.get_balance(self.address) - self._daily_reward.get()\n if balance_score > 0:\n unstake_info_list = self.getUserUnstakeInfo()\n for each_info in unstake_info_list:\n value_to_transfer = each_info[0]\n if value_to_transfer <= balance_score:\n self._send_ICX(each_info[1], value_to_transfer)\n self._linked_list_var.remove(self._linked_list_var._head_id.get())\n break", "def can_request_more():\n # Note: Files are restored in pairs (so we multiply by 2)\n active_requests = jobtracker.query(\"SELECT IFNULL(SUM(numrequested), 0) \" \\\n \"FROM requests \" \\\n \"WHERE status='waiting'\", fetchone=True)\n to_download = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status NOT IN ('downloaded', \" \\\n \"'added', \" \\\n \"'deleted', \" \\\n \"'terminal_failure')\")\n if active_requests == None:\n\tactive_requests = 0\n num_to_restore = active_requests\n num_to_download = len(to_download)\n used = get_space_used()\n reserved = get_space_committed()\n\n can_request = ((num_to_restore+num_to_download) < config.download.numrestored) and \\\n (used+reserved < config.download.space_to_use)\n return can_request", "def _check_procmon_failures(self, target):\n if target.procmon:\n self._fuzz_data_logger.open_test_step(\"Contact process monitor\")\n self._fuzz_data_logger.log_check(\"procmon.post_send()\")\n if target.procmon.post_send():\n self._fuzz_data_logger.log_pass(\"No crash detected.\")\n 
else:\n self._fuzz_data_logger.log_fail(\n \"procmon detected crash on test case #{0}: {1}\".format(self.total_mutant_index,\n target.procmon.get_crash_synopsis()))\n return True\n return False", "def is_worker_allowed(self, worker_id):\n return worker_id in self.allowed_workers", "def checkExternalReward(self):\r\n\r\n self.externalReward, res = self.receiver.getExternalReward()\r\n\r\n return res", "def not_converging(self):\n if len(self.rundir) >= int(self.settings[\"run_limit\"]):\n return True\n return False", "def applications_can_be_modified(self):\n return self.status in [\n ApplicationBatchStatus.DRAFT,\n ApplicationBatchStatus.RETURNED,\n ]", "def check_status(self):", "def already_processed(self):\n # If the flag file has been created by a previous run\n # or if any of the rules have already been re-ordered\n # then we shouldn't make any more changes and instead\n # the system needs to be rebooted.\n return self.syspaths.flag_exists", "def check_lock(self):\n if self._lockfilename is None:\n print \"No lockfile specified in the configuration for the application.\"\n sys.exit(1)\n lockers = self.config.options('locks')\n for locker in lockers:\n lockfilename = self.config.get('locks', locker)\n if os.path.isfile(lockfilename):\n if not AppHandler.is_running(lockfilename):\n AppHandler.remove_lock(lockfilename)\n else:\n print \"Process is currently running. Please wait for it to finish.\"\n sys.exit(1)", "def check_unconfirmed_operations(self):\n if self:\n logwarning(\n style((f'There are still {self.n_actions} operations '\n 'in the queue at program exit!'), **HEADER_STYLE))", "def verify_package_status(self):\n pass", "def verify_package_status(self):\n pass" ]
[ "0.5434844", "0.541006", "0.5406191", "0.53775686", "0.53569597", "0.52894694", "0.5256828", "0.52423096", "0.52342397", "0.5217433", "0.5182849", "0.51582813", "0.51306504", "0.5123424", "0.51226157", "0.5121449", "0.511326", "0.50790226", "0.5077256", "0.50669265", "0.5063547", "0.50630355", "0.5059484", "0.5053406", "0.5051411", "0.5050196", "0.50309324", "0.5021881", "0.5020711", "0.5020711" ]
0.6024165
0