Dataset columns (name, type, length range):

query            string   length 9 to 9.05k
document         string   length 10 to 222k
metadata         dict
negatives        list     length 30 (fixed)
negative_scores  list     length 30 (fixed)
document_score   string   length 4 to 10
document_rank    string   2 distinct values
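The records below are printed as raw field values in the column order above (query, document, metadata, negatives, negative_scores, document_score, document_rank). To make that layout concrete, here is a minimal sketch of unpacking one such record into training triplets; the record is assumed to be a plain Python dict with these fields, and no particular loader or dataset identifier is implied.

def iter_triplets(record):
    # record: one row of this dataset, as a dict keyed by the column names above
    query = record["query"]                     # natural-language query (a docstring)
    positive = record["document"]               # the matching code snippet
    for negative, score in zip(record["negatives"], record["negative_scores"]):
        yield query, positive, negative, float(score)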
Map profiling information to what we consider relevant.
def map_profile_info(profile):
    result = map(
        lambda p: {
            'callcount': p.callcount,
            'time': p.totaltime,
            'name': p.code if isinstance(p.code, str) else p.code.co_name,
            'file': None if isinstance(p.code, str) else p.code.co_filename},
        profile.getstats())
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_profile_fields(data, fields):\n profile = {}\n for dst, src in fields.items():\n if callable(src):\n value = src(data)\n else:\n value = data.get(src)\n\n if value is not None and value != '':\n profile[dst] = value\n\n return profile", "def _prepare_profile(data):\n PATTERN = \"<method '\"\n\n def _rename_call(name):\n i = len(PATTERN)\n return name[i:name.index(\"'\", i)]\n\n result = defaultdict(lambda: {'callcount': 0, 'time': 0.0})\n for profile in data:\n for call in profile:\n if ('sql' in call['name']\n and call['name'].startswith(PATTERN)):\n name = _rename_call(call['name'])\n result[name]['callcount'] += call['callcount']\n result[name]['time'] += call['time']\n return result", "def _profile_module(self):\n with open(self._run_object, 'r') as srcfile:\n src_code = srcfile.read()\n code = compile(src_code, self._run_object, 'exec')\n try:\n with _CodeHeatmapCalculator() as prof:\n exec(code, self._globs, None)\n except SystemExit:\n pass\n\n heatmaps = []\n for filename, heatmap in prof.heatmap.items():\n if os.path.isfile(filename):\n heatmaps.append(\n self._format_heatmap(\n filename, heatmap, prof.execution_count[filename]))\n\n run_time = sum(heatmap['runTime'] for heatmap in heatmaps)\n return {\n 'objectName': self._run_object,\n 'runTime': run_time,\n 'heatmaps': heatmaps\n }", "def _profile(self) -> None:\n if self.use_case.profile:\n if self._profile_stats is None:\n self._profile_stats = pstats.Stats()\n if self._current_profiler is not None:\n self._current_profiler.disable()\n self._profile_stats.add(self._current_profiler)\n # TODO: use clear() instead of always creating a new profile\n self._current_profiler = cProfile.Profile()\n self._current_profiler.enable()", "def conclusion_summary_map(self):\n pass", "def profile_map_reduce(func, args, kwargs, func_result):\n (collection, map_fn, reduce_fn) = args[:3]\n\n report_kvs = _profile_query(collection)\n report_kvs['Map_Function'] = map_fn\n report_kvs['Reduce_Function'] = reduce_fn\n\n return report_kvs", "def profile(self, data):\n\n # need to store the output of each morphism in the analysis, which forms the training data later on\n self.profile = {}\n cur_output = data\n for morph in self.analysis.morphisms:\n cur_name = morph.name\n cur_output = morph.apply(cur_output)\n self.profile[cur_name] = cur_output", "def _init_profiles(self):\n self._profiles = {\n k: np.zeros((len(self.hybrid_time_index), len(self.hybrid_meta)),\n dtype=np.float32)\n for k in OUTPUT_PROFILE_NAMES}", "def _addCounterToMap(probeMap, counter, index):\n if counter.probe in probeMap:\n probeMap[counter.probe].append(index)\n else:\n probeMap.update({counter.probe : [index]})", "def _do_mapping(self):\n pass", "def _enable_profiling():\n import cProfile\n import atexit\n global _profiler\n _profiler = cProfile.Profile()\n _profiler.enable()\n atexit.register(_profile_atexit)", "def _profile_package(self):\n with _CodeHeatmapCalculator() as prof:\n try:\n runpy.run_path(self._run_object, run_name='__main__')\n except SystemExit:\n pass\n\n heatmaps = []\n for filename, heatmap in prof.heatmap.items():\n if os.path.isfile(filename):\n heatmaps.append(\n self._format_heatmap(\n filename, heatmap, prof.execution_count[filename]))\n\n run_time = sum(heatmap['runTime'] for heatmap in heatmaps)\n return {\n 'objectName': self._run_object,\n 'runTime': run_time,\n 'heatmaps': heatmaps\n }", "def get_profile_stats():\n return p_stats", "def __init__(self):\n self._profiling_mode = False\n self._total_time_ms = 0.0\n self._traced_records = 
[]\n self._statistical_results = {}", "def profile(x):\n return x", "def profiler(self):\r\n\r\n class Task(object):\r\n \"Private class to nicely wrap up the profile data\"\r\n def __init__(self, block, addr):\r\n self.block = block\r\n self.addr = addr\r\n self.name = None\r\n def tidy(self, sym):\r\n self.name = sym.varfind(self.addr).name\r\n self.CPU_FRAC = sym.constfind(\"$profiler.CPU_FRACTION_FIELD\").value\r\n def __repr__(self):\r\n if self.name is None:\r\n raise Exception(\"Need to call the tidy method before using\")\r\n return \"%-50s - %2.1f %%\" % (self.name, self.block[self.CPU_FRAC]/1000)\r\n\r\n\r\n # get the head of the list and a couple of constants\r\n head = self._core.sym.varfind(\"$profiler.last_addr\").addr\r\n NULL = self._core.sym.constfind(\"$profiler.LAST_ENTRY\").value\r\n SIZE = self._core.sym.constfind(\"$profiler.STRUC_SIZE\").value\r\n NEXT = self._core.sym.constfind(\"$profiler.NEXT_ADDR_FIELD\").value\r\n\r\n # get the first address\r\n curr = self._core.dm[head]\r\n\r\n # read all the structures off the chip as fast as we can\r\n tasks = []\r\n while curr != NULL:\r\n block = self._core.dm[curr:(curr+SIZE)]\r\n tasks.append(self.Task(block, curr))\r\n curr = block[NEXT]\r\n\r\n # now fill in the other bits\r\n for t in tasks:\r\n t.tidy(self._core.sym)\r\n\r\n # finally return\r\n return tasks", "def test_execution_profiling(self):\n self._test_reports_helper({\"--profile-execution\": \"\"}, [\"report.txt\"])", "def profile_function(self):\n with _CodeHeatmapCalculator() as prof:\n result = self._run_object(*self._run_args, **self._run_kwargs)\n code_lines, start_line = inspect.getsourcelines(self._run_object)\n\n source_lines = []\n for line in code_lines:\n source_lines.append(('line', start_line, line))\n start_line += 1\n\n filename = os.path.abspath(inspect.getsourcefile(self._run_object))\n heatmap = prof.heatmap[filename]\n run_time = sum(time for time in heatmap.values())\n return {\n 'objectName': self._object_name,\n 'runTime': run_time,\n 'result': result,\n 'timestamp': int(time.time()),\n 'heatmaps': [{\n 'name': self._object_name,\n 'heatmap': heatmap,\n 'executionCount': prof.execution_count[filename],\n 'srcCode': source_lines,\n 'runTime': run_time\n }]\n }", "def profile(self) -> Optional[Any]:\n\n def get_profile_attribute(numpy_data, attr_name):\n if isinstance(numpy_data, dict):\n return {key: getattr(array, attr_name) for key, array in numpy_data.items()}\n else:\n return getattr(numpy_data, attr_name)\n\n profile = {\n \"features_shape\": get_profile_attribute(self._features, \"shape\"),\n \"features_size\": get_profile_attribute(self._features, \"size\"),\n \"features_nbytes\": get_profile_attribute(self._features, \"nbytes\"),\n }\n if self._targets is not None:\n profile.update(\n {\n \"targets_shape\": get_profile_attribute(self._targets, \"shape\"),\n \"targets_size\": get_profile_attribute(self._targets, \"size\"),\n \"targets_nbytes\": get_profile_attribute(self._targets, \"nbytes\"),\n }\n )\n\n return profile", "def calc_profile(self, phases):\n raise NotImplementedError()", "def calculate_profile(transformed_sequences: list) -> dict:\n sequence_length = len(transformed_sequences)\n profile = {\n \"A\": [0] * sequence_length,\n \"C\": [0] * sequence_length,\n \"G\": [0] * sequence_length,\n \"T\": [0] * sequence_length\n }\n\n for base_index in range(sequence_length):\n for base in transformed_sequences[base_index]:\n profile[base][base_index] += 1\n \n return list(profile.values())", "def profile(self):\n\n return 
dict(width=self.width, height=self.height, crs=self.crs, \n interleave=self.interleave, resampling=self.resampling)", "def applyMapping(self):\n pass", "def audit_process():\n st_types, pc_types = audit(OSMFILE)\n #pprint.pprint(dict(st_types))\n #pprint.pprint(dict(pc_types))\n\n correct_name = {}\n for st_type, ways in st_types.iteritems():\n for name in ways:\n better_name = update_name(name, mapping)\n correct_name[name] = better_name\n #print name, \"=>\", better_name\n \n correct_code = {}\n for _, pc_type in pc_types.iteritems():\n for code in pc_type:\n better_code = update_postalcode(code)\n correct_code[code] = better_code\n #print code, \"=>\", better_code\n \n return correct_name, correct_code", "def get_memory_visit_lookup(self) -> Dict[str, int]:\n return self.memory_visit_lookup", "def info_cache():\n return [custom_hit, custom_miss, len(custom_memory), total_custom_memory]", "def get_profiling_data(profiler: 'IProfiler') -> ProfilerData:\n\n top_level_dict = json.loads(profiler.as_json())\n armnn_data = top_level_dict[\"ArmNN\"]\n #Get the inference measurements dict, this will be just one value for key starting with \"inference_measurements\"\n inference_measurements = [v for k, v in armnn_data.items() if k.startswith(\"inference_measurements_\")][0]\n\n #Get the execution data dict, this will be just one value for key starting with \"Execute_\"\n execution_data = [v for k, v in inference_measurements.items() if k.startswith(\"Execute_\")][0]\n\n workload_data = {}\n inference_data = {}\n for exec_key, exec_value in execution_data.items():\n # Check all items with a type.\n if \"type\" in exec_value and exec_value[\"type\"] == \"Event\":\n for event_key, event_value in exec_value.items():\n if event_key.startswith(\"Wall clock time_#\") and event_value[\"type\"] == \"Measurement\":\n time_data = __get_wall_clock_times__(event_value)\n time_data[\"backend\"] = __get_backend(exec_key)\n workload_data[exec_key] = time_data\n # This is the total inference time map\n if exec_key.startswith(\"Wall clock time_#\") and exec_value[\"type\"] == \"Measurement\":\n time_data = __get_wall_clock_times__(exec_value)\n inference_data.update(time_data)\n return ProfilerData(inference_data=inference_data, per_workload_execution_data=workload_data)", "def testStepBuildStatsMap(self):\n self._StringToMapHelper(data_types.StepBuildStatsMap, data_types.BuildStats)", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def process_request(self, request):\n self.db.set_profiling_level(0)\n try:\n self.start_ts = self.db.system.profile.find()\\\n .sort(\"ts\", pymongo.DESCENDING)\\\n .limit(1)[0].get('ts')\n except IndexError:\n self.start_ts = None\n\n self.db.set_profiling_level(2)" ]
[ "0.5740802", "0.56392205", "0.56055224", "0.55328506", "0.54870385", "0.54597306", "0.54565483", "0.5436571", "0.54253584", "0.5375065", "0.5319913", "0.52781445", "0.5247227", "0.5228473", "0.5214851", "0.5212649", "0.51746607", "0.511492", "0.5113408", "0.5090468", "0.50846833", "0.50416297", "0.50222325", "0.501553", "0.499862", "0.497708", "0.49747634", "0.4969554", "0.49483192", "0.4947822" ]
0.7137595
0
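As a usage note for the record above: map_profile_info expects an object whose getstats() method returns entries carrying callcount, totaltime and code attributes, which matches cProfile.Profile. A minimal sketch, assuming map_profile_info from the record above is in scope:

import cProfile

def work():
    return sum(i * i for i in range(10000))

prof = cProfile.Profile()
prof.enable()
work()
prof.disable()

# map_profile_info returns a lazy map object; iterate it to materialise the dicts.
for entry in map_profile_info(prof):
    print(entry['name'], entry['callcount'], entry['time'])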
NK table mapping binary sequence to value.
def nk_table(self):
    return self.map("keys", "values")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeValMap(self,value = 'readcount'):\n self.valMap = np.zeros(len(self))\n self.valMap = self.valMap-1\n myTmp = []\n for x in range(0,len(self)):\n myTmp.append([])\n for i in self.children:\n for j in range(i.start,i.end+1):\n myTmp[j-self.start].append(i.__dict__[value])\n for nt in range(0,len(myTmp)):\n if len(myTmp[nt])>0:\n self.valMap[nt]=sum(myTmp[nt])/len(myTmp[nt])", "def _parse_value_label_table(self, sfile):\n byteorder = self._byteorder\n \n nentries = unpack(byteorder + 'l', sfile.read(4))[0]\n txtlen = unpack(byteorder + 'l', sfile.read(4))[0]\n off = []\n val = []\n txt = []\n for i in range(nentries):\n off.append(unpack(byteorder+'l',sfile.read(4))[0])\n for i in range(nentries):\n val.append(unpack(byteorder+'l',sfile.read(4))[0])\n \n txt_block = unpack(str(txtlen) + \"s\", sfile.read(txtlen))\n txt = [t.decode('iso-8859-1') \n for b in txt_block for t in b.split(b'\\0')]\n \n # put (off, val) pairs in same order as txt\n sorter = list(zip(off, val))\n sorter.sort()\n \n # dict of val[i]:txt[i]\n table = {sorter[i][1]: txt[i] for i in range(len(sorter))}\n \n return table", "def create_label_map():\n\n cnt = 1\n tmp_array = np.array([10, 15, 25, 30, 40, 47, 57, 63, 69, 74, 81])\n dictionary = dict()\n dictionary[1] = 1\n for idx, val in enumerate(tmp_array):\n for j in range(cnt + 1, val):\n dictionary[j] = int(idx + 2)\n cnt = j\n return dictionary", "def dd_valley_value_map_nb(record, ts):\n return ts[record['valley_idx'], record['col']]", "def k_map(self):\n\t\tt1 = time.time()\n\t\tmapping_matrix = [] \n\t\tfor index in self.mapping:\n\t\t\tvector = np.zeros(len(self.unique_char),dtype=float)\n\t\t\tvector[index] = 1.0\n\t\t\tmapping_matrix.append(vector)\n\t\tprint(\"Time creating k map {:.3f} sec\".format(time.time()-t1))\n\t\tself.mapping_matrix = mapping_matrix\n\t\treturn mapping_matrix", "def build_inverse_barcode_map(seqs):\r\n inverse_map = {}\r\n map_count = defaultdict(int)\r\n for (label, seq) in seqs:\r\n (map_id, seq_id) = label.split()[:2]\r\n map_id = map_id.split(\"_\")[0]\r\n inverse_map[seq_id] = map_id\r\n map_count[map_id] += 1\r\n\r\n return (inverse_map, map_count)", "def __init__(self):\n self.lookup_table = bytes.maketrans(bytes(range(1<<8)),bytes((bin(i).count('1') for i in range(1<<8))))\n self.byteorder = sys.byteorder", "def get_map(self):\n\n self.mp = defaultdict(lambda : ord('x'))\n y, x = 0, 0\n while True:\n cond, output = self.ic()\n\n if cond: break\n # New row of the print out\n if output == 10:\n y += 1\n x = 0\n # Assign the value to the map\n else:\n self.mp[y,x] = output\n x += 1\n \n return self.mp", "def dd_start_value_map_nb(record, ts):\n return ts[record['start_idx'], record['col']]", "def mapfn(k, v):\n for row in v:\n # rellenar el codigo\n pass", "def truthtable(self):\n table = []\n for i in xrange(self.length):\n inputs = []\n binary = bin(i).lstrip('0b')\n for i in xrange(len(binary)):\n inputs.append(int(binary[i]))\n inputs.append(1)\n table.append(self.compute(inputs))\n return table", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A", "def binary_to_seq():\n bin_seq, dico_binary, comp_seq, file_comp = utf8_to_binary()\n \n #for each binary value associate the corresponding letter (key) \n #according to 
the dictionnary \n dna_seq = \"\"\n reading_binary = \"\"\n for value in bin_seq:\n reading_binary += value\n for letter, code in dico_binary.items():\n if code == reading_binary:\n dna_seq += letter\n reading_binary = \"\"\n break\n \n #print(dna_seq, bin_seq, comp_seq, file_comp)\n return dna_seq, bin_seq, comp_seq, file_comp", "def oracle(t):\n for entry in t.table:\n model = {e.v: e.b for e in entry}\n t.table[entry] = getTruthVal(t.formula, model)", "def Values(self) -> _n_1_t_4:", "def _get_bin_map_of_number(number, length):\n empty_map = '0' * length\n bin_map_long = empty_map + str(bin(number))[2:]\n return bin_map_long[-length:]", "def map_binary_values(x) -> int:\n return _bm.get(x, -1)", "def nmer_dictionary(self,n,dic={}):\n if self.sequence == \"\":\n self.fetchSequence()\n self.sequence = self.sequence.upper()\n for i in range(0,len(self.sequence)-n):\n subseq = self.sequence[i:][:n]\n dic[subseq]=1+dic.get(subseq,0)\n del subseq\n return dic", "def huffman_construction(seq) :\n # Calculating frequency\n freq = {}\n for c in seq:\n if c in freq:\n freq[c] += 1\n else:\n freq[c] = 1\n\n freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)\n nodes = freq\n\n while len(nodes) > 1:\n (key1, c1) = nodes[-1]\n (key2, c2) = nodes[-2]\n nodes = nodes[:-2]\n node = NodeTree(key1, key2)\n nodes.append((node, c1 + c2))\n nodes = sorted(nodes, key=lambda x: x[1], reverse=True)\n\n huffman_code = huffman_code_tree(nodes[0][0])\n\n #construction of the binary sequence code\n #for each items of the sequence we search the corresponding code \n #in the dictionnary example : {\"T\":001}\n #and add the corresponding code in a list and join it \n binary_list= []\n for element in seq:\n for key in huffman_code.keys():\n if element == key:\n binary_list += huffman_code[key]\n binary_seq =\"\".join(binary_list)\n\n return huffman_code, binary_seq", "def values(self):\n value_item = 0x0\n\n for _ in range(0, self._number):\n value_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(value_item))\n\n d = HBINCell(self._buf, value_offset, self)\n v = VKRecord(self._buf, d.data_offset(), self)\n value_item += 4\n yield v", "def keysequence(value):\r\n return value.toString()", "def map_table(asm):\n # Dictionary of mappings between pre-defined names\n table = {\n \"SP\" : 0,\n \"LCL\" : 1,\n \"ARG\" : 2,\n \"THIS\" : 3,\n \"THAT\" : 4,\n \"SCREEN\": 16384,\n \"KBD\" : 24576,\n }\n # R0-R15\n for i in range(0, 16):\n table[\"R\" + str(i)] = i\n\n # Add user-defined names i.e. 
variables and gotos\n variables_list = [] # list of all @-values\n reg = 16 # start after R15\n count = -1 # keep track of instruction memory position\n\n for line in asm:\n parsed, flag = parser(line)\n\n if flag == \"GOTO_INSTRUCTION\":\n table[parsed] = count + 1 # add next position after goto\n elif flag == \"A_DECIMAL\":\n count += 1\n elif flag == \"A_INSTRUCTION\":\n if parsed not in variables_list:\n variables_list.append(parsed) # append to list if it doesn't exist\n count += 1\n elif flag == \"C_INSTRUCTION\":\n count += 1\n\n for i in variables_list:\n try:\n table[i]\n except KeyError:\n table[i] = reg # if key doesn't exist add it\n reg += 1\n\n return table", "def _generate_table(self):\n for i in xrange(32):\n dest = [0]\n gw = [0]\n self._table.append(\n {'destination': dest, 'gateway': gw}\n )", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! 
sequence is too short\")", "def _map_B(self, obs_seq):\n B_map = np.ones((self.n_states, len(obs_seq)))\n\n for j in range(self.n_states):\n for t, obs in enumerate(obs_seq):\n for i, symbol in enumerate(obs):\n if symbol == self.MISSING or (symbol is np.nan or symbol != symbol):\n # if the symbol is missing, use the maximum likelihood symbol for that state\n temp_symbol = np.argmax(\n self.B[i][j]\n )\n B_map[j][t] *= self.B[i][j][temp_symbol]\n else:\n B_map[j][t] *= self.B[i][j][symbol]\n return B_map", "def one_hot_encode_single(mapping: dict[str, int], value: Optional[str]) -> IntArray:\n encoded_value = np.zeros((1, len(mapping)))\n if not pd.isna(value):\n code = mapping[str(value)]\n encoded_value[0, code] = 1\n return encoded_value", "def generateLookupTable(base = None, verbose = True):\n \n pool = Pool(cpu_count());\n lut = pool.map(matchIndex, range(2**26),chunksize=2**26/8/cpu_count());\n \n return np.array(lut, dtype = bool);", "def __call__(self):\n return {self.idx: rle_encoding(self.mask)}", "def syndrome_decoding_table(self):\n parity_check = self.get_parity_check_matrix()\n\n size = 2**(self.n-self.k) - 1\n iteration_counter = 0\n weight_counter = -1\n\n self.syndrome_table = {}\n\n for i in range(size):\n base_vector = np.zeros((1, self.n), dtype=int)\n\n # increase the weight by 1 every time the loop exceed the vector size.\n if iteration_counter == self.n:\n iteration_counter = 0\n weight_counter += 1\n base_vector[0, :weight_counter] = 1\n\n syndrome_vector = base_vector[0, :]\n syndrome_vector[iteration_counter] = 1\n syndrome = (1*np.matmul(syndrome_vector, parity_check)) % 2\n if tuple(syndrome) not in self.syndrome_table:\n self.syndrome_table[tuple(syndrome)] = 1*syndrome_vector\n iteration_counter += 1\n\n return self.syndrome_table", "def __init__(self):\n self.space = 1000\n self.hash_table = [Node(-1, -1)] * self.space" ]
[ "0.5793329", "0.5744892", "0.56919354", "0.5584746", "0.5507807", "0.5425487", "0.53992283", "0.5373337", "0.53448844", "0.5336639", "0.52844787", "0.527202", "0.5269434", "0.5232513", "0.5224803", "0.5220393", "0.5209991", "0.51919585", "0.51794624", "0.51602954", "0.5154623", "0.5141409", "0.51239383", "0.5114458", "0.5111789", "0.50328666", "0.50212204", "0.49975377", "0.4996177", "0.49929875" ]
0.6788704
0
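The map("keys", "values") helper used in the record above is not shown in this section; as an assumption, a minimal stand-in that yields the same kind of lookup is a zip of the key and value arrays into a dict:

# Hypothetical stand-in for map("keys", "values"): pair each binary key with its
# fitness contribution. The keys and values here are illustrative, not from the data.
keys = ["00", "01", "10", "11"]          # all binary sequences for K = 2
values = [0.1, 0.7, 0.3, 0.9]
nk_table = dict(zip(keys, values))
print(nk_table["10"])                    # 0.3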
Set the order (K) of the NK model.
def set_order(self, K):
    self.K = K
    # point to order
    self.order = self.K
    self._keys = np.array(["".join(r) for r in it.product('01', repeat=self.K)])
    # Reset phenotypes
    self.data['phenotypes'] = np.empty(self.n, dtype=float)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_order(self, order):\n self.order = order", "def set_order(self, order):\n self.order = order", "def setOrder(self, order):\n\t\tself.orderInData = order", "def _setordering_customer_50K(self, val):\n self.swift_obj.OrderingCustomer_K = val\n self.swift_obj.OrderingCustomer_K.swiftTag = '50K'", "def order(self, order):\n self._order = order", "def SetOrder(self, order):\n if self.__order != order:\n self.__order = order\n self.Modified()", "def setOrder(self, verbose = 1):\n\n self.order = np.arange(self.atoms.shape[0])\n if verbose > 0:\n string = \"Updated the saved order\"\n ut.infoPrint(string)", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "def set_coefs_order(self, order):\n # Attach an epistasis model.\n self.order = order\n self.add_epistasis()\n self.epistasis.data.values = np.zeros(self.epistasis.n)\n self.epistasis.data.values[0] = 1\n return self", "def set_document_order(self, order):\n self.set_value_into_input_field(self.order_text_field_locator, order)", "def order(self, order=0):\n # type: (int) -> Entity\n self.type_def['order'] = order\n\n return self", "def setOrder(self, *args):\n return _libsbml.CompartmentGlyph_setOrder(self, *args)", "def set_task_order(self, order):\n for task in self.tasks:\n task.order = order", "def setLatticeOrder(self):\n\t\taccNodes = self.getNodes()\n\t\telemInLine = {}\n\t\tfor i in range(len(accNodes)):\n\t\t\telem = accNodes[i]\t\t\t\n\t\t\telemname = elem.getName()\n\t\t\tif(elemInLine.has_key(elemname)):\n\t\t\t\telemInLine[elemname] += 1\n\t\t\telse:\telemInLine[elemname] = 1\n\t\t\tnode = self.getNodes()[i]\n\t\t\tnode.setParam(\"TPName\",node.getName()+\"_\"+str(elemInLine[elemname]))\n\t\t\t#node.setParam(\"sequence\",i+1)\n\t\t\t#print \"debug node\",node.getName(),node.getParamsDict()", "def set_order_weight(self):\n\n for child in self.all_children():\n for rule in self.options['ordering']:\n if child.lineage_test(rule):\n child.order_weight = rule['order']", "def set_order_weight(self):\n\n for child in self.all_children():\n for rule in self.options['ordering']:\n if child.lineage_test(rule):\n child.order_weight = rule['order']", "def set_bond_order(molecule, bond_index, bond_order):\n return molecule.SetBondOrder(bond_index, bond_order)", "def set_fitorder(self, fitorder):\n self.__fitorder = fitorder", "def order(self):\n return self.n", "def order ( self ) :\n return self.__order", "def setPageOrder(self,value):\n self.PDFreactorConfiguration.in1[\"pageOrder\"] = value", "def order(self):\n return self.__order", "def order(self) -> float:\n return self._order", "def order(self):\n return self._degree + 1", "def k(self):\n return add(self.k_b(), self.k_m())", "def k(self):\n self.kTable()", "def __init__(self, k):\n self.k = k\n self.N = 2**self.k", "def K(self):\n return self._K" ]
[ "0.67037237", "0.67037237", "0.65358293", "0.6362102", "0.62988967", "0.62647235", "0.6218576", "0.62119216", "0.62119216", "0.62119216", "0.6013113", "0.593146", "0.5919525", "0.5821439", "0.572999", "0.56128156", "0.5604605", "0.5604605", "0.56034064", "0.5601273", "0.5580365", "0.55189365", "0.54056275", "0.5363626", "0.5359102", "0.5354078", "0.53489774", "0.5341737", "0.5314844", "0.5314447" ]
0.80582696
0
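A quick check of the key construction used in set_order above: it.product('01', repeat=K) enumerates all 2**K binary strings that index the NK table (the itertools import is made explicit here):

import itertools as it

for K in (1, 2, 3):
    keys = ["".join(r) for r in it.product('01', repeat=K)]
    print(K, len(keys), keys)
# 1 2 ['0', '1']
# 2 4 ['00', '01', '10', '11']
# 3 8 ['000', '001', '010', '011', '100', '101', '110', '111']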
Set the values of the NK table by drawing from a uniform distribution over the given k_range.
def set_random_values(self, k_range=(0, 1)):
    if hasattr(self, "keys") is False:
        raise Exception("Need to set K first. Try `set_order` method.")
    self._values = np.random.uniform(k_range[0], k_range[1], size=len(self.keys))
    self.build()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def determine_k(dataset, range_k, n_seed=30):\r\n range_Ks = np.arange(0,range_k,1,dtype=int) #range of delays to study\r\n h_K=np.zeros((10,range_k))\r\n \r\n for i in range(10):\r\n for k, K in enumerate(range_Ks):\r\n traj_matrix= embed.trajectory_matrix(dataset, K=K)\r\n labels= cl.kmeans_knn_partition(traj_matrix, n_seed)\r\n h= op_calc.get_entropy(labels)\r\n h_K[i,k]=h\r\n \r\n return(h_K)", "def set_uniform_Kk(self, clip=True):\n\t\t\n\t\tKk1_los = random_matrix([self.Mm], params=[self.lo_Kk1_hyper_lo, \n\t\t\t\t\t\t\tself.lo_Kk1_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk1)\n\t\tKk1_his = random_matrix([self.Mm], params=[self.hi_Kk1_hyper_lo, \n\t\t\t\t\t\t\tself.hi_Kk1_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk1)\n\t\tKk2_los = random_matrix([self.Mm], params=[self.lo_Kk2_hyper_lo, \n\t\t\t\t\t\t\tself.lo_Kk2_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk2)\n\t\tKk2_his = random_matrix([self.Mm], params=[self.hi_Kk2_hyper_lo, \n\t\t\t\t\t\t\tself.hi_Kk2_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk2)\n\t\t\n\t\tself.Kk1 = random_matrix([self.Mm, self.Nn], [Kk1_los, Kk1_his], \n\t\t\t\t\t\t\t\tsample_type='rank2_row_uniform', \n\t\t\t\t\t\t\t\tseed = self.seed_Kk1)\n\t\tself.Kk2 = random_matrix([self.Mm, self.Nn], [Kk2_los, Kk2_his], \n\t\t\t\t\t\t\t\tsample_type='rank2_row_uniform', \n\t\t\t\t\t\t\t\tseed = self.seed_Kk2)\n\t\t\n\t\tif clip == True:\n\t\t\tarray_dict = clip_array(dict(Kk1 = self.Kk1, Kk2 = self.Kk2))\n\t\t\tself.Kk1 = array_dict['Kk1']\n\t\t\tself.Kk2 = array_dict['Kk2']", "def initializeDistribution(self):\n self.convertToDistrDict['Legendre'] = self.convertLegendreToUniform\n self.convertToQuadDict ['Legendre'] = self.convertUniformToLegendre\n self.measureNormDict ['Legendre'] = self.stdProbabilityNorm\n self.convertToDistrDict['ClenshawCurtis'] = self.convertLegendreToUniform\n self.convertToQuadDict ['ClenshawCurtis'] = self.convertUniformToLegendre\n self.measureNormDict ['ClenshawCurtis'] = self.stdProbabilityNorm\n self._distribution = distribution1D.BasicUniformDistribution(self.lowerBound,self.lowerBound+self.range)", "def k(self):\n self.kTable()", "def initializeDistribution(self):\n self.checkDistParams()\n\n self.lowerBound = min(self.mapping.keys())\n self.upperBound = max(self.mapping.keys())", "def generate_k(data_set, k):\n centers = []\n dimensions = len(data_set[0])\n min_max = defaultdict(int)\n\n for point in data_set:\n for i in range(dimensions):\n val = point[i]\n min_key = 'min_{0}d'.format(i)\n max_key = 'max_{0}d'.format(i)\n if min_key not in min_max or val < min_max[min_key]:\n min_max[min_key] = val\n if max_key not in min_max or val > min_max[max_key]:\n min_max[max_key] = val\n\n for _k in range(k):\n rand_point = []\n for i in range(dimensions):\n min_val = min_max['min_{0}d'.format(i)]\n max_val = min_max['max_{0}d'.format(i)]\n \n rand_point.append(uniform(min_val, max_val))\n centers.append(rand_point)\n return centers", "def set_uniform(self, n_rows: int = 2, n_columns: int = 2):\n self.n_rows = n_rows\n self.n_columns = n_columns\n self.c_matrix = [BaseDistribution(n_items = n_columns) for x in range(n_columns)]\n self.prior = BaseDistribution(n_items = n_rows)\n return self", "def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not 
self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)", "def set_visualization_range(self, start: int, end: int):\n self.__range = (start, end)", "def range(cls, n, num_partitions=None):\n\n return Table(Env.hc(), Env.hail().keytable.KeyTable.range(Env.hc()._jhc, n, joption(num_partitions)))", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def random(self, k=1000, n=100):\n a = numpy.random.randint(k, size=n)\n _, self.counts = numpy.unique(a, return_counts=1)\n self.nk, self.zk = numpy.unique(self.counts, return_counts=1)\n return self", "def set_normal_Kk(self, clip=True):\n\t\t\n\t\tKk1_mus = random_matrix([self.Mm], params=[self.mu_Kk1_hyper_lo, \n\t\t\t\t\t\t\tself.mu_Kk1_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk1)\n\t\tKk1_sigmas = random_matrix([self.Mm], params=[self.sigma_Kk1_hyper_lo, \n\t\t\t\t\t\t\tself.sigma_Kk1_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk1)\n\t\tKk2_mus = random_matrix([self.Mm], params=[self.mu_Kk2_hyper_lo, \n\t\t\t\t\t\t\tself.mu_Kk2_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk2)\n\t\tKk2_sigmas = random_matrix([self.Mm], params=[self.sigma_Kk2_hyper_lo,\n\t\t\t\t\t\t\tself.sigma_Kk2_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk2)\n\t\t\n\t\tself.Kk1 = random_matrix([self.Mm, self.Nn], [Kk1_mus, Kk1_sigmas], \n\t\t\t\t\t\t\t\t\tsample_type='rank2_row_gaussian', \n\t\t\t\t\t\t\t\t\tseed = self.seed_Kk1)\n\t\tself.Kk2 = random_matrix([self.Mm, self.Nn], [Kk2_mus, Kk2_sigmas],\n\t\t\t\t\t\t\t\t\tsample_type='rank2_row_gaussian', \n\t\t\t\t\t\t\t\t\tseed = self.seed_Kk2)\n\t\t\n\n\t\tif clip == True:\n\t\t\tarray_dict = clip_array(dict(Kk1 = self.Kk1, Kk2 = self.Kk2))\n\t\t\tself.Kk1 = array_dict['Kk1']\n\t\t\tself.Kk2 = array_dict['Kk2']", "def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def _change_spacing(self, **kwargs):\n start_point = kwargs.get(\"start_point\")\n end_point = kwargs.get(\"end_point\")\n self.t[start_point:end_point] *= kwargs.get(\"gamma\")\n self._base(**kwargs)", "def setHistogramRange(self, mn, mx, padding=0.1):\n self.vb.enableAutoRange(self.vb.YAxis, False)\n if self.orientation == 'horizontal':\n self.vb.setXRange(mn, mx, padding)\n elif self.orientation == 'vertical':\n self.vb.setYrange(mn, mx, padding)\n # mn -= d*padding\n # mx += d*padding\n # self.range = [mn,mx]\n # self.updateRange()\n # self.vb.setMouseEnabled(False, True)\n # self.region.setBounds([mn,mx])", "def setRange(self, x_range, y_range):\n pass", "def range_table(self):\n raise NotImplementedError('Abstract method.')", "def generate_colormap(scale_range=(0.0, 1.0), hue_range=(0.8, 0.0),\n saturation_range=(1.0, 1.0), value_range=(0.8, 0.8),\n nan_color=(0.2, 0.2, 0.2, 
1.0)):\n lookup_table = vtk.vtkLookupTable()\n lookup_table.SetRange(scale_range)\n\n lookup_table.SetHueRange(hue_range)\n lookup_table.SetSaturationRange(saturation_range)\n lookup_table.SetValueRange(value_range)\n lookup_table.SetNanColor(nan_color)\n lookup_table.Build()\n return lookup_table", "def set_range(self, **rangekwargs):\n\n if 'xrange' in rangekwargs.keys(): \n xrange = rangekwargs['xrange']\n else: \n xrange = [-50.0, 50.0] # (default)\n\n if 'yrange' in rangekwargs.keys(): \n yrange = rangekwargs['yrange']\n else: \n yrange = [0.0, 1.25 * self.hist_max]\n\n self.sub.set_xlim(xrange) \n self.sub.set_ylim(yrange) \n\n self.sub.set_xlabel(r\"$\\mathtt{d_{LOS}}$ (Mpc/h)\", fontsize=20)\n\n return None", "def __init__(self, k: int) -> None:\n\n assert k > 2, \"for k = 2 use Bernoulli distribution.\"\n\n self.k = k", "def initialize_centers(data, k):\n x_data_min = min(p[0] for p in data)\n x_data_max = max(p[0] for p in data)\n y_data_min = min(p[1] for p in data)\n y_data_max = max(p[1] for p in data)\n\n return generate_random_data(\n k,\n x_data_min,\n x_data_max,\n y_data_min,\n y_data_max\n )", "def create_in_out_table(km, quantizer):\n in_table = km.cluster_centers_.flatten()\n qrange = quantizer.range().reshape(-1, 1).astype(np.float32)\n out_table = km.predict(qrange).ravel()\n return in_table, out_table", "def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except ValueError:\n return", "def sample(self, k, with_replacement=False, weights=None):\n return Table.from_rows(\n self._sample(k, with_replacement, weights),\n self.column_labels)", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False) or (self.min == self.lowerBound and self.max == self.upperBound):\n self._distribution = distribution1D.BasicTriangularDistribution(self.apex,self.min,self.max)\n else:\n self.raiseAnError(IOError,'Truncated triangular not yet implemented')", "def set_range(self, value):\n self.gauge.SetRange(value)", "def autoHistogramRange(self):\n self.vb.enableAutoRange(self.vb.XAxis, True)\n self.vb.enableAutoRange(self.vb.YAxis, True)\n # self.range = None\n # self.updateRange()\n # self.vb.setMouseEnabled(False, False)\n\n # def updateRange(self):\n # self.vb.autoRange()\n # if self.range is not None:\n # self.vb.setYRange(*self.range)\n # vr = self.vb.viewRect()\n\n # self.region.setBounds([vr.top(), vr.bottom()])", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = 
distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.lowerBound,self.upperBound,self.low)", "def convergence_table_row_data(k, _error_norms): \n \n# Convergence rate for the computation is an estimate of the order of convergence of the numerical method, \n# and it is computed as log_2(error[twice smaller grid resolution]/error[current grid resolution])\n def convergence_rate (k, norm): \n return abs(log(_error_norms[k-1][norm]) - log(_error_norms[k][norm]))/log(2)\n\n# Store the relevant data in the dictionary \n data = {}\n data[\"k\"] = k\n data[\"M\"] = 2**k \n data[\"h\"] = 2**(-k) \n data[\"sup_norm\"] = _error_norms[k][\"sup_norm\"] \n data[\"sup_norm_rate\"] = convergence_rate(k, \"sup_norm\") \n data[\"one_norm\"] = _error_norms[k][\"one_norm\"]\n data[\"one_norm_rate\"] = convergence_rate(k, \"one_norm\")\n data[\"two_norm\"] = _error_norms[k][\"two_norm\"]\n data[\"two_norm_rate\"] = convergence_rate(k, \"two_norm\")\n\n return data" ]
[ "0.57710737", "0.56050676", "0.5352598", "0.5342018", "0.5335359", "0.5318227", "0.5310305", "0.51940215", "0.51878417", "0.51309323", "0.513036", "0.5095839", "0.5073497", "0.5061649", "0.5057922", "0.5051695", "0.50410706", "0.5020852", "0.50112736", "0.50064665", "0.49961156", "0.49856526", "0.49833795", "0.49698883", "0.49368638", "0.49238318", "0.49164176", "0.491111", "0.49076498", "0.4892267" ]
0.7136088
0
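A standalone sketch of the draw performed by set_random_values above, assuming the 2**K keys already exist: one uniform value per key, bounded by k_range.

import numpy as np

keys = ["00", "01", "10", "11"]                        # K = 2
k_range = (0.0, 0.5)
values = np.random.uniform(k_range[0], k_range[1], size=len(keys))
print(len(values) == len(keys))                        # True
print(values.min() >= k_range[0], values.max() <= k_range[1])   # True True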
Set the values of the NK table from a list/array of values.
def set_table_values(self, values):
    if len(values) != len(self.keys):
        raise Exception("Length of the values do not equal the length of "
                        "NK keys. "
                        "Length of keys is : %d" % (len(self.keys),))
    self._values = values
    self.build()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_values(self, values=None, *args, **kwargs):\n valueList = []\n valueDict = {}\n if values is not None:\n # check type of {\\sf values} to get correctly the values\n if isinstance(values, list) or isinstance(values, tuple):\n valueList.extend(values)\n elif isinstance(values, dictionary): # values are provided in\n # form of a mapping between\n # identity and value\n valueDict.update(values)\n else: # values is an value\n valueList.append(values)\n else:\n valueDict.update(kwargs)\n \n if (len(valueList) <= 0) and (len(valueDict) <= 0):\n # Set value to the default\n for elem in self.db.values():\n elem.set_value(None)\n else: \n # Set the values in the list first\n for elem, value in itertools.izip(self.db, valueList):\n elem.set(value)\n # The values in the dictionary is added of\n # correct the ones are set by the list\n for elemId in valueDict.keys():\n if elemId in self.indices.keys():\n self.db[self.indices[elemId]].set(valueDict[elemId])\n return", "def setValues(self, values):\n [self.setValue(key, value) for key, value in values.iteritems()]", "def set_all_values(self, values):\n return self.display_table.set_all_values(values,root=self.display_table_root,include=self.params)", "def set_values(self, new_values):\n for name, value in new_values.items():\n self.nodes_db.loc[name][\"node\"].set_value(value)", "def setvalues(self, values):\n if len(values) != len(self):\n # FIXME: correct error to raise?\n raise ValueError('Value list is not the same length as the '\n 'OrderedDict.')\n self.update(zip(self, values))", "def __setitem__(self, values: Tuple[int, ...], new_value: float) -> None:\n self.table[values] = new_value", "def values(self, values):\n self.data.values = values", "def initMapping(self, values):\n for value, prob in values.items():\n self.set(value, prob)", "def set_value(self, value):\n for row in self.rows:\n row.set_values(value)", "def set_values(self, value):\n for i in range(len(self)):\n self._elements[i] = value", "def set_values(self,x):\n for i in range(len(self)):\n self[i].set_value(x[i])", "def setValues(self,values):\n for v,val in zip(self.values,map(float,values)):\n v.setText(str(val))", "def set_rows(self, values: List[str]):\n\n values_len = len(values)\n curr_row_count = len(self.row_layout.children())\n\n # adjust row count\n if values_len > curr_row_count:\n for _ in range(values_len - curr_row_count):\n self.add_row()\n elif values_len < curr_row_count:\n for _ in range(curr_row_count - values_len):\n last_row = self.row_layout.children()[-1]\n last_row.itemAt(1).widget().click()\n\n # set values\n idx = 0\n for row in self.row_layout.children():\n if self.possible_values is None:\n row.itemAt(0).widget().setText(values[idx])\n else:\n if values[idx] in self.possible_values:\n row.itemAt(0).widget().setCurrentIndex(self.possible_values.index(values[idx]))\n idx += 1", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def values(self, values):\n\n self._values = values", "def values(self, values):\n\n self._values = values", "def update(self, values: List[int]) -> None:\n ...", "def update(self, values: List[int]) -> None:\n ...", "def array_update(self, table_list):\r\n for tbl in table_list:\r\n x = kit.SQL_pull('name, subject_id', tbl)\r\n r = {i[0]: i[1] for i in x}\r\n h = {i[1]: tbl for i in x}\r\n \r\n self.refference.update(r)\r\n self.home_table.update(h)\r\n \r\n self.counts[tbl] = len(x)", 
"def setValue(self, mapList, value):\n self.__setInDict(self.__cfg, mapList, value)", "def _setVals(self, *args, **kwargs):\n pass", "def set(self, table):\n if table is None:\n return\n for name in table.dtype.names:\n self._set_column(name, table[name])", "def update(self, values):\n for k, v in six.iteritems(values):\n setattr(self, k, v)", "def setValue(self,val):\n for f,v in zip(self.fields,val):\n f.setValue(v)", "def update(self, values):\r\n for k, v in six.iteritems(values):\r\n setattr(self, k, v)" ]
[ "0.69007885", "0.679975", "0.6547312", "0.64566696", "0.6412549", "0.6412346", "0.63792694", "0.630354", "0.6177051", "0.60555947", "0.60262287", "0.6000296", "0.5967956", "0.5946116", "0.5946116", "0.5946116", "0.5946116", "0.5946116", "0.5946116", "0.59048855", "0.59048855", "0.58379465", "0.58379465", "0.58052826", "0.5798628", "0.575283", "0.5737511", "0.5654839", "0.56488264", "0.5645218" ]
0.7321782
0
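A small illustration of the guard in set_table_values above: supplying a number of values that does not match the 2**K keys is rejected before the table is rebuilt.

keys = ["00", "01", "10", "11"]                # K = 2, so 4 values are expected
values = [0.2, 0.8, 0.5]                       # deliberately one value short
try:
    if len(values) != len(keys):
        raise Exception("Length of the values do not equal the length of "
                        "NK keys. Length of keys is : %d" % (len(keys),))
except Exception as err:
    print(err)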
Build phenotypes from NK table.
def build(self):
    nk_table = self.nk_table

    # Check for even interaction
    neighbor = int(self.order / 2)
    if self.order % 2 == 0:
        pre_neighbor = neighbor - 1
    else:
        pre_neighbor = neighbor

    # Use NK table to build phenotypes
    phenotypes = np.zeros(self.n, dtype=float)
    for i in range(len(self.genotypes)):
        f_total = 0
        for j in range(self.length):
            if j - pre_neighbor < 0:
                pre = self.binary[i][-pre_neighbor:]
                post = self.binary[i][j:neighbor + j + 1]
                f = "".join(pre) + "".join(post)
            elif j + neighbor > self.length - 1:
                pre = self.binary[i][j - pre_neighbor:j + 1]
                post = self.binary[i][0:neighbor]
                f = "".join(pre) + "".join(post)
            else:
                f = "".join(
                    self.binary[i][j - pre_neighbor:j + neighbor + 1])
            f_total += nk_table[f]
        phenotypes[i] = f_total
    self.data.phenotypes = phenotypes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_phenotype(phenotype_id, adapter):\n phenotype_obj = {}\n phenotype = adapter.hpo_term(phenotype_id)\n if phenotype:\n phenotype_obj[\"phenotype_id\"] = phenotype[\"hpo_id\"]\n phenotype_obj[\"feature\"] = phenotype[\"description\"]\n return phenotype", "def create_model():\n # Get list of all syllables: [\"<s>\", \"AH\", \"</s>\", \"<s>\", \"T\", ...]\n syllabifier = Syllabifier()\n all_syllables = syllabifier.all_syllables()\n\n # Count conditional probabilties of phoneme tuples\n tcf = TrigramCollocationFinder.from_words(all_syllables)\n bcf = BigramCollocationFinder.from_words(all_syllables)\n tri_dict = dict(sorted(tcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n bi_dict = dict(sorted(bcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n\n # Create dictionary to count cond prob all phoneme tuples\n accepted_phonemes = [i[0] for i in cmudict.phones()]\n accepted_phonemes.append('<s>')\n accepted_phonemes.append('</s>')\n phoneme_tups = [p for p in itertools.product(accepted_phonemes, repeat=3)]\n cond_probs_dict = dict([(char, 0) for char in phoneme_tups])\n\n for t in tri_dict:\n p1, p2, p3 = t[0], t[1], t[2]\n tri_count = tri_dict[t]\n bi_count = bi_dict[(p1, p2)]\n if bi_count > 1:\n cond_prob = tri_count * 1.0 / bi_count\n else:\n cond_prob = 0.0\n cond_probs_dict[(p1, p2, p3)] = cond_prob\n\n pickle.dump(cond_probs_dict, open(COND_PROBS_PATH, \"wb\"))\n return", "def make_markov_table(ngrams, preset_table={}):\n markov_table = preset_table\n\n for ngram in ngrams:\n if isinstance(ngram, str):\n if not ngram[:-1] in markov_table:\n markov_table[ngram[:-1]] = []\n markov_table[ngram[:-1]].append(ngram[-1])\n else:\n if not tuple(ngram[:-1]) in markov_table:\n markov_table[tuple(ngram[:-1])] = []\n markov_table[tuple(ngram[:-1])].append(ngram[-1])\n\n return markov_table", "def build_ngram_vocab(self, n):\n max_ngram_per_word = 0\n ngram_dict = collections.defaultdict(int)\n for word in self.train_data:\n if word == self.eos or word == self.sos:\n continue\n _word = '^' + word + '$'\n ngram_counts = len(_word) - n + 1\n if ngram_counts > max_ngram_per_word:\n max_ngram_per_word = ngram_counts\n for i in range(ngram_counts):\n ngram = _word[i:i + n]\n ngram_dict[ngram] += 1\n\n unk_ngram_list = set()\n item_to_id = dict()\n item_to_id[constants.PAD_ITEM] = len(item_to_id)\n item_to_id[constants.UNK_ITEM] = len(item_to_id)\n sorted_dict = sorted(ngram_dict.items(), key=operator.itemgetter(1), reverse=True)\n for token, freq in sorted_dict:\n if freq == 1:\n unk_ngram_list.add(token)\n if token not in item_to_id:\n item_to_id[token] = len(item_to_id)\n return item_to_id, unk_ngram_list, max_ngram_per_word", "def test_make_otu_table_taxonomy(self):\r\n otu_map_lines = \"\"\"0\tABC_0\tDEF_1\r\n1\tABC_1\r\nx\tGHI_2\tGHI_3\tGHI_77\r\nz\tDEF_3\tXYZ_1\"\"\".split('\\n')\r\n taxonomy = {'0': ['Bacteria', 'Firmicutes'],\r\n 'x': ['Bacteria', 'Bacteroidetes']}\r\n obs = make_otu_table(\r\n otu_map_lines,\r\n taxonomy,\r\n constructor=DenseOTUTable)\r\n exp = \"\"\"{\"rows\": [{\"id\": \"0\", \"metadata\": {\"taxonomy\": [\"Bacteria\", \"Firmicutes\"]}}, {\"id\": \"1\", \"metadata\": {\"taxonomy\": [\"None\"]}}, {\"id\": \"x\", \"metadata\": {\"taxonomy\": [\"Bacteria\", \"Bacteroidetes\"]}}, {\"id\": \"z\", \"metadata\": {\"taxonomy\": [\"None\"]}}], \"format\": \"Biological Observation Matrix 0.9dev\", \"data\": [[1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 3.0, 0.0], [0.0, 1.0, 0.0, 1.0]], \"columns\": [{\"id\": \"ABC\", \"metadata\": null}, {\"id\": \"DEF\", 
\"metadata\": null}, {\"id\": \"GHI\", \"metadata\": null}, {\"id\": \"XYZ\", \"metadata\": null}], \"generated_by\": \"QIIME 1.4.0-dev, svn revision 2532\", \"matrix_type\": \"dense\", \"shape\": [4, 4], \"format_url\": \"http://biom-format.org\", \"date\": \"2011-12-21T00:19:30.961477\", \"type\": \"OTU table\", \"id\": null, \"matrix_element_type\": \"float\"}\"\"\"\r\n self.assertEqual(\r\n parse_biom_table(obs.split('\\n')),\r\n parse_biom_table(exp.split('\\n')))", "def _process_phenotype(self, limit):\n\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n raw = '/'.join((self.rawdir, 'phenotype'))\n logger.info(\"processing phenotype\")\n\n line_counter = 0\n\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (phenotype_id, uniquename, observable_id, attr_id, value,\n cvalue_id, assay_id) = line\n\n # 8505\tunspecified\n # 20142\tmesothoracic leg disc | somatic clone 87719 60468 60468 60468\n # 8507\tsex comb | ectopic 88877 60468 60468 60468\n # 8508\ttarsal segment\t83664 60468 60468 60468\n # 18404\toocyte | oogenesis stage S9\t86769 60468 60468 60468\n # for now make these as phenotypic classes\n # will need to xref at some point\n phenotype_key = phenotype_id\n phenotype_id = None\n phenotype_internal_id = self._makeInternalIdentifier(\n 'phenotype', phenotype_key)\n phenotype_label = None\n self.label_hash[phenotype_internal_id] = uniquename\n cvterm_id = None\n if observable_id != '' \\\n and int(observable_id) == 60468:\n # undefined - typically these are already phenotypes\n if cvalue_id in self.idhash['cvterm']:\n cvterm_id = self.idhash['cvterm'][cvalue_id]\n phenotype_id = self.idhash['cvterm'][cvalue_id]\n elif observable_id in self.idhash['cvterm']:\n # observations to anatomical classes\n cvterm_id = self.idhash['cvterm'][observable_id]\n phenotype_id = \\\n self.idhash['cvterm'][observable_id] + 'PHENOTYPE'\n if cvterm_id is not None and cvterm_id in self.label_hash:\n phenotype_label = self.label_hash[cvterm_id]\n phenotype_label += ' phenotype'\n self.label_hash[phenotype_id] = phenotype_label\n else:\n logger.info('cvtermid=%s not in label_hash', cvterm_id)\n\n else:\n logger.info(\n \"No observable id or label for %s: %s\",\n phenotype_key, uniquename)\n\n # TODO store this composite phenotype in some way\n # as a proper class definition?\n self.idhash['phenotype'][phenotype_key] = phenotype_id\n\n # assay_id is currently only \"undefined\" key=60468\n\n if not self.testMode and\\\n limit is not None and line_counter > limit:\n pass\n else:\n if phenotype_id is not None:\n # assume that these fit into the phenotypic uberpheno\n # elsewhere\n model.addClassToGraph(phenotype_id, phenotype_label)\n line_counter += 1\n\n return", "def makeTableNamesList(n, ):", "def phenotypes(self):\n\t\treturn Phenotype.PhenotypesByPatient(self.id, self.host)", "def build_table(oS, audio, config):\n temp = os.path.join(audio, \"temp.csv\")\n table = pd.DataFrame()\n for wav_file in os.listdir(audio):\n if (\"vocal\" in wav_file and \"button\" not in wav_file and\n wav_file.endswith(\".wav\")):\n ursi = wav_file[:9]\n condition, trial = wav_file[-13:-4].lstrip(\"_\").split(\"_\", 1)\n oScommand = oS + \" -C \" + config + \" -I \" + os.path.join(audio,\n wav_file) + \" -O \" + temp\n print(oScommand)\n subprocess.call(oScommand, shell=True)\n features = read_temp(temp)\n os.remove(temp)\n try:\n trial_no = str(int(trial[-3:]))\n except:\n 
trial_no = str(int(wav_file.split(\"exp\")[1].rstrip(\".wav\"))\n )\n table = table.append({\"URSI\": ursi.upper(), \"stranger\":\n condition, \"observation\": trial_no, **features},\n ignore_index=True)\n return(table.sort_values([\"URSI\", \"stranger\", \"observation\"]))", "def mono_table():\n return Table(\n {\n \"obs_id\": [1, 1, 1, 1, 1, 2],\n \"event_id\": [1, 1, 1, 2, 2, 1],\n \"tel_id\": [1, 2, 3, 5, 7, 1],\n \"hillas_intensity\": [1, 2, 0, 1, 5, 9],\n \"hillas_width\": [0.1, 0.2, 0.1, 0.1, 0.2, 0.1] * u.deg,\n \"hillas_length\": 3 * ([0.1, 0.2, 0.1, 0.1, 0.2, 0.1] * u.deg),\n \"dummy_tel_energy\": [1, 10, 4, 0.5, 0.7, 1] * u.TeV,\n \"dummy_tel_is_valid\": [\n True,\n True,\n True,\n True,\n False,\n False,\n ],\n \"classifier_tel_prediction\": [1, 0, 0.5, 0, 0.6, 1],\n \"classifier_tel_is_valid\": [\n True,\n True,\n False,\n True,\n True,\n True,\n ],\n \"disp_tel_alt\": [58.5, 58, 62.5, 72, 74.5, 81] * u.deg,\n \"disp_tel_az\": [12.5, 15, 13, 21, 20, 14.5] * u.deg,\n \"disp_tel_is_valid\": [\n True,\n False,\n True,\n True,\n True,\n True,\n ],\n }\n )", "def phenotypes(self):\n return self.data.phenotypes.values", "def build_unigram_table(occ_dict, table_size):\n unigram_table = list()\n\n Z = sum(v**(3/4) for v in occ_dict.values())\n for k, v in occ_dict.items():\n p_wi = (v**(3/4))/Z\n unigram_table += [k]*int(p_wi*table_size)\n return unigram_table", "def _build_micro_tree_tables(self):\n # A mapping that associates micro tree encoding with its corresponding table.\n self._micro_tables = {}\n\n # A mapping that stores the encoding of each micro tree.\n self._codes = {}\n\n # For every micro tree compute a simle table to answer LA queries.\n for p in self._micro_roots:\n code, f, f_inv = self._encode(p) # encode the micro tree\n self._codes[p.index()] = code, f, f_inv\n if code not in self._micro_tables: # build a simple table if needed\n repr_tree = self._decode(code)\n self._micro_tables[code] = LA_table(repr_tree)", "def build_morpheme_vocab(self):\n max_morph_per_word = 0\n morpheme_dict = collections.defaultdict(int)\n splitter = \"@@\"\n for token in self.train_data:\n if token == self.eos or token == self.sos:\n continue\n token = '^' + token + '$'\n morphemes = token.split(splitter)\n if len(morphemes) > max_morph_per_word:\n max_morph_per_word = len(morphemes)\n for morpheme in morphemes:\n morpheme_dict[morpheme] += 1\n\n unk_morpheme_list = set()\n item_to_id = dict()\n item_to_id[constants.PAD_ITEM] = len(item_to_id)\n item_to_id[constants.UNK_ITEM] = len(item_to_id)\n sorted_dict = sorted(morpheme_dict.items(), key=operator.itemgetter(1), reverse=True)\n for token, freq in sorted_dict:\n if freq == 1:\n unk_morpheme_list.add(token)\n if token not in item_to_id:\n item_to_id[token] = len(item_to_id)\n return item_to_id, unk_morpheme_list, max_morph_per_word", "def buildJustInferlinkTextTableFromScratch(output_file = None):\r\n\r\n table = []\r\n properties = [':phone', ':email', ':physical_address', ':posting_date', ':location', ':name', ':service']\r\n for property in properties:\r\n dict = {}\r\n dict['onto_prop'] = property\r\n mappings = []\r\n tmp = {}\r\n if property == ':phone':\r\n tmp['inferlink_text'] = 'build_match_clause_and'\r\n else:\r\n tmp['inferlink_text'] = 'build_match_clause'\r\n mappings.append(tmp)\r\n dict['mappings'] = mappings\r\n\r\n table.append(dict)\r\n\r\n\r\n\r\n if output_file:\r\n file = codecs.open(output_file, 'w', 'utf-8')\r\n for entry in table:\r\n json.dump(entry, file)\r\n file.write('\\n')\r\n file.close()", "def 
_build_genotypes(self):\n x = np.zeros(self.n)\n \n # Frequencies derived from HWE.\n num_hetero = 2 * self.maf * (1 - self.maf) * self.n\n num_homo_minor = self.maf ** 2 * self.n\n \n x[:num_hetero] = 1\n x[num_hetero:num_hetero+num_homo_minor] = 2\n np.random.shuffle(x)\n \n # Add noise for dosage values if needed.\n if self.dosage_var:\n x[x == 0] += np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 0]))\n )\n x[x == 1] += np.random.normal(0, self.dosage_var, len(x[x == 1]))\n x[x == 2] -= np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 2]))\n )\n\n # Mask some values if the call rate is not 1.\n if self.call_rate < 1:\n missing_rate = 1 - self.call_rate\n missing_number = missing_rate * self.n\n missing_idx = np.arange(0, self.n)\n np.random.shuffle(missing_idx)\n missing_idx = missing_idx[:missing_number]\n x[missing_idx] = np.nan\n \n return x", "def simple_genotype_matrix(n, p):\n genotypes = np.zeros(shape=(n, p))\n for item in range(0, p):\n genotypes[:, item] = np.random.binomial(1, np.random.uniform(0.1, 0.5, 1), n)\n\n return genotypes", "def __init__ ( self , phenotypes ) :\n\t\tfor k , v in phenotypes.items():\n\t\t\tassert type( k ) is str , 'phenotype keys must be strings'\n\t\t\tassert v[1] > v[0] , 'upper bound of ' + k + ' must be greater than the lower bound'\n\t\t\tassert type( v[1] ) is int and type( v[0] ) is int, ' (!) recent change means bounds need to be in ints now: https://github.com/zafarali/metastasis/issues/17'\n\n\t\tself.phenotypes = phenotypes", "def build_table():\n with contextlib.ExitStack() as stack:\n files = [stack.enter_context(gzip.open(f, 'rt')) for f in sys.argv[1:]]\n iters = [(line.split() for line in f) for f in files]\n for it in iters:\n next(it)\n key = operator.itemgetter(0)\n table = []\n for k, g in itertools.groupby(merge(*iters, key=key), key=key):\n props = list(g)\n if len(props) == len(iters):\n table.append([k] + [x[1] for x in props])\n for snp in table:\n print(*snp)", "def __makeNgrams(self, n):\n # start_time = time.time()\n ngrams = dict()\n itergrams = dict()\n\n for k in range(2,n+1):\n itergrams[k] = list(nltk.ngrams(self.words, k))\n\n for k, grams in itergrams.items():\n kgrams = defaultdict(Counter)\n for gram in grams: \n kgram = list(gram)\n key = ' '.join(kgram[:k-1])\n kgrams[key].update({kgram[-1]})\n ngrams[k] = kgrams\n # print ('finish gen ', k, 'grams at ', time.time()-start_time)\n return ngrams", "def __init__ ( self , phenotypes ):\n\t\tself.counts = {}\n\t\tfor k , v in phenotypes.items():\n\t\t\tassert type( k ) is str , 'phenotype keys must be strings'\n\t\t\t\n\t\t\tself.counts[ k ] = 0\n\n\t\t\tassert v[1] > v[0] , 'upper bound of ' + k + ' must be greater than the lower bound'\n\n\t\tself.phenotypes = phenotypes", "def nk_table(self):\n return self.map(\"keys\", \"values\")", "def ctable_poyos():\n\t#quito espacios y no agrego _ por convencion (_ para relaciones y no atributos)\n\tcur.execute(\"CREATE TABLE poyo (\\\n\t\t\t\tpokedex INT,\\\n\t\t\t\tnombre VARCHAR(40) NOT NULL PRIMARY KEY,\\\n\t\t\t\ttype1 VARCHAR(20),\\\n\t\t\t\ttype2 VARCHAR(20),\\\n\t\t\t\thptotal INT,\\\n\t\t\t\tlegendary NUMBER(1))\"\n\t\t\t\t)\n\tconnection.commit()\n\t# Uso NUMBER(1) en vez de BOOLEAN para representar los booleanos\n\t# Uso usecols que guarda solo algunas columnas\n\tpkmn = pandas.read_csv('pokemon.csv',sep=\",\",usecols=(0, 1, 2, 3 , 4, 12))\n\t# Leer fila por fila para pillar True/False y NaN\n\tadd_row_pkmn = (\"\"\"\n\t\t\t\t\tINSERT INTO poyo \n\t\t \t\t\t(pokedex, nombre, type1, type2, 
hptotal, legendary) \n\t\t \t\t\tVALUES (:1,:2,:3,:4,:5,:6)\"\"\"\n\t\t \t\t\t)\n\tfor d in pkmn.values:\n\t\tt2 = d[3]\n\t\tbln = 0\n\t\tif not isinstance(t2, str):\n\t\t\t# Caso NaN\n\t\t\tt2 = \"\"\n\t\tif d[-1] == True:\n\t\t\tbln = 1\n\t\trow_pkmn = [int(d[0]), d[1], d[2], t2, int(d[4]), bln]\n\t\tcur.execute(add_row_pkmn, row_pkmn)\n\t\tconnection.commit()", "def config_antenna(tbl):\r\n\r\n antenna = tbl.data\r\n for i in range(0,tbl.data.size):\r\n antenna[i]['ANNAME'] = 'FIRI_%i' % i\r\n antenna[i]['ANTENNA_NO'] = i\r\n antenna[i]['ARRAY'] = 1\r\n antenna[i]['FREQID'] = 1\r\n antenna[i]['NO_LEVELS'] = 12\r\n antenna[i]['POLTYA'] = 'X'\r\n antenna[i]['POLTYB'] = 'X'\r\n antenna[i]['POLAA'] = np.array(0)\r\n antenna[i]['POLAB'] = np.array(0)\r\n\r\n tbl.data = antenna\r\n return tbl", "def _process_genotypes(self, limit):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n line_counter = 0\n\n raw = '/'.join((self.rawdir, 'genotype'))\n logger.info(\"building labels for genotypes\")\n geno = Genotype(g)\n fly_tax = 'NCBITaxon:7227'\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (genotype_num, uniquename, description, name) = line\n\n # if self.testMode is True:\n # if int(object_key) not in self.test_keys.get('genotype'):\n # continue\n\n # add the internal genotype to pub mapping\n genotype_id = 'MONARCH:FBgeno'+str(genotype_num)\n self.idhash['genotype'][genotype_num] = genotype_id\n\n if description == '':\n description = None\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if self.testMode and \\\n int(genotype_num) not in \\\n self.test_keys['genotype']:\n continue\n\n model.addIndividualToGraph(\n genotype_id, uniquename,\n Genotype.genoparts['intrinsic_genotype'],\n description)\n # we know all genotypes are in flies\n # FIXME we assume here they are in melanogaster,\n # but that isn't necessarily true!!!\n # TODO should the taxon be == genomic background?\n geno.addTaxon(fly_tax, genotype_id)\n genotype_iid = self._makeInternalIdentifier(\n 'genotype', genotype_num)\n model.addComment(\n genotype_id, genotype_iid)\n if name.strip() != '':\n model.addSynonym(genotype_id, name)\n\n return", "def build_hap_dict(self, obs_tab, leg_tab, hap_tab, number_of_haplotypes):\n\n hap_dict = dict()\n mismatches = 0\n combined = {pos: (ref,alt,hap) for (chr_id,pos,ref,alt),hap in zip(leg_tab, hap_tab)}\n missing = 3*(None,)\n\n b = (1 << number_of_haplotypes) - 1 #### equivalent to int('1'*number_of_haplotypes,2)\n\n for (pos, read_id, base) in obs_tab:\n ref, alt, hap = combined.get(pos, missing)\n if base==alt:\n hap_dict[(pos,base)] = hap\n elif base==ref:\n hap_dict[(pos,base)] = hap ^ b ### ^b flips all bits of the binary number, hap_tab[ind] using bitwise xor operator.\n else:\n mismatches += 1\n\n fraction_of_matches = 1-mismatches/len(obs_tab)\n\n return hap_dict, fraction_of_matches", "def prepare_table(self):\n i = 0\n for item in ['DN[-]', 'd_out[mm]', 'tl_trub[mm]', 'roztec_trub[mm]', 'delka[mm]', 'roztec_prep[mm]', 'vyska_prep[mm]']:\n self.table.insertColumn(i)\n self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))\n i += 1\n for item in ['tl_prep[mm]','pocet_prep[-]', 'pocet_trub[-]', 'TP[m/s]', 'MZP[m/s]', 'vykon [W]',\n 'tlak_ztraty[Pa]', 'hmotnost[kg]']:\n self.table.insertColumn(i)\n self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))\n 
i += 1", "def generate_aliased_tables_for_labelling(properties):\n aliased_joins = []\n for prop_i, prop in enumerate(properties):\n if prop == 0: # recall we are faking sitelinks as property 0\n label_table = label_misc\n join_key = 'src'\n elif prop in [Properties.DATE_OF_BIRTH.value, Properties.DATE_OF_DEATH.value]:\n label_table = None # there is no join to be made\n join_key = None # there is no join to be made\n else:\n label_table = label\n join_key = 'qid'\n aliased_label = aliased(label_table, name=f\"label_{prop_i}\") if label_table else None\n join_data = {'label_table': aliased_label, 'join_key': join_key}\n aliased_joins.append(join_data)\n return aliased_joins", "def buildJustReadabilityTextTableFromScratch(output_file = None):\r\n\r\n table = []\r\n properties = [':phone', ':email', ':physical_address', ':posting_date', ':location', ':name', ':service']\r\n for property in properties:\r\n dict = {}\r\n dict['onto_prop'] = property\r\n mappings = []\r\n tmp = {}\r\n if property == ':phone':\r\n tmp['readability_text'] = 'build_match_clause_and'\r\n else:\r\n tmp['readability_text'] = 'build_match_clause'\r\n mappings.append(tmp)\r\n dict['mappings'] = mappings\r\n\r\n table.append(dict)\r\n\r\n\r\n\r\n if output_file:\r\n file = codecs.open(output_file, 'w', 'utf-8')\r\n for entry in table:\r\n json.dump(entry, file)\r\n file.write('\\n')\r\n file.close()", "def get_tags_for_NOx_HONO(AllTags=False):\n diags = [\n # Version 6 tags\n 'ProdHNO2fromHvNIT', 'ProdHNO2fromHvNITs', 'ProdHNO2fromHvNITD1',\n 'ProdHNO2fromHvNITD2', 'ProdHNO2fromHvNITD3', 'ProdHNO2fromHvNITD4',\n 'ProdNO2fromHvNIT', 'ProdNO2fromHvNITs', 'ProdNO2fromHvNITD1',\n 'ProdNO2fromHvNITD2', 'ProdNO2fromHvNITD3', 'ProdNO2fromHvNITD4',\n 'ProdNO2fromHONO', 'ProdHNO2fromOHandNO', 'ProdHNO2fromHET',\n 'ProdNOnHO2ChannelA', 'ProdNOnHO2ChannelB',\n # Version 7 tags\n 'ProdHNO3fromNO2nOH','ProdNO3fromHNO3nOH',\n 'PhotNO2', 'PhotHNO3', 'PhotHNO2',\n 'ProdHNO3fromHetNO3', 'ProdNITfromHetNO3','ProdNITsfromHetNO3',\n ]\n prefix = 'TN{:0>3}'\n tags = [prefix.format(i+1) for i in range(len(diags))]\n # pair up numbering (so that runs with different diagnostics have same #s)?\n d = dict(zip(diags, tags))\n # Include the automatic tagging of NOx\n def mk_KPP_tag_from_rxn_str(rxn_str=None, search_str=None,\n prefix='ProdfromRXN', ):\n \"\"\"\n Create a variable for reaction\n \"\"\"\n reactants = rxn_str.split('=')[0]\n reactants = reactants.replace(' + ', '_n_')\n reactants = reactants.replace(' {+M} ', '_M_').strip()\n products = rxn_str.split('=')[-1]\n products = products.replace(' + ', '_n_')\n products = products.replace(' {+M} ', '_M_').strip()\n products = products.replace(' {+M}', '_M').strip()\n products = products[:10]\n # Return a new reaction string\n return'{}_{}_{}_to_{}'.format(prefix, search_str, reactants, products)\n\n if AllTags:\n DataRoot = get_local_folder('DataRoot')\n folder = '{}{}'.format(DataRoot, '/ARNA/Misc/')\n# FName = 'Tagged_reactions_in_Standard_v12.9.1_ARNA_v8_POx_tagged.csv'\n FName = 'Tagged_reactions_in_Standard_v12.9_ARNA_v9_PL_NOx_tagged.csv'\n df = pd.read_csv(folder+FName)\n# df['RxnName'] = df['rxn_str'].map(mk_KPP_tag_from_rxn_str)\n df['RxnName'] = df.apply(lambda x:\n mk_KPP_tag_from_rxn_str(rxn_str=x['rxn_str'],\n search_str = x['search_str'], ),\n axis=1)\n\n # combine into main dictionary\n d2 = dict(zip( df['RxnName'], df['tag'].values ) )\n d = AC.merge_two_dicts(d, d2)\n return d" ]
[ "0.59477466", "0.571061", "0.56416917", "0.545999", "0.5419914", "0.5342602", "0.52819085", "0.52390313", "0.51898134", "0.5189629", "0.5152559", "0.5144572", "0.51429605", "0.5127938", "0.51271623", "0.50853604", "0.5077953", "0.5057735", "0.5053897", "0.50402", "0.5030632", "0.5009791", "0.499493", "0.49942338", "0.49880007", "0.49863744", "0.49519405", "0.49518624", "0.49192935", "0.49117655" ]
0.7407428
0
Achieves "def get_caseX_indices_matching_symbol" but on a list of symbols.
def get_caseX_indices_matching_symbol_list(result,match_symbols = ["NA"]):
    #
    # Sub-function
    #
    def get_caseX_indices_matching_symbol(result,match_symbol = "NA"):
        """ get a list of indices of case4 (see: @caseX) elements from @result

        Args:
            result: @@result: an @uncollasped punnet square, which represents the @punnet imported as an R-like dataframe from: ../R/punnet.csv
            match_symbol: default: "NA", the string that represents elements in the @@punnet that have been killed, carrying a critically-short @telomere, T0
        Returns:
            @todo

        """
        caseX_matching = np.in1d(result.ravel(),[match_symbol]).reshape(result.shape) # Case4: (_T0)&(__): critically short, lethal T0-carring diploid individuals die
        caseX_indices_tmp = np.where(caseX_matching) # locations (indicies)
        caseX_indices = zip(caseX_indices_tmp[0],caseX_indices_tmp[1])

        return caseX_indices
    #
    # Main
    #
    caseX_indices = [] # indexes are (x,y) coordinates on the punnet (result) that the symbol is found in, e.g. (2,6)

    symbol_to_index = {}

    for symbol in match_symbols:

        #..Generate matching indices on @Punnet-table (result)
        match_index = get_caseX_indices_matching_symbol(result,symbol)

        #..Multiple matching indices to symbol AND symbol is a gamete frequency
        if (len(match_index)>1) and (not symbol == "NA"):

            symbol_to_index[symbol] = match_index

        #..Multiple matching indices to symbol AND symbol=="NA" (i.e. death)
        elif (len(match_index)>1) and (symbol == "NA"):
            caseX_indices = match_index
            symbol_to_index[symbol] = match_index
            break

        #..Only one matching index to symbol
        else:
            caseX_indices.append(match_index)
            symbol_to_index[symbol] = match_index

    return caseX_indices, symbol_to_index
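For context only (not part of this record), a minimal runnable sketch of the NumPy lookup idiom the function above wraps; the 3x3 punnet array and the genotype symbols below are invented for illustration.

import numpy as np

# Invented punnet of genotype symbols; "NA" marks killed individuals.
punnet = np.array([["A_T1", "NA",   "A_T2"],
                   ["NA",   "B_T1", "B_T2"],
                   ["A_T1", "B_T2", "NA"]])

# Same matching idiom as the sub-function: boolean mask, then (row, col) indices.
na_mask = np.in1d(punnet.ravel(), ["NA"]).reshape(punnet.shape)
rows, cols = np.where(na_mask)
na_indices = list(zip(rows, cols))
print(na_indices)  # [(0, 1), (1, 0), (2, 2)]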
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _index_symbols(symbols):\n symbol_start_indices = []\n next_start_index = 0\n for symbol in symbols:\n entry_count = count_entries(symbol)\n if entry_count > EXAMPLE_SIZE:\n symbol_start_indices.append(next_start_index)\n next_start_index += entry_count - EXAMPLE_SIZE\n total_examples = next_start_index\n return symbol_start_indices, total_examples", "def tokenLookup(instrument_df,symbol_list):\n token_list = []\n for symbol in symbol_list:\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\n return token_list", "def tokenLookup(instrument_df,symbol_list):\r\n token_list = []\r\n for symbol in symbol_list:\r\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\r\n return token_list", "def tokenLookup(instrument_df,symbol_list):\r\n token_list = []\r\n for symbol in symbol_list:\r\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\r\n return token_list", "def tokenLookup(instrument_df, symbol_list):\n token_list = []\n for symbol in symbol_list:\n token_list.append(int(instrument_df[instrument_df.tradingsymbol == symbol].instrument_token.values[0]))\n return token_list", "def find1symbols(symbol, reel):\n for i in range(len(reel)):\n if reel[i] == symbol:\n return i", "def get_indexes(self, x):\n indexes = []\n for index_hashes in self.hash_functions:\n combined_index = []\n for idx_spec, hash_func in zip(self.config.index_specs, index_hashes):\n combined_index.append(idx_spec.distribution.get_index(hash_func(x)))\n indexes.append(tuple(combined_index))\n return indexes", "def find_symbols(self, **kw):\n return list(self.ifind_symbols(**kw))", "def find_symbols(lst):\n ret = []\n for ii in lst:\n ret += [find_symbol(ii)]\n return ret", "def findings_2_idx(findings, corner_2_idx, funcx, funcy):\n idx = []\n for finding in findings:\n x, y = finding\n mesh = np.array(np.meshgrid(funcx(x), funcy(y))).swapaxes(1,2).reshape(2,-1).T\n idx.extend([corner_2_idx(c) for c in mesh])\n\n return np.unique(idx)", "def __symbols_are_close_in_equation(symbol_1_indices, symbol_2_indices):\n\n for index_1 in symbol_1_indices:\n for index_2 in symbol_2_indices:\n if index_1 == index_2 + 1:\n return index_2, index_1\n elif index_2 == index_1 + 1:\n return index_1, index_2\n return -1, -1", "def map_case_to_punnet_indices(caseX, caseX_sym_to_coord):\n\n caseCoord_to_punnetCoord = {}\n\n for y,j in enumerate(caseX):\n for x,i in enumerate(j):\n\n case_coord = (y,x)\n\n punnet_coord = caseX_sym_to_coord[i]\n\n caseCoord_to_punnetCoord[case_coord] = punnet_coord\n\n return caseCoord_to_punnetCoord", "def find3symbols(symbol1, symbol2, symbol3, reel):\n for i in range(len(reel)-3):\n if reel[i] == symbol1 and reel[i+1] == symbol2 and reel[i+2]== symbol3:\n return i", "def find_letter_indices(words, letter):\n\n return []", "def get_source_indices(sent, dic):\n clean_sent = cleanup_sentence(sent)\n words = clean_sent.split(' ')\n n_words = len(words) + 1 # counting for the </s>\n indices = np.zeros(n_words)\n cnt = 0\n nsrc_unk = 0\n unk_idx = dic.symbol_to_index[\"<unk>\"]\n eos_idx = dic.symbol_to_index[\"</s>\"]\n for i, word in enumerate(words):\n wid = dic.symbol_to_index.get(word, None)\n if wid is None:\n indices[cnt] = unk_idx\n nsrc_unk += 1\n else:\n indices[cnt] = wid\n if wid == unk_idx:\n nsrc_unk += 1\n cnt += 1\n indices[cnt] = eos_idx\n cnt += 1\n return indices, indices.shape[0], nsrc_unk", "def find2symbols(symbol1, symbol2, reel):\n for 
i in range(len(reel)-2):\n if reel[i] == symbol1 and reel[i+1] == symbol2:\n return i", "def tokens_to_idxs(self, token_seqs, lexicon):\n idx_seqs = [[lexicon[token] if token in lexicon else lexicon['<UNK>'] for \n token in token_seq] for token_seq in token_seqs]\n return idx_seqs", "def get_predicate_indices(tags: List[str]) -> List[int]:\n return [ind for ind, tag in enumerate(tags) if \"V\" in tag]", "def _soft_idx(x, y):\n return _silent_idx(_alphanum_list(x), _alphanum(y))", "def ifind_at(self, x, y):\n for sym in self.itersymbols():\n bx0,by0,bx1,by1 = sym.sym.bbox()\n if bx0 <= x <= bx1 and by0 <= y <= by1:\n yield sym.sym", "def _init_symbol_tracker(self):\n # Initialize with an empty set\n atoms_indx = {symb: set([]) for symb in self.symbols}\n\n # Populate the sets\n for atom in self.atoms:\n symb = atom.symbol\n atoms_indx[symb].add(atom.index)\n return atoms_indx", "def compute_char_indices(\n context_qas: List[ContextQuestionAnswer]\n ) -> Dict[str, int]:\n chars: Set[str] = set()\n for ctx in context_qas:\n for tok in ctx.tokens:\n chars.update(set(char for char in tok.word))\n for qa in ctx.qas:\n for tok in qa.tokens:\n chars.update(set(char for char in tok.word))\n char_mapping: Dict[str, int] = {\n char: idx for idx, char in enumerate(chars, 2)\n } # idx 1 reserved for UNK\n return char_mapping", "def _find_index(string):\n if string[0] == 'X':\n return 0\n elif string == 'D':\n return 1\n else:\n return np.where(sym == string)[0][0]", "def get_indexes(from_list, find_list):\n\n df_find = pd.DataFrame(find_list, columns=['value'])\n df_from = pd.DataFrame(list(zip(from_list, np.arange(len(from_list)))), columns=['value', 'index'])\n indexes = pd.merge(df_from, df_find, on='value', how='inner')['index'].values\n return indexes", "def match(\n x: Any,\n table: Iterable,\n nomatch: Any = -1,\n # incomparables ...,\n base0_: bool = None,\n) -> Iterable[int]:\n base = int(not get_option(\"which_base_0\", base0_))\n return Array(\n [\n list(table).index(elem) + base if elem in table else nomatch\n for elem in x\n ],\n dtype=int,\n )", "def _get_indices(self, parts: List[str], keys: List[str]):\n for key in keys:\n yield parts.index(key)", "def names_to_indices(names, ordered_names):\r\n indices = []\r\n names_list = list(names)\r\n for ordered_name in ordered_names:\r\n if ordered_name in names_list:\r\n indices.append(names_list.index(ordered_name))\r\n return array(indices)", "def getFeaturesIndices(self, tag, history, in_data=True):\n indices = super().getFeaturesIndices(tag, history, in_data)\n word = history.getWord()\n position = history.getIndex()\n for suffix in self.data.getSuffixesForWord(word):\n self.__checkFeatureIndex__(self.__f101__((suffix, tag)), indices)\n for prefix in self.data.getPrefixesForWord(word):\n self.__checkFeatureIndex__(self.__f102__((prefix, tag)), indices)\n self.__checkFeatureIndex__(self.__f105__(tag), indices)\n self.__checkFeatureIndex__(self.__fNum__(word), indices)\n self.__checkFeatureIndex__(self.__fCap__(word, position), indices)\n return indices", "def process_index(index, intensity, interaction_symbol):\n return tuple(index.split(interaction_symbol))", "def indmatch(ra1, dec1, ra2, dec2, tol):\n m = match(ra1, dec1, ra2, dec2, tol)\n c = m.ind > -1\n i1 = c.nonzero()[0]\n i2 = m.ind[c]\n return i1, i2" ]
[ "0.65377474", "0.6477875", "0.6457072", "0.6457072", "0.64341956", "0.61545986", "0.6037521", "0.6014018", "0.58861655", "0.5723973", "0.5679167", "0.5665417", "0.5510377", "0.54939115", "0.54873234", "0.5473831", "0.546451", "0.54531556", "0.5384212", "0.53800964", "0.53782827", "0.5325455", "0.5298102", "0.5259755", "0.5250705", "0.52476764", "0.52384716", "0.5203329", "0.5198002", "0.5188049" ]
0.74190253
0
Maps the locations of symbols in caseX to the corresponding symbols in the punnet.
def map_case_to_punnet_indices(caseX, caseX_sym_to_coord):

    caseCoord_to_punnetCoord = {}

    for y,j in enumerate(caseX):
        for x,i in enumerate(j):

            case_coord = (y,x)

            punnet_coord = caseX_sym_to_coord[i]

            caseCoord_to_punnetCoord[case_coord] = punnet_coord

    return caseCoord_to_punnetCoord
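For context only (not part of this record), a minimal sketch of the traversal the function above performs; the 2x2 caseX grid and the symbol-to-punnet-coordinate dictionary below are invented for illustration.

# Invented case grid of symbols and a symbol -> punnet-coordinate lookup.
caseX = [["aT1", "aT2"],
         ["bT1", "bT2"]]
caseX_sym_to_coord = {"aT1": (0, 0), "aT2": (0, 3),
                      "bT1": (2, 0), "bT2": (2, 3)}

# Every (row, col) position in the case grid is keyed to the punnet
# coordinate of the symbol found there, as in map_case_to_punnet_indices.
caseCoord_to_punnetCoord = {}
for y, row in enumerate(caseX):
    for x, symbol in enumerate(row):
        caseCoord_to_punnetCoord[(y, x)] = caseX_sym_to_coord[symbol]

print(caseCoord_to_punnetCoord)
# {(0, 0): (0, 0), (0, 1): (0, 3), (1, 0): (2, 0), (1, 1): (2, 3)}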
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_symbol_mapping():\n normal_items = [\"+\", \"-\"]\n unicode_items = [chr(0x2070 + i) for i in range(10, 12)]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_items, unicode_items))", "def __set_symbol_dict(self):\r\n return {0: list(alph) if self.is_case_snstv else list(alph)[:26],\r\n 1: list(dgt),\r\n 2: list(spcl) if self.is_spcl else []}", "def _create_subscript_mapping():\n # Create the normal and subscript digits list.\n normal_digits = [i for i in range(10)]\n subscript_digits = [chr(0x2080 + i) for i in range(10)]\n\n # Convert the normal digits to strings.\n normal_digits = [str(i) for i in normal_digits]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_digits, subscript_digits))", "def get_caseX_indices_matching_symbol_list(result,match_symbols = [\"NA\"]):\n #\n # Sub-function\n #\n def get_caseX_indices_matching_symbol(result,match_symbol = \"NA\"):\n \"\"\" get a list of indices of case4 (see: @caseX) elements from @result\n\n Args:\n result: @@result: an @uncollasped punnet square, which represents the @punnet imported as an R-like dataframe from: ../R/punnet.csv\n match_symbol: default: \"NA\", the string that represents elements in the @@punnet that have been killed, carrying a critically-short @telomere, T0\n Returns:\n @todo\n\n \"\"\"\n\n caseX_matching = np.in1d(result.ravel(),[match_symbol]).reshape(result.shape) # Case4: (_T0)&(__): critically short, lethal T0-carring diploid individuals die\n caseX_indices_tmp = np.where(caseX_matching) # locations (indicies)\n caseX_indices = zip(caseX_indices_tmp[0],caseX_indices_tmp[1])\n\n return caseX_indices\n #\n # Main\n #\n caseX_indices = [] # indexes are (x,y) coordinates on the punnet (result) that the symbol is found in, e.g. (2,6)\n\n symbol_to_index = {}\n\n for symbol in match_symbols:\n\n #..Generate matching indices on @Punnet-table (result) \n match_index = get_caseX_indices_matching_symbol(result,symbol)\n\n #..Multiple matching indices to symbol AND symbol is a gamete frequency \n if (len(match_index)>1) and (not symbol == \"NA\"):\n\n symbol_to_index[symbol] = match_index\n\n #..Multiple matching indices to symbol AND symbol==\"NA\" (i.e. 
death)\n elif (len(match_index)>1) and (symbol == \"NA\"):\n caseX_indices = match_index\n symbol_to_index[symbol] = match_index\n break\n\n #..Only one matching index to symbol\n else:\n caseX_indices.append(match_index)\n symbol_to_index[symbol] = match_index\n\n return caseX_indices, symbol_to_index", "def symbol_state_map(self):\n map = {}\n for state in self:\n map[state.symbol] = state\n map.update(self.symbol_synonyms)\n if not self.case_sensitive:\n for state in self:\n if state.symbol.islower():\n map[state.symbol.upper()] = state\n else:\n map[state.symbol.lower()] = state\n for symbol, state in self.symbol_synonyms.items():\n if symbol.islower():\n map[symbol.upper()] = state\n else:\n map[symbol.lower()] = state\n return map", "def test_GetSymbolMapping_normalize(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/.//myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def create_symbol_to_possible_cell_mapping(self):\r\n symbols_to_cells = defaultdict(set)\r\n for cell in self.iterate_empty_cells():\r\n for symbol in cell.get_possible_symbols():\r\n symbols_to_cells[symbol].add(cell)\r\n return symbols_to_cells", "def map():", "def test_GetSymbolMapping_simple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def test_mouse_sym_to_ens(self):\n\n mapper = EnsemblMapper(\n from_type='symbol',\n to_type='ensembl',\n host=HOST,\n from_organism='mmusculus')\n mapped = mapper.map_ids(['Trp53', 'Brca1'])\n\n assert mapped == ['ENSMUSG00000059552', 'ENSMUSG00000017146']", "def _generate_character_map(self):\n self._ct = [-1] * 256\n index = 0\n for c_range in self._meta.character_ranges:\n for c_pos in range(c_range['min'], c_range['max'] + 1):\n self._ct[c_pos] = index\n index += 1", "def test_GetSymbolMapping_multiple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/otherapp.mojo at /path/to/otherapp.mojo/.kW07s\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\",\n \"/path/to/otherapp.mojo/.kW07s\": \"libotherapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def colorMap(self, char):\n return {\n #'.': 'sienna',\n #'G': 'sienna',\n '.': 'moccasin',\n 'G': 'moccasin',\n 'O': 'black',\n '@': 'black',\n 'S': 'OliveDrab1',\n 'T': 'green4',\n 'W': 'SkyBlue3',\n 'k': 'green3',\n 'D': 'red'\n }[char]", "def get_letter_to_code_mappings():\n return {\n \"a\": \"Alfa\", \"b\": \"Bravo\", \"c\": \"Charlie\", \"d\": \"Delta\", \"e\": \"Echo\",\n \"f\": \"Foxtrot\", \"g\": \"Golf\", \"h\": \"Hotel\", \"i\": \"India\", \"j\":\n \"Juliett\", \"k\": \"Kilo\", \"l\": \"Lima\", \"m\": \"Mike\", \"n\": \"November\", \"o\":\n \"Oscar\", \"p\": \"Papa\", \"q\": 
\"Quebec\", \"r\": \"Romeo\", \"s\": \"Sierra\", \"t\":\n \"Tango\", \"u\": \"Uniform\", \"v\": \"Victor\", \"w\": \"Whiskey\", \"x\": \"Xray\",\n \"y\": \"Yankee\", \"z\": \"Zulu\", \"0\": \"Zero\", \"1\": \"One\", \"2\": \"Two\", \"3\":\n \"Three\", \"4\": \"Four\", \"5\": \"Five\", \"6\": \"Six\", \"7\": \"Seven\", \"8\":\n \"Eight\", \"9\": \"Niner\", \"=\": \"Equals\", \"?\": \"Query\", \"/\": \"Slash\", \",\":\n \"Comma\", \".\": \"Stop\", \":\": \"Colon\", \"'\": \"Apostrophe\", \"-\": \"Dash\",\n \"(\": \"Open\", \")\": \"Close\", \"@\": \"At\",\n }", "def ifind_at(self, x, y):\n for sym in self.itersymbols():\n bx0,by0,bx1,by1 = sym.sym.bbox()\n if bx0 <= x <= bx1 and by0 <= y <= by1:\n yield sym.sym", "def compute_char_indices(\n context_qas: List[ContextQuestionAnswer]\n ) -> Dict[str, int]:\n chars: Set[str] = set()\n for ctx in context_qas:\n for tok in ctx.tokens:\n chars.update(set(char for char in tok.word))\n for qa in ctx.qas:\n for tok in qa.tokens:\n chars.update(set(char for char in tok.word))\n char_mapping: Dict[str, int] = {\n char: idx for idx, char in enumerate(chars, 2)\n } # idx 1 reserved for UNK\n return char_mapping", "def gene_symbol_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene Symbol\"]\n resD[keyI] = valueI\n\n return resD", "def char_mapping(sentences, lower):\n chars = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(chars)\n dico[\"<PAD>\"] = 10000001\n dico['<UNK>'] = 10000000\n char_to_id, id_to_char = create_mapping(dico)\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in chars)\n ))\n return dico, char_to_id, id_to_char", "def resolution_map(names, env):\n return dict(zip(names, [resolve(n, env) for n in names]))", "def get_map(self):\n\n self.mp = defaultdict(lambda : ord('x'))\n y, x = 0, 0\n while True:\n cond, output = self.ic()\n\n if cond: break\n # New row of the print out\n if output == 10:\n y += 1\n x = 0\n # Assign the value to the map\n else:\n self.mp[y,x] = output\n x += 1\n \n return self.mp", "def research_pos(self, map_list, character): \n list_pos = []\n for y in range(15): \n for x, c in enumerate(map_list[y]):\n if character in c and c == character:\n list_pos.append((x*50, y*50)) \n return list_pos", "def _do_mapping(self):\n pass", "def map_word(word, charmap):\n return [charmap[c] for c in word]", "def set_character(self, y_pos, x_pos):\n self.map[y_pos][x_pos] = 'G'", "def test_GetSymbolMapping_parameter_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo?q=hello at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def convert_case(sym):\n lower = sym\n upper = sym\n\n enc = sym >> 8\n\n if enc == 0: # latin1\n if ((sym >= keysymdef.keysyms[\"A\"]) and (sym <= keysymdef.keysyms[\"Z\"])):\n lower += (keysymdef.keysyms[\"a\"] - keysymdef.keysyms[\"A\"])\n elif ((sym >= keysymdef.keysyms[\"a\"]) and (sym <= keysymdef.keysyms[\"z\"])):\n upper -= (keysymdef.keysyms[\"a\"] - keysymdef.keysyms[\"A\"])\n elif ((sym >= keysymdef.keysyms[\"Agrave\"])\n and (sym <= keysymdef.keysyms[\"Odiaeresis\"])):\n lower += (keysymdef.keysyms[\"agrave\"] - keysymdef.keysyms[\"Agrave\"])\n 
elif ((sym >= keysymdef.keysyms[\"agrave\"])\n and (sym <= keysymdef.keysyms[\"odiaeresis\"])):\n upper -= (keysymdef.keysyms[\"agrave\"] - keysymdef.keysyms[\"Agrave\"])\n elif ((sym >= keysymdef.keysyms[\"Ooblique\"]) and (sym <= keysymdef.keysyms[\"Thorn\"])):\n lower += (keysymdef.keysyms[\"oslash\"] - keysymdef.keysyms[\"Ooblique\"])\n elif ((sym >= keysymdef.keysyms[\"oslash\"]) and (sym <= keysymdef.keysyms[\"thorn\"])):\n upper -= (keysymdef.keysyms[\"oslash\"] - keysymdef.keysyms[\"Ooblique\"])\n elif enc == 1: # latin2\n # Assume the KeySym is a legal value (ignore discontinuities)\n if (sym == keysymdef.keysyms[\"Aogonek\"]):\n lower = keysymdef.keysyms[\"aogonek\"]\n elif (sym >= keysymdef.keysyms[\"Lstroke\"] and sym <= keysymdef.keysyms[\"Sacute\"]):\n lower += (keysymdef.keysyms[\"lstroke\"] - keysymdef.keysyms[\"Lstroke\"])\n elif (sym >= keysymdef.keysyms[\"Scaron\"] and sym <= keysymdef.keysyms[\"Zacute\"]):\n lower += (keysymdef.keysyms[\"scaron\"] - keysymdef.keysyms[\"Scaron\"])\n elif (sym >= keysymdef.keysyms[\"Zcaron\"] and sym <= keysymdef.keysyms[\"Zabovedot\"]):\n lower += (keysymdef.keysyms[\"zcaron\"] - keysymdef.keysyms[\"Zcaron\"])\n elif (sym == keysymdef.keysyms[\"aogonek\"]):\n upper = keysymdef.keysyms[\"Aogonek\"]\n elif (sym >= keysymdef.keysyms[\"lstroke\"] and sym <= keysymdef.keysyms[\"sacute\"]):\n upper -= (keysymdef.keysyms[\"lstroke\"] - keysymdef.keysyms[\"Lstroke\"])\n elif (sym >= keysymdef.keysyms[\"scaron\"] and sym <= keysymdef.keysyms[\"zacute\"]):\n upper -= (keysymdef.keysyms[\"scaron\"] - keysymdef.keysyms[\"Scaron\"])\n elif (sym >= keysymdef.keysyms[\"zcaron\"] and sym <= keysymdef.keysyms[\"zabovedot\"]):\n upper -= (keysymdef.keysyms[\"zcaron\"] - keysymdef.keysyms[\"Zcaron\"])\n elif (sym >= keysymdef.keysyms[\"Racute\"] and sym <= keysymdef.keysyms[\"Tcedilla\"]):\n lower += (keysymdef.keysyms[\"racute\"] - keysymdef.keysyms[\"Racute\"])\n elif (sym >= keysymdef.keysyms[\"racute\"] and sym <= keysymdef.keysyms[\"tcedilla\"]):\n upper -= (keysymdef.keysyms[\"racute\"] - keysymdef.keysyms[\"Racute\"])\n elif enc == 2: # latin3\n # Assume the KeySym is a legal value (ignore discontinuities)\n if (sym >= keysymdef.keysyms[\"Hstroke\"] and sym <= keysymdef.keysyms[\"Hcircumflex\"]):\n lower += (keysymdef.keysyms[\"hstroke\"] - keysymdef.keysyms[\"Hstroke\"])\n elif (sym >= keysymdef.keysyms[\"Gbreve\"] and sym <= keysymdef.keysyms[\"Jcircumflex\"]):\n lower += (keysymdef.keysyms[\"gbreve\"] - keysymdef.keysyms[\"Gbreve\"])\n elif (sym >= keysymdef.keysyms[\"hstroke\"] and sym <= keysymdef.keysyms[\"hcircumflex\"]):\n upper -= (keysymdef.keysyms[\"hstroke\"] - keysymdef.keysyms[\"Hstroke\"])\n elif (sym >= keysymdef.keysyms[\"gbreve\"] and sym <= keysymdef.keysyms[\"jcircumflex\"]):\n upper -= (keysymdef.keysyms[\"gbreve\"] - keysymdef.keysyms[\"Gbreve\"])\n elif (sym >= keysymdef.keysyms[\"Cabovedot\"]\n and sym <= keysymdef.keysyms[\"Scircumflex\"]):\n lower += (keysymdef.keysyms[\"cabovedot\"] - keysymdef.keysyms[\"Cabovedot\"])\n elif (sym >= keysymdef.keysyms[\"cabovedot\"]\n and sym <= keysymdef.keysyms[\"scircumflex\"]):\n upper -= (keysymdef.keysyms[\"cabovedot\"] - keysymdef.keysyms[\"Cabovedot\"])\n elif enc == 3: # latin4\n # Assume the KeySym is a legal value (ignore discontinuities)\n if (sym >= keysymdef.keysyms[\"Rcedilla\"] and sym <= keysymdef.keysyms[\"Tslash\"]):\n lower += (keysymdef.keysyms[\"rcedilla\"] - keysymdef.keysyms[\"Rcedilla\"])\n elif (sym >= keysymdef.keysyms[\"rcedilla\"] and sym <= 
keysymdef.keysyms[\"tslash\"]):\n upper -= (keysymdef.keysyms[\"rcedilla\"] - keysymdef.keysyms[\"Rcedilla\"])\n elif (sym == keysymdef.keysyms[\"ENG\"]):\n lower = keysymdef.keysyms[\"eng\"]\n elif (sym == keysymdef.keysyms[\"eng\"]):\n upper = keysymdef.keysyms[\"ENG\"]\n elif (sym >= keysymdef.keysyms[\"Amacron\"] and sym <= keysymdef.keysyms[\"Umacron\"]):\n lower += (keysymdef.keysyms[\"amacron\"] - keysymdef.keysyms[\"Amacron\"])\n elif (sym >= keysymdef.keysyms[\"amacron\"] and sym <= keysymdef.keysyms[\"umacron\"]):\n upper -= (keysymdef.keysyms[\"amacron\"] - keysymdef.keysyms[\"Amacron\"])\n elif enc == 6: # cyrillic\n # Assume the KeySym is a legal value (ignore discontinuities)\n if (sym >= keysymdef.keysyms[\"Serbian_DJE\"]\n and sym <= keysymdef.keysyms[\"Serbian_DZE\"]):\n lower -= (keysymdef.keysyms[\"Serbian_DJE\"] - keysymdef.keysyms[\"Serbian_dje\"])\n elif (sym >= keysymdef.keysyms[\"Serbian_dje\"]\n and sym <= keysymdef.keysyms[\"Serbian_dze\"]):\n upper += (keysymdef.keysyms[\"Serbian_DJE\"] - keysymdef.keysyms[\"Serbian_dje\"])\n elif (sym >= keysymdef.keysyms[\"Cyrillic_YU\"]\n and sym <= keysymdef.keysyms[\"Cyrillic_HARDSIGN\"]):\n lower -= (keysymdef.keysyms[\"Cyrillic_YU\"] - keysymdef.keysyms[\"Cyrillic_yu\"])\n elif (sym >= keysymdef.keysyms[\"Cyrillic_yu\"]\n and sym <= keysymdef.keysyms[\"Cyrillic_hardsign\"]):\n upper += (keysymdef.keysyms[\"Cyrillic_YU\"] - keysymdef.keysyms[\"Cyrillic_yu\"])\n elif enc == 7: # greek\n if (sym >= keysymdef.keysyms[\"Greek_ALPHAaccent\"]\n and sym <= keysymdef.keysyms[\"Greek_OMEGAaccent\"]):\n lower += (keysymdef.keysyms[\"Greek_alphaaccent\"] -\n keysymdef.keysyms[\"Greek_ALPHAaccent\"])\n elif (sym >= keysymdef.keysyms[\"Greek_alphaaccent\"]\n and sym <= keysymdef.keysyms[\"Greek_omegaaccent\"] and\n sym != keysymdef.keysyms[\"Greek_iotaaccentdieresis\"] and\n sym != keysymdef.keysyms[\"Greek_upsilonaccentdieresis\"]):\n upper -= (keysymdef.keysyms[\"Greek_alphaaccent\"] -\n keysymdef.keysyms[\"Greek_ALPHAaccent\"])\n elif (sym >= keysymdef.keysyms[\"Greek_ALPHA\"]\n and sym <= keysymdef.keysyms[\"Greek_OMEGA\"]):\n lower += (keysymdef.keysyms[\"Greek_alpha\"] - keysymdef.keysyms[\"Greek_ALPHA\"])\n elif (sym >= keysymdef.keysyms[\"Greek_alpha\"]\n and sym <= keysymdef.keysyms[\"Greek_omega\"] and\n sym != keysymdef.keysyms[\"Greek_finalsmallsigma\"]):\n upper -= (keysymdef.keysyms[\"Greek_alpha\"] - keysymdef.keysyms[\"Greek_ALPHA\"])\n elif enc == 0x14: # armenian\n if (sym >= keysymdef.keysyms[\"Armenian_AYB\"]\n and sym <= keysymdef.keysyms[\"Armenian_fe\"]):\n lower = sym | 1\n upper = sym & ~1\n return lower, upper", "def remap_to_state_alphabet_by_symbol(self,\n state_alphabet,\n purge_other_state_alphabets=True):\n symbol_state_map = state_alphabet.symbol_state_map()\n for vi, vec in enumerate(self.taxon_seq_map.values()):\n for ci, cell in enumerate(vec):\n cell.value = symbol_state_map[cell.value.symbol]\n for ct in self.character_types:\n ct.state_alphabet = state_alphabet\n if purge_other_state_alphabets:\n self.state_alphabets = [state_alphabet]\n self.default_state_alphabet = state_alphabet", "def translateLoc(loc):\n\tif(loc[0].isalpha()):\t\n\t\treturn [int(loc[1:])-1,colDict[loc[0]],'V']\n\telse:\n\t\treturn [int(loc[:-1])-1, colDict[loc[-1]],'H']", "def test_modify_coords(self):\n xyz1 = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((1.53830201, 0.86423425, 0.07482439), (0.94923576, -0.20847619, -0.03881977),\n (-0.56154542, -0.31516675, 
-0.05011465), (-1.18981166, 0.93489731, 0.17603211),\n (1.49712659, -1.15833718, -0.15458647), (-0.87737433, -0.70077243, -1.02287491),\n (-0.87053611, -1.01071746, 0.73427128), (-0.48610273, 1.61361259, 0.11915705))}\n xyz2 = {'symbols': ('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((-0.48629842, 0.00448354, 0.00136213), (0.97554967, -0.0089943, -0.00273253),\n (2.13574353, -0.01969098, -0.00598223), (-0.88318669, -0.63966273, -0.78887729),\n (-0.87565097, -0.35336611, 0.95910491), (-0.86615712, 1.01723058, -0.16287498))}\n xyz3 = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-2.77437517, 0.17200669, 0.18524832), (-1.64613785, -0.19208096, 0.80434075),\n (-0.40774525, 0.26424657, -0.07952902), (-0.26203276, 2.09580334, -0.05090198),\n (-0.67096595, -0.16397552, -1.42109845), (0.89264107, -0.40136991, 0.41083574),\n (2.12441624, -0.1300863, -0.44918504), (-1.50623429, -1.27619307, 0.9524955),\n (-1.45114032, 0.18501518, 1.82167553), (-1.59654975, 2.25615634, -0.09052499),\n (-1.65730431, -0.11079255, -1.400057), (0.74870779, -1.48997779, 0.41386971),\n (1.10331691, -0.11082471, 1.44762119), (2.41262211, 0.92463409, -0.42840126),\n (1.95758158, -0.4244074, -1.48990015), (2.97418137, -0.70882619, -0.0719403))}\n xyz4 = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-1.2713687423422115, -0.7423678681688866, -0.6322577211421921),\n (-0.08008635702808505, -0.40741599130374034, 0.2550353232234618),\n (-0.5452666768773297, -0.20159898814584978, 1.588840559327411),\n (0.6158080809151276, 0.8623086771891557, -0.21553636846891006),\n (1.9196775903993375, 1.0155396004927764, 0.5174563928754532),\n (3.0067486097953653, 1.0626738453913969, -0.05177300486677717),\n (-2.012827991034863, 0.06405231524730193, -0.6138583677564631),\n (-0.9611224758801538, -0.9119047827586647, -1.6677831987437075),\n (-1.7781253059828275, -1.6433798866337939, -0.27003123559560865),\n (0.6204384954940876, -1.2502614603989448, 0.2715082028581114),\n (-1.0190238747695064, -1.007069904421531, 1.8643494196872146),\n (0.014234510343435022, 1.753076784716312, -0.005169050775340246),\n (0.827317336700949, 0.8221266348378934, -1.2893801191974432),\n (1.8498494882204641, 1.107064846374729, 1.6152311353151314))}\n xyz5 = {'symbols': ('N', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'H', 'C', 'C',\n 'N', 'H', 'H', 'C', 'H', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'C', 'C', 'C', 'H', 'H', 'H',\n 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'O', 'O', 'C', 'O', 'H', 'H', 'H'),\n 'isotopes': (14, 12, 12, 12, 1, 1, 12, 12, 12, 12, 1, 1, 12, 12, 12, 1, 12, 12, 14, 1, 1, 12, 1, 12, 12,\n 12, 1, 1, 1, 1, 12, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 12, 16, 1, 1, 1),\n 'coords': ((-0.766219, -0.248648, -0.347086), (0.667812, -0.150498, -0.496932),\n (-1.490842, 1.000959, -0.245328), (1.311194, -1.339578, -1.19388),\n (0.976451, 0.831716, -0.911173), (1.231101, -0.062221, 0.660162),\n (-1.346406, -1.400789, 0.294395), (-1.022138, 2.069095, 0.533928),\n (-2.673271, 1.125443, -1.008282), (2.575265, -0.94966, -1.974365),\n (1.534634, -2.14679, -0.467576), (0.584227, -1.791819, -1.905459),\n (-0.574689, -2.103356, 1.24726), (-2.643838, -1.861964, -0.035016),\n (-1.73741, 3.268914, 0.549347), (-0.105632, 1.96688, 1.126589),\n (-3.134563, 
-0.04419, -1.826788), (-3.378705, 2.332664, -0.970971),\n (3.611589, -0.28425, -1.113057), (2.30114, -0.222978, -2.774031),\n (2.969795, -1.853671, -2.489377), (-1.04268, -3.284134, 1.815898),\n (0.388329, -1.696921, 1.570938), (-3.645512, -1.174123, -0.925823),\n (-3.088386, -3.061615, 0.555145), (-2.911462, 3.400813, -0.198004),\n (-1.376219, 4.102013, 1.150524), (-3.935589, 0.254447, -2.531702),\n (-2.298405, -0.411572, -2.461402), (-4.293927, 2.444159, -1.549116),\n (4.776265, 0.123769, -1.959689), (4.064268, -1.169457, 0.001273),\n (-2.30222, -3.77607, 1.457834), (-0.433782, -3.814872, 2.545573),\n (-4.135291, -1.935447, -1.571709), (-4.453058, -0.768805, -0.272612),\n (-4.078335, -3.442593, 0.302875), (-3.465321, 4.337257, -0.179068),\n (5.500278, 0.67338, -1.336133), (5.30611, -0.707961, -2.446036),\n (4.433161, 0.821539, -2.74083), (4.954327, -0.743379, 0.488676),\n (4.300156, -2.200598, -0.295594), (3.265545, -1.194959, 0.769181),\n (-2.671885, -4.702569, 1.890597), (1.78286, 0.089948, 1.873468),\n (1.758606, 1.382484, 2.130308), (2.973471, 2.040706, 1.623336),\n (2.813335, 2.256698, 0.248083), (2.919925, 3.030613, 2.105087),\n (3.858517, 1.438684, 1.858856), (3.005024, 1.410381, -0.277159))}\n xyz6 = {'symbols': ('N', 'C', 'C', 'H', 'C', 'H', 'H', 'N', 'H', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H',\n 'H', 'H', 'O', 'O', 'H', 'C', 'H', 'H', 'O', 'H'),\n 'isotopes': (14, 12, 12, 1, 12, 1, 1, 14, 1, 12, 12, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 16, 16, 1, 12, 1, 1, 16, 1),\n 'coords': ((2.608231, -0.458895, 1.629197), (2.408715, 0.132166, 0.318653),\n (1.174426, -0.323822, -0.471554), (3.304408, -0.071078, -0.291093),\n (-0.13532, 0.016735, 0.225918), (1.210534, 0.150539, -1.46601),\n (1.221625, -1.416078, -0.631885), (-1.316045, -0.574442, -0.379686),\n (-0.086456, -0.362851, 1.260573), (-1.468231, -0.411368, -1.77232),\n (-2.505886, -0.419831, 0.432347), (-2.403425, -0.886127, -2.107496),\n (-0.621099, -0.850903, -2.320815), (-3.364172, -0.88926, -0.068909),\n (-2.767365, 0.637288, 0.628231), (-2.360065, -0.927144, 1.400068),\n (2.574849, -1.475283, 1.579253), (1.886591, -0.170591, 2.284831),\n (2.375177, 1.228181, 0.441157), (-0.231725, 1.121336, 0.301367),\n (-1.455199, 0.947478, -2.255384), (-2.58006, 1.611276, -1.811891),\n (-3.315019, 1.53868, -2.760245), (-3.713498, 1.338038, -4.025244),\n (-4.754452, 0.99077, -4.021055), (-3.584519, 2.351475, -4.444827),\n (-2.87635, 0.381401, -4.513467), (-1.966974, 0.665311, -4.338804))}\n mol1 = converter.molecules_from_xyz(xyz1)[1]\n mol2 = converter.molecules_from_xyz(xyz2)[1]\n mol3 = converter.molecules_from_xyz(xyz3)[1]\n mol4 = converter.molecules_from_xyz(xyz4)[1]\n mol5 = converter.molecules_from_xyz(xyz5)[1] # a TS\n mol6 = converter.molecules_from_xyz(xyz6)[1] # a TS\n\n # test atom modification types\n modification_type = 'atom'\n\n # test R_atom modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450948408691, 1.6253138441202686, 0.042870253583423557),\n (-0.02582727173313104, 0.39833637030950975, 0.9010563970736782),\n (-0.02582727173313104, -1.003336361301907, 0.3272239637891734),\n (-0.02582727173313104, -1.003336361301907, -1.0899990532469916),\n (-0.08138177769352953, 0.465646654907214, 2.0002403496097383),\n (0.865704477722866, -1.5264119285073852, 0.6825623354173815),\n (-0.9185767861007101, -1.5268489957651346, 0.6785930201570352),\n (0.14577602706217008, -0.07998849407327513, 
-1.367625604543457))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 0], -1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01167393998357115, -0.5225807439329089, -0.9899595616178738),\n (-0.040525509131742084, 0.26844387347263365, -2.2633625897949208),\n (0.01167393998357115, -0.5225807439329089, 1.4216698859880004),\n (0.01167393998357115, 0.8926022581407576, 1.3456557382334218),\n (0.11202785529567173, -2.2718515121487206, 0.04691079079738447),\n (-0.8954040276884763, -0.8508241498293034, 1.9356427400340799),\n (0.8880330020652463, -0.8439168226596885, 1.990234136037933),\n (-0.13167393678263156, 1.1200467154192293, 0.4039467156910099))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), - new_val, 5)\n\n # test A_atom modification\n indices, new_val = [2, 1, 0], 140\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.011940763595588438, -0.90654939253321, -1.1784203714214114),\n (0.011940763595588438, -0.90654939253321, 0.05065327345758153),\n (-0.02531707366035523, 0.06629439921242253, 1.2108932996837143),\n (0.011940763595588438, 1.5283906429141458, 0.05806971900412017),\n (0.03285612994605798, -1.8458593499019589, 0.6277855724118742),\n (-0.9645745795119229, 0.3758422785924207, 1.4467600455414558),\n (0.8166299978590752, 0.37902049128771864, 1.551524925579085),\n (-0.10465928281651019, 1.2266969334608921, -0.8663115945839973))}\n\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test changing an angle to 180 degrees\n indices, new_val = [0, 1, 2], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.0019281473980474666, 1.559641181574566, 1.013927346529066),\n (-0.0019281473980474772, 0.42219553322547265, 0.548267146825631),\n (-0.0019281473980474772, -0.9794771983859442, -0.025565286458873793),\n (-0.0019281473980474772, -0.9794771983859442, -1.4427883034950388),\n (-0.05748265335844597, 0.4895058178231769, 1.6474510993616909),\n (0.8896036020579495, -1.5025527655914221, 0.32977308516933435),\n (-0.8946776617656266, -1.5029898328491718, 0.32580376990898796),\n (0.16967515139725364, -0.05612933115731222, -1.7204148547915041))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val)\n\n # test changing a 180 degree angle to something else\n indices, new_val = [0, 1, 2], 120\n expected_xyz = {'symbols': 
('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((0.7757362507465277, 0.4478716325630875, 0.7767867108403768),\n (-0.3207007101270898, -0.18515666614565915, 0.04582870107149262),\n (-0.3207007101270898, -0.18515666614565915, -1.1144190466784232),\n (-0.3207007101270898, 0.8374974028016162, 1.8964626512298475),\n (-1.2063452316056904, -0.6964838693490394, 1.8964625790172804),\n (0.5649437124447699, -0.6964840572534022, 1.896462566459638))}\n new_xyz = converter.modify_coords(coords=xyz2, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol2)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol2), new_val, 5)\n\n # test D_atom modification\n indices, new_val = [0, 1, 2, 3], 30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.3812553590829658, 1.4249753409811934, 0.24885596109763952),\n (0.13588307254069157, 0.47112021672976, 0.8262208968300058),\n (0.13588307254069157, -0.9305525148816568, 0.25238846354550093),\n (0.13588307254069157, -0.9305525148816568, -1.1648345534906641),\n (0.08032856658029308, 0.5384305013274643, 1.9254048493660656),\n (1.0274148219966885, -1.4536280820871348, 0.6077268351737091),\n (-0.7568664418268876, -1.4540651493448844, 0.6037575199133627),\n (0.30748637133599266, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [3, 2, 1, 0], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.17268751280677364, -0.941696827801256, -1.1487068217042242),\n (-0.17268751280677364, -0.941696827801256, 0.08036682317476873),\n (-0.17268751280677364, 0.3328411496875977, 0.8986107061160642),\n (0.4830966870190505, 1.3983204216355287, 0.23286144075770054),\n (-0.18773471865125574, -1.8811191078717768, 0.6574991306756568),\n (-1.0994105700891015, 0.3771264916699556, 1.4764735369276594),\n (0.6806108103574798, 0.3121359507669669, 1.5812384626874982),\n (-0.2075631130119835, 1.1944491200970329, -0.8365980489813365))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n indices, new_val = [0, 1, 2, 3], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.37739906428687087, 1.4249753409811934, 0.24885596109763952),\n (-0.13973936733678652, 0.47112021672976, 0.8262208968300058),\n (-0.13973936733678652, -0.9305525148816568, 0.25238846354550093),\n (-0.13973936733678652, -0.9305525148816568, -1.1648345534906641),\n (-0.195293873297185, 0.5384305013274643, 1.9254048493660656),\n (0.7517923821192105, -1.4536280820871348, 0.6077268351737091),\n (-1.0324888817043656, -1.4540651493448844, 0.6037575199133627),\n (0.0318639314585146, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = 
converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n # test group modification types\n modification_type = 'group'\n\n # test R_group modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450815440741, 1.625313844153823, 0.04287025350146201),\n (-0.02582727144301671, 0.39833637029935165, 0.9010563970984908),\n (-0.02582727144301671, -1.0033363613120652, 0.327223963813986),\n (-0.02582727144301671, -1.0033363613120652, -1.089999053222179),\n (-0.0813817733100206, 0.4656466548101805, 2.0002403498467567),\n (0.8657044801882787, -1.5264119271233758, 0.6825623320367284),\n (-0.9185767836497759, -1.5268489971713646, 0.6785930235919653),\n (0.1457760273522844, -0.07998849408343323, -1.3676256045186443))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test A_group modification\n indices, new_val = [0, 1, 2], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01997925208754263, 1.625852603711386, 0.708691800251658),\n (-0.009887200766722545, 0.3981406366172051, 0.6591605436173553),\n (-0.009887200766722545, -1.0035320949942117, 0.08532811033285048),\n (-0.009887200766722545, -1.0035320949942117, -1.3318949067033146),\n (-0.06544170263372645, 0.465450921128034, 1.7583444963656214),\n (0.8816445508645728, -1.5266076608055221, 0.44066647855559316),\n (-0.9026367129734817, -1.5270447308535111, 0.4366971701108293),\n (0.16171609802857856, -0.08018422776557976, -1.6095214579997799))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 2, 5], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-0.45549818019466204, 1.8548729964273216, 0.8440028131622062),\n (-0.2667929723517851, 0.6671106629415136, 1.42912314652022),\n (-0.2163066356464933, -0.45426196440936106, 0.30526758056697156),\n (1.3109140692843337, 0.4741705899686004, -0.12165329723035323),\n (-1.3557392716759613, 0.27771606050413156, -0.16203238949855803),\n (-0.2163066356464933, -1.8492005047245035, -0.34944907261899716),\n (-0.2163066356464933, -1.8492005047245035, -1.87604687202156),\n (-1.0601386155429, 0.3401156691690679, 2.122303234960202),\n (0.6302934527577109, 0.5164940342603479, 2.051815682570846),\n (1.143418340718557, 1.3271327629309078, 0.9043191341647172),\n (-1.5046641822171405, 0.8405156651772538, 0.6362234563562041),\n (-1.1248176985937233, -2.3816433802478305, -0.03815279071754074),\n (0.6330922017716909, -2.4415422695908298, 0.013011559357363423),\n (0.707681641272436, 
-1.4302805756837962, -2.2843133571390752),\n (-1.061876978104781, -1.2808214124615414, -2.27542464397285),\n (-0.30131566361820894, -2.876339919190297, -2.2463334380185054))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [5, 2, 1], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.2917048572251579, -1.5727341554069034, -1.3423072397835754),\n (0.2917048572251579, -1.5727341554069034, -0.0048638500194817524),\n (0.2917048572251579, -0.06886266257406626, 0.5064553318371674),\n (-1.363795569744117, -0.1202634403830567, -0.28936363114537844),\n (1.2964570556359054, 0.04149003667864859, -0.508809719558267),\n (0.4099139249017979, 1.1367441270166645, 1.4588451220109844),\n (0.29481769872300884, 2.504661621457458, 0.7909713103796479),\n (1.1685736645928884, -2.0373473546555556, 0.47685945259484286),\n (-0.5312728539867155, -2.0767912763680947, 0.5278926826114716),\n (-1.2231052441089643, -1.4156454828005882, -0.6216441060907665),\n (1.4364524039686508, -0.9213654475865127, -0.6804052856633311),\n (1.3966722481626304, 1.107137467791805, 1.9397033126698722),\n (-0.33241474313836356, 1.0625526837349102, 2.2633130452338497),\n (-0.7009351031697479, 2.671307058557274, 0.3706911401148234),\n (1.0334518240640673, 2.6225101662569066, -0.007826505507309234),\n (0.474437928409419, 3.293432289151483, 1.52916604039102))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 4)\n\n # test D_group modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.751853407099498, 1.1325746654576616, 0.9630889493590222),\n (0.2705229494881336, 0.5773506493576217, 0.5667369568416694),\n (0.2705229494881336, -0.8243220822537951, -0.00709547644283548),\n (0.2705229494881336, -0.8243220822537951, -1.4243184934790005),\n (0.21496844352773511, 0.644660933955326, 1.6659209093777292),\n (1.1620546989441305, -1.347397649459273, 0.34824289518537266),\n (-0.6222265648794455, -1.3478347167170226, 0.3442735799250263),\n (0.4421262482834347, 0.09902578497483683, -1.7019450447754658))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [5, 2, 1, 0], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.3034340517195509, -1.6113639549493641, -1.7901391417129255),\n (0.3034340517195509, -1.6113639549493641, -0.45269575194883194),\n (0.3034340517195509, -0.10749246211652697, 0.058623429907817215),\n 
(-1.3193844356755215, 0.6746571866866746, -0.30380395501671575),\n (1.3282593544657135, 0.581298860926198, -0.6678526090506967),\n (0.30343405171955073, -0.05040119820033895, 1.5985091447581203),\n (0.26233878444784786, 1.3540223173114139, 2.1955071424316666),\n (1.1803028491569083, -2.0759771588261957, 0.029027564277707585),\n (-0.5195436704231056, -2.115421071566818, 0.08006076790649397),\n (-1.414911803320983, 0.05150877481380545, -1.4915662613668217),\n (1.2907872270567131, 0.05736052141866721, -1.5046434284929022),\n (1.2266505257705096, -0.5178979180455376, 1.965811882691859),\n (-0.5283478351927398, -0.6406189828710822, 2.0028687871657294),\n (-0.6775241224477067, 1.8658969637383576, 1.9706253328328829),\n (1.0896028263747624, 1.9687229189733981, 1.8276430689661958),\n (0.35031987670665765, 1.2957313570336282, 3.285560142931404))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n # test groups modification types\n modification_type = 'groups'\n\n # test D_groups modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.7692326765134374, 1.1252152574374596, 0.9810655314575423),\n (0.25314357064244697, 0.5699912505374165, 0.5847135445433043),\n (0.25314357064244697, -0.8316815836112654, 0.010881153979294123),\n (0.25314357064244697, -0.8316815836112654, -1.4063419471715688),\n (1.2326181278103254, 1.0755945976230115, 0.6133000157238186),\n (1.1446752957640132, -1.3547571699433192, 0.3662195585064876),\n (-0.6396059141384572, -1.3551941756763426, 0.3622501790547312),\n (0.4247468609767439, 0.09166629658280878, -1.6839684605765641))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=xyz1, indices=[4, 1, 2, 3], mol=mol1),\n 176.7937925, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=[4, 1, 2, 3], mol=mol1),\n 279.5679938, 5)\n\n indices, new_val = [5, 2, 1, 0], 100\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.17617288317697363, -1.4263876505749937, -1.3907356765118228),\n (0.17617288317697363, -1.4263876505749937, -0.05329233131383648),\n (0.17617288317697363, 0.07748361087633482, 0.4580268316508156),\n (0.8541264407563205, 1.1799297944814306, -0.8464435250524343),\n (1.0315484892431994, 0.12891222316318918, 1.606136465715537),\n (-1.2415001838455297, 0.5175023395992786, 0.8716616732793354),\n (-2.371148423802697, -0.377635430276555, 0.3685473045279144),\n (1.0530416597996317, -1.8910009834245878, 0.42843102214143425),\n (-0.646804798256715, -1.930444842122042, 0.47946418053365614),\n (1.322524386187, 0.1392850561843193, -1.55769653865906),\n (1.5807657244329665, 0.9071634481807671, 1.3438012611373469),\n (-1.4308626545937098, 1.5181627982792263, 0.46103575662853813),\n (-1.3101730016766409, 
0.6090291604729325, 1.9628224613881304),\n (-2.328405219901557, -1.376683205512397, 0.811273322532136),\n (-2.345556604764221, -0.47877786163003033, -0.7207928024513892),\n (-3.3382397150969996, 0.059047399283163715, 0.6394658008190603))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [4, 3, 1, 0], 236.02\n expected_xyz = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-0.3420713780282814, -0.726846939196746, -1.8608060734620697),\n (-0.3420713780282814, -0.726846939196746, -0.33809952744080163),\n (-1.5199121786498575, -1.3903247017047589, 0.12046140490433599),\n (-0.3420713780282814, 0.692986716189357, 0.21142750813209843),\n (0.8346249371329908, 0.870417947793265, 1.130523629422891),\n (1.8415843350511496, 1.49899165752528, 0.8160475329621943),\n (-1.232802341934429, -0.22348356564525385, -2.2527724067647172),\n (0.5474409007790566, -0.2291658204558631, -2.2587884226234842),\n (-0.36650899336409903, -1.7525658745827613, -2.2443893713107435),\n (0.5235538883628821, -1.286773819894118, 0.03414982827280788),\n (-1.525486055520759, -2.2842579938670644, -0.2668197974505191),\n (-1.246930807816442, 0.9000033565709169, 0.7927934676101465),\n (-0.26242043164905693, 1.4290013064896112, -0.5956842516835208),\n (0.739203033547077, 0.4163114365921572, 2.132044487804084))}\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4), new_val, 5)\n\n # test 1-indexed input\n indices = [5, 4, 2, 1]\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4, index=1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4, index=1),\n new_val, 5)\n\n # test TSs\n indices = [19, 10, 4, 2]\n fragments = [[46, 47, 48, 49, 50, 51, 52], [f + 1 for f in range(45)]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz5, torsion=indices, index=1), 56.83358841, 3)\n new_xyz = converter.modify_coords(coords=xyz5,\n indices=indices,\n new_value=300,\n modification_type='groups',\n mol=mol5,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 300, places=3)\n\n indices = [1, 2, 3, 5]\n fragments = [[f + 1 for f in range(23)], [24, 25, 26, 27, 28]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz6, torsion=indices, index=1), 62.30597206, 3)\n new_xyz = converter.modify_coords(coords=xyz6,\n indices=indices,\n new_value=200,\n modification_type='groups',\n mol=mol6,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 200, places=3)\n \n coords={'coords': ((-0.7862825353221515, -0.28824023055636216, 0.4782944637692894),\n (0.21968869054702736, 0.40094256193652866, -0.2919820499085219),\n (-0.07796443595084417, 0.5692847962524797, 
-1.6621913220858304),\n (-1.102200211589376, -1.1132157833188596, -0.01879031191901484),\n (-1.5973749070505925, 0.29546848172306867, 0.6474145668621136),\n (0.4237940503863438, 1.3660724867336205, 0.19101403432872205),\n (1.1352054736534014, -0.1980893380251006, -0.2652264470061931),\n (-0.7497944593402266, 1.258221857416732, -1.7507029654486272)),\n 'isotopes': (14, 12, 16, 1, 1, 1, 1, 1),\n 'symbols': ('N', 'C', 'O', 'H', 'H', 'H', 'H', 'H')}\n indices=[3, 0, 1, 2]\n new_value=53.76\n modification_type=\"groups\"\n mol=Molecule(smiles=\"NCO\")\n new_xyz = converter.modify_coords(coords=coords,\n indices=indices,\n new_value=new_value,\n modification_type=modification_type,\n mol=mol)\n self.assertTrue(type(new_xyz[\"coords\"][0][0] is float))", "def setElementsCoordinates(self, symbol, x, y):\n #If it is the start element\n if symbol == \"D\":\n self._set_start((x,y))\n\n #If it is the end of the level element\n elif symbol == \"F\":\n self._set_end((x,y))\n\n #If it is a spike\n elif symbol == \"S\":\n self._get_spikes().append((x,y))\n \n #If it is a scroll\n elif symbol == \"P\":\n self._get_scrolls().append((x,y))\n\n #If it is a key\n elif symbol == \"K\":\n self._get_keys().append((x,y))" ]
[ "0.6319196", "0.6069674", "0.5674592", "0.5491752", "0.54740614", "0.5442556", "0.5403927", "0.53647107", "0.53445184", "0.5340918", "0.5296028", "0.5274964", "0.5239597", "0.5212809", "0.51993406", "0.5174405", "0.5146997", "0.51462525", "0.5117898", "0.5112995", "0.5112307", "0.5091445", "0.5076641", "0.5068753", "0.50336987", "0.5033325", "0.5025076", "0.50146407", "0.5013571", "0.50078714" ]
0.67383003
0
Replaces elements of the punnet using elements of caseX_collapsed via a map_caseX_to_punnet
def replace_punnet_with_collapsed_cases(punnet,map_caseX_to_punnet,caseX_collapsed): for case_coord in map_caseX_to_punnet.keys(): punnet_coords = map_caseX_to_punnet[case_coord] for punnet_coord in punnet_coords: punnet[punnet_coord] = caseX_collapsed[case_coord] return punnet
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _map_elements_vectorized(self, unused_nid_map, model, unused_j, unused_dim_max,\n unused_nid_cp_cd, plot=True, xref_loads=True):\n self.gui.isubcase_name_map = {1: ['Nastran', '']}\n grid = self.gui.grid\n\n nelements = self.nelements\n if nelements == 0:\n return None\n idtype = get_numpy_idtype_for_vtk()\n log = self.log\n cell_types_array, cell_offsets_array, nids_list, eids_array, results = add_vectorized_elements(\n model, nelements, idtype, log)\n\n if cell_types_array.min() == 0:\n\n # all the non-elemental cards should be listed\n # it's not hugely important, but it cleans up dev error messages\n skip_cards = [\n 'CONM2',\n #'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4', 'PLOTEL',\n 'PARAM',\n #'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CVISC',\n 'TABLEM1', 'TABLEM2', 'TABLEM3', 'TABLEM4',\n 'TABLED1', 'TABLED2', 'TABLED3', 'TABLED4', 'TABLEST',\n 'MAT1', 'MAT2', 'MAT4', 'MAT5', 'MAT8', 'MAT9', 'MAT10',\n 'MATT1', 'MATT2', 'MATT8',\n 'MATS1', 'MATHP',\n\n 'PLOAD', 'PLOAD1', 'PLOAD2', 'FORCE', 'PLOAD4', 'LOAD',\n 'MAT1', 'PSHEAR', 'PSHELL', 'PTUBE', 'PDAMP',\n 'PELAST', 'PBEND', 'PBEAM', 'PCOMP', 'PCOMPG', 'PBAR', 'PSOLID',\n 'PLPLANE', 'PLSOLID',\n 'PROD', 'PELAS', 'PVISC', 'PBUSH1D', 'PBUSH2D',\n #'EPOINT',\n #'CQUADR', 'CTRIAR', 'SPOINT',\n #'CQUAD8', 'CTRIA6',\n 'ENDDATA',\n 'CORD2R', 'CORD2C', 'CORD2S', 'CORD1R', 'CORD1C', 'CORD1S',\n 'GRID', 'SPOINT', 'EPOINT', 'TF',\n\n 'RFORCE', 'RFORCE1', 'RFORCE2', 'FORCE', 'FORCE1', 'FORCE2',\n 'MOMENT', 'MOMENT1', 'MOMENT2', 'PLOAD', 'PLOAD1', 'PLOAD2', 'PLOAD4',\n 'LOAD', 'TLOAD1', 'TLOAD2', 'DLOAD', 'LSEQ', 'DAREA',\n 'RLOAD1', 'RLOAD2',\n\n 'SUPORT', 'SUPORT1', 'MPC', 'MPCADD', 'RBE1', 'RBE2', 'RBE3', 'RBAR', 'RCROSS',\n 'SPCADD', 'SPC', 'SPC1', 'SPCD', 'SPCAX', 'DMIG', 'DMI', 'DMIJ', 'DMIJI', 'DMIK',\n\n 'AELIST', 'AELINK', 'AESURF', 'AESURFS', 'AERO', 'AEROS', 'TRIM',\n 'FLUTTER', 'DIVERG',\n 'CAERO1', 'CAERO2', 'CAERO3', 'CAERO4', 'CAERO5',\n 'PAERO1', 'PAERO2', 'PAERO3', 'PAERO4', 'PAERO5',\n 'SPLINE1', 'SPLINE2', 'SPLINE3', 'SPLINE4', 'SPLINE5', 'SPLINE6', 'SPLINE7',\n 'CLOAD', 'TABLES1', 'NLPARM', 'GRDSET',\n ]\n potential_elements_found = [key for key in model.card_count if key not in skip_cards]\n etypes = [\n 'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',\n 'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CDAMP5', 'CVISC',\n 'CBUSH', 'CBUSH1D', 'CBUSH2D',\n 'CONROD', 'CROD', 'CTUBE', 'PLOTEL',\n 'CBAR', 'CBEAM', 'CBEND',\n 'CSHEAR',\n 'CTRIA3', 'CQUAD4', 'CTRIA6', 'CQUAD8', 'CTRIAR', 'CQUADR',\n 'CTETRA', 'CPENTA', 'CHEXA', 'CPYRAM',\n 'CHBDYG', 'CHBDYE', 'CHBDYP',\n # nastran 95\n 'CQUAD1',\n ]\n for key in potential_elements_found:\n if key not in etypes:\n log.warning('is %s an element?' 
% key)\n\n msg = (\n 'Cell Type is not defined (cell_type=0).\\n'\n ' cell_types_array = %s\\n'\n ' potential_elements_found=[%s]\\n'\n ' nelements=%s\\n\\n'\n '%s\\n\\n' % (\n cell_types_array,\n ', '.join(potential_elements_found),\n len(cell_types_array),\n '', #str(model.elements2),\n )\n )\n print(str(model.elements2))\n #msg += model.get_bdf_stats()\n raise RuntimeError(msg)\n\n deep = 1\n if len(nids_list) == 1:\n nids_array = nids_list[0].ravel()\n else:\n #raise NotImplementedError(len(nids_list))\n nids_array = np.hstack([nid_list.flatten() for nid_list in nids_list])\n #nids_array = np.array(nids_list, dtype=dtype)\n\n #-----------------------------------------------------------------\n # saving some data members\n self.gui.element_ids = eids_array\n\n #-----------------------------------------------------------------\n # build the grid\n\n #self.log.info('nids_array = %s' % nids_array)\n #self.log.info('cell_offsets_array = %s' % cell_offsets_array)\n #self.log.info('cell_types_array = %s' % cell_types_array)\n\n # Create the array of cells\n #print('nids_array =', nids_array)\n cells_id_type = numpy_to_vtkIdTypeArray(nids_array, deep=1)\n vtk_cells = vtk.vtkCellArray()\n vtk_cells.SetCells(nelements, cells_id_type)\n\n # Cell types\n vtk_cell_types = numpy_to_vtk(\n cell_types_array, deep=deep,\n array_type=vtk.vtkUnsignedCharArray().GetDataType())\n\n vtk_cell_offsets = numpy_to_vtk(cell_offsets_array, deep=deep,\n array_type=vtk.VTK_ID_TYPE)\n\n grid = self.gui.grid\n #grid = vtk.vtkUnstructuredGrid()\n grid.SetCells(vtk_cell_types, vtk_cell_offsets, vtk_cells)\n return results", "def reconstruct_input(self, ix):", "def map_case_to_punnet_indices(caseX, caseX_sym_to_coord):\n\n caseCoord_to_punnetCoord = {}\n\n for y,j in enumerate(caseX):\n for x,i in enumerate(j):\n\n case_coord = (y,x)\n\n punnet_coord = caseX_sym_to_coord[i]\n\n caseCoord_to_punnetCoord[case_coord] = punnet_coord\n\n return caseCoord_to_punnetCoord", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def map (a_data,a_column,a_old,a_new) :\n loc_new_data = a_data\n a_data[a_column].replace(a_old,a_new,inplace=True)", "def apply_remap_values(labels: np.ndarray, label_map: Dict[int, int]) -> np.ndarray:\n for l1, l2 in label_map.items():\n labels[labels == l1] = l2", "def reconstruct(self, X):", "def reconstruct(self, X):", "def _untransform_params(self, x):\r\n # work out how many places are fixed, and where they are. 
tricky logic!\r\n fix_places = self.fixed_indices + [t[1:] for t in self.tied_indices]\r\n if len(fix_places):\r\n fix_places = np.hstack(fix_places)\r\n Nfix_places = fix_places.size\r\n else:\r\n Nfix_places = 0\r\n\r\n free_places = np.setdiff1d(np.arange(Nfix_places + x.size, dtype=np.int), fix_places)\r\n\r\n # put the models values in the vector xx\r\n xx = np.zeros(Nfix_places + free_places.size, dtype=np.float64)\r\n\r\n xx[free_places] = x\r\n [np.put(xx, i, v) for i, v in zip(self.fixed_indices, self.fixed_values)]\r\n [np.put(xx, i, v) for i, v in [(t[1:], xx[t[0]]) for t in self.tied_indices] ]\r\n\r\n [np.put(xx, i, t.f(xx[i])) for i, t in zip(self.constrained_indices, self.constraints)]\r\n if hasattr(self, 'debug'):\r\n stop # @UndefinedVariable\r\n\r\n return xx", "def _autoplace(self, nodes):\n for node in nodes:\n node.autoplace()", "def replace(self, nodes, nodeID: int | list = None) -> list:\n match = self.match_this(nodes, nodeID)\n if type(nodes) is np.ndarray():\n for source, target in match.items():\n self[source].coors = nodes[target, :]\n else:\n for source, target in match.items():\n self[source].coors = nodes[target].coors", "def transform(self,\n chip: 'np.ndarray',\n channel_order: Optional[List[int]] = None):\n masks = []\n for (value_from, value_to) in self.mapping.items():\n mask = (chip == value_from)\n masks.append((mask, value_to))\n for (mask, value_to) in masks:\n chip[mask] = value_to\n\n return chip", "def unmap(values, mapping):\n i = 0\n for m in mapping:\n values.replace(to_replace=m, value=i, inplace=True)\n i += 1", "def modify(test_case):\r\n n=len(test_case)\r\n mod_test_cases=[]\r\n for i in range(n):\r\n mod_test_case=test_case[:]\r\n #print(mod_test_case[i])\r\n mod_test_case[i]= not mod_test_case[i]\r\n mod_test_cases.append((mod_test_case,i))\r\n return mod_test_cases", "def transform(self, x):", "def reduce_possibilities_by_row(self):\n x = self.targetCell.x\n for i in range(1,10): #content\n for n in range(9): #y-coord adjacent cells\n neighbour_cell = self.puzzleGrid.grid[x][n]\n if self.targetCell != neighbour_cell:\n self.targetCell.row_neighbour_possibilities.append( neighbour_cell.possibilities)\n if str(i) == neighbour_cell.finalNumber:\n self.RemovePossiblityFromTargetCell(i)\n self.targetCell.row_neighbour_possibilities = flatten_list(self.targetCell.row_neighbour_possibilities)", "def transform(self, X, y=None):\r\n Xo = X.copy()\r\n for col, tmap in self.maps.items():\r\n vals = np.full(X.shape[0], np.nan)\r\n for val, mean_target in tmap.items():\r\n vals[X[col]==val] = mean_target\r\n Xo[col] = vals\r\n return Xo", "def reduce_possibilities_by_column(self):\n y = self.targetCell.y\n for i in range(1,10): #content\n for n in range(9): #x-coord adjacent cells\n neighbour_cell = self.puzzleGrid.grid[n][y]\n if self.targetCell != neighbour_cell:\n self.targetCell.column_neighbour_possibilities.append( neighbour_cell.possibilities)\n if str(i) == neighbour_cell.finalNumber:\n self.RemovePossiblityFromTargetCell(i)\n self.targetCell.column_neighbour_possibilities = flatten_list(self.targetCell.column_neighbour_possibilities)", "def remap_context_labels(self):\n c_contexts = list(self.context[self.iter])\n unique_contexts = uniqify(c_contexts)\n remap_dict = dict(zip(unique_contexts,\n range(1, len(unique_contexts) + 1)))\n\n remapped = copy.deepcopy(self.context[self.iter])\n for old, new in remap_dict.iteritems():\n self.context[self.iter][remapped==old] = new", "def inverse_transform(self, X, copy=...):\n ...", "def 
transform(self, X):\n raise NotImplementedError()" ]
[ "0.5443269", "0.54277223", "0.53019744", "0.52509886", "0.52509886", "0.52509886", "0.52509886", "0.52509886", "0.52509886", "0.52509886", "0.51589334", "0.51589334", "0.51589334", "0.51411754", "0.5120727", "0.5113141", "0.5113141", "0.503857", "0.5027112", "0.49946713", "0.49919495", "0.49904338", "0.49693286", "0.49686942", "0.4938216", "0.49148571", "0.49124157", "0.48942813", "0.48878714", "0.48748708" ]
0.8381181
0
Takes a sentence as an input, and returns a dictionary of emotions and number of occurrences for them.
def get_emotions_in_sentence(sentence): tknzr = TweetTokenizer() tokens = tknzr.tokenize(sentence) emotions = {} for word in tokens: # Replace hashtags with pure words (i.e. "#positive" becomes "positive") if re.match("^#\S+", word): word = re.sub("^#", "", word) try: _emotions = nrc_lexicon.loc[word] _emotions = _emotions[_emotions['yes_or_no'] == 1] if _emotions[_emotions['yes_or_no'] == 1].empty: pass for _emotion in _emotions[_emotions['yes_or_no'] == 1]['emotion']: if _emotion not in emotions: emotions[_emotion] = 0 emotions[_emotion] += 1 except: pass return emotions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_word_in_each_sentence(sentence):\n\tsentence = sentence.lower()\n\twords = sentence.split()\n\tcount_dict = dict()\n\tfor _ in words:\n\t\tif count_dict.get(_):\n\t\t\tcount_dict[_] += 1\n\t\telse:\n\t\t\tcount_dict[_] = 1\n\treturn count_dict", "def basic_count(sentence):\n letter_count = {}\n for char in sentence:\n if char not in letter_count:\n letter_count[char] = 0\n letter_count[char] += 1\n return letter_count", "def word_count(poem):\n lines = [line for line in poem.split(\"\\n\") if line]\n word_map = {}\n for line in lines:\n for word in line.split(\" \"):\n if word:\n if word in word_map:\n word_map[word] += 1\n else:\n word_map[word] = 1\n return word_map", "def find_types_of_sents_in_text(text):\r\n return dict(Counter(map(lambda x: x[-1], nltk.sent_tokenize(text))))", "def word_lengths(sentence):\n\n word_count_dict = {}\n sentence = sentence.split()\n\n for word in sentence:\n length = len(word)\n if length not in word_count_dict:\n word_count_dict[length] = {word}\n else:\n set = word_count_dict[length]\n set.add(word)\n\n return word_count_dict", "def get_num_words_spoken_by_character_per_episode(content):\n content = list(csv.reader(content.splitlines(), delimiter=','))\n characters = [name[2] for name in content]\n characters = list(dict.fromkeys(characters))\n del characters[0]\n res = defaultdict()\n for character in characters:\n episode = 1\n dic = {}\n count = 0\n for row in content: \n if row[2] == character:\n if str(episode) == row[1]:\n count += len(row[3].split())\n else:\n dic[str(episode)] = count\n episode = int(row[1])\n count = len(row[3].split())\n if '13' not in dic.keys():\n dic['13'] = count \n dic = Counter(dic)\n res[character] = dic\n return res", "def word_count(phrase):\n word_dict = {}\n\n for word in phrase.split():\n word_dict[word] = word_dict.get(word, 0) + 1\n\n return word_dict", "def info(doc):\n\tinfo = {}\n\tinfo['sentences'] = [str(sent) for sent in doc.sents]\n\t#sentences : [sent1, sent2, ...]\n\tinfo['tokens'] = [str(token) for token in doc]\n\t#all tokens in info['tokens']\n\ttoken_vals = {}\n\tfor token in info['tokens']:\n\t\tcurrent_word = token\n\t\ti = 0\n\t\tcurrent_sent = info['sentences'][i]\n\t\tfor i in range(len(info['sentences'])): #for each sentence\n\t\t\tval = current_sent.count(str(current_word))\n\t\t\t#value is the number of times the current word is in the current sent\n\t\t\ttoken_vals[str(token)] = val\n\t\t\t#append to dictionary\n\tinfo['token_vals'] = token_vals\n\t#given a word and a sentence, val is how many times it appears in that sentence\n\treturn info", "def get_num_words_spoken_by_character_per_episode(content):\n d = defaultdict(Counter)\n reader_list = csv.DictReader(content.splitlines())\n for row in reader_list:\n words = row['Line'].strip().split()\n d[row['Character']][row['Episode']] += len(words)\n return d", "def task1(sentence):\n split_sentence = sentence.split()\n dictionary = dict()\n for word in split_sentence:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n for item in dictionary:\n print(\"Word \" + item + \" used \" + str(dictionary[item]) + \" times\")\n return dictionary", "def _count_words_in_string(self, sentence):\n word_count = dict()\n for i in sentence:\n if word_count.get(i) is None:\n word_count[i] = 1\n else:\n word_count[i] = word_count.get(i)+1\n\n return word_count", "def word_count(phrase):\n Wordlist = phrase.replace(\"\\n\", ' ') # Creating a list without escape codes\n Wordlist = Wordlist.split(\" \") # Split the 
sentence in words\n dictionary = {} # Create an empty dictionary to store the results\n for i in Wordlist:\n if i != '': # unless is a ''\n dictionary[i] = Wordlist.count(i)\n return dictionary", "def word_count(phrase):\n words = phrase.split()\n deDupedWords = set(words)\n wordCount = {}\n\n for element in deDupedWords:\n wordCount.update({element: words.count(element)})\n\n return wordCount", "def get_e_probs(dataset):\n\n # Number of times that the state s is seen paired with observation x in the corpus\n e_word_tag_counts = {}\n\n for sentence in dataset:\n\n for word_to_tag in sentence:\n # Foreach (word, tag) tuple we are calculating number of incstances\n if word_to_tag in e_word_tag_counts:\n e_word_tag_counts[word_to_tag] += 1\n else:\n e_word_tag_counts[word_to_tag] = 1\n\n return e_word_tag_counts", "def count_words(tokenized_sentences):\r\n \r\n word_counts = {}\r\n \r\n # Loop through each sentence\r\n for sentence in tokenized_sentences: # complete this line\r\n \r\n for token in sentence: # complete this line\r\n\r\n # If the token is not in the dictionary yet, set the count to 1\r\n if token not in word_counts.keys(): # complete this line\r\n word_counts[token] = 1\r\n \r\n # If the token is already in the dictionary, increment the count by 1\r\n else:\r\n word_counts[token] += 1\r\n \r\n return word_counts", "def convert_word_to_count(counter={}, doc=[]):\n for sentence in doc:\n for word in sentence.split():\n if word not in counter:\n counter[word] = 1\n else:\n counter[word] += 1\n return counter", "def find_total_occurrences(poem, *words):\n word_counts = word_count(poem)\n print word_counts\n return {word: word_counts.get(word, 0) for word in words}", "def fancy_count(sentence, alphabet):\n sentence = sentence.lower()\n\n # create dictionary of all letters set to 0\n letter_count = {}\n for char in alphabet:\n letter_count[char] = 0\n\n for char in sentence:\n if char in letter_count.keys():\n letter_count[char] += 1\n return letter_count", "def count_sentences(text):\n\n import re\n\n # Make a list of sentences (separated by either '.', '!' 
or '?')\n sentence_list = re.split(r'[.!?]', text)\n # Find the size of the list\n count = len(sentence_list)\n\n return count", "def count_sentences(text):\n count = 0\n terminals = '.;?!'\n for character in text:\n \n if character in terminals:\n count += 1\n\n return count", "def word_count(self, document):\n start_time = time.time()\n dictionary = dict()\n counter = defaultdict(int)\n for line in document.splitlines():\n for word in line.split():\n if word not in PUNCTUATION_MARK:\n counter[word] += 1\n for word, cnt in sorted(counter.items(), key=lambda x: (-x[1], x[0])):\n dictionary[word] = cnt\n self.log.info(\"Duration count dictionary: {duration}\".format(duration=float(time.time() - start_time)))\n return dictionary", "def analyze_emoji_sentimens(text):\n sum = 0.0\n count = 0\n for character in list(text):\n value = index.get(character, None)\n if value != None:\n sum += value\n count += 1\n if count == 0:\n return 0.0\n\n return sum/count", "def pos(text):\n\n pos_counter = src.utils.nlp.parts_of_speech(text)\n total_count = sum(pos_counter.values())\n pos_dict = {pos: count / total_count for pos, count in pos_counter.items()}\n return pos_dict", "def count_words(phrase):\n # split the input string at spaces\n phrase_split = phrase.split()\n\n # initiate empty dictionary\n word_count = {}\n\n # iterate over words in the phrase\n for word in phrase_split:\n if word in word_count:\n\n # if the word is already a key in the dictionary, increase the value by 1\n word_count[word] += 1\n\n else:\n # if the word is not a key in the dictionary, set its value to 1\n word_count[word] = 1\n\n return word_count", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def compute_vocab_count(sents):\n counter = collections.Counter()\n for sentence in sents:\n counter.update(untag(sentence))\n return counter", "def entity_counts(doc):\n \n tags = []\n for token in doc.ents:\n tags.append(token.label_)\n frequency = dict(Counter(tags).most_common())\n\n return frequency", "def count_words():\n paragraph = \"a distinct section of a piece of writing,\"\n # 替换\n paragraph.replace(\",\", \" \").replace(\":\", \" \").replace(\";\", \" \").replace(\".\", \" \").replace(\"?\", \" \")\n words = paragraph.split(\" \")\n nums = {}\n\n for word in words:\n nums[word] = nums[word]+1 if word in nums else 1\n # nums[word] = nums.get(word, 0) + 1\n\n for word, num in nums.items():\n print(word, \": \", num)", "def count_entity_doc(document):\n count = {}\n for line in document[1:]:\n _, _, entity_type, _, _ = conll04_parser.split_line(line)\n if entity_type in count:\n count[entity_type] += 1\n else:\n count[entity_type] = 1\n return count", "def word_count(phrase):\n return collections.Counter(phrase.split())" ]
[ "0.68797433", "0.6825759", "0.65769255", "0.6535194", "0.65148383", "0.65113354", "0.6354589", "0.63443613", "0.6339356", "0.63273925", "0.6326009", "0.62736243", "0.6194626", "0.6182634", "0.6145766", "0.61381006", "0.6122692", "0.6083806", "0.6078651", "0.6076343", "0.6007803", "0.6000442", "0.5925114", "0.5918564", "0.59056175", "0.5882476", "0.58701545", "0.5848634", "0.5837764", "0.583313" ]
0.7836813
0
Adds the end bracket to finish the list of lists. Reads the finished product as a string. Converts the string to a list. Sends the final copy of the list to generate_template.
def compile_data(self): with open(self.store_path, 'a') as file: file.write(']') with open(self.store_path) as file: list_of_lists = file.read() card_list = json.loads(list_of_lists) return self.generate_template(card_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate(entities_to_proceed):\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"", "def create_list(self, data):\n\n temp = []\n for item in data:\n if len(item) > 2:\n i = 0\n while i < len(item):\n temp.append(item[i:min(i+2, len(item))])\n i += 2\n else:\n temp.append(item)\n data = temp\n\n temp = []\n for item in data:\n if item[-1] == \"{\":\n temp.append(\"[\" + item[0][:-1] + \",\")\n elif item[-1] == \"}\":\n temp.append(\"],\")\n else:\n temp.append(\"[\" + \" \".join(item).replace(\":\", \",\") + \"],\")\n return ast.literal_eval(\"\".join(temp))", "def latex_string_list(self,\n values, erased_elements=[],\n sizes=('large', 'small')):\n if not self.variable_list:\n return ''\n string_list = []\n large, small = sizes\n for var in self.variable_list:\n erased = var in erased_elements\n mark = '?' if erased else ' '\n string = '{value:>5}{mark}'.format(\n value=latex_number(values[var]), mark=mark)\n size = large if erased else small\n if size:\n string = '{{\\\\{size} {string}}}'.format(\n string=string, size=size)\n string_list.append(string)\n return string_list", "def create_list_string(list_):\n return f\"[{' '.join(list_)}]\"", "def parse_template(string):\n count = 0\n list1 = []\n for character in string:\n count = count + 1\n if character == \"{\":\n end = string.find(\"}\", count)\n s_strg = string[count:end]\n list1.append(s_strg)\n string = string.replace(s_strg, \"\", 1)\n count = count - len(s_strg)\n\n subs = tuple(list1)\n\n return(string, subs)\n print(subs)", "def create_content_list(contents: List[Text]) -> Text:\n # print(contents)\n return '\\n'.join(\n [template.LIST_TEMPLATE.format(\n level='',\n content=item\n ) for item in contents if item.strip()])", "def _template(inlist):\n from collections import OrderedDict\n if isinstance(inlist, str):\n inlist = [inlist]\n\n templates = []\n for item in reversed(inlist):\n templates.append(output_space(item))\n\n return OrderedDict(reversed(OrderedDict(templates).items()))", "def build_list(self, l):\n comma = self.art_type([self.string_type(', ')],\n baseline=0,\n breakpoints=[1])\n repr_elems = self.concatenate(l, comma)\n return self.build_container(\n repr_elems, self.left_square_bracket, self.right_square_bracket)", "def pr(self):\n return self.listformat.format(self.idx, str(self).rstrip())", "def generateRunList(softwareList):\n swEntrys = []\n for software in softwareList:\n swEntrys.append('\"recipe[%s]\"' % software)\n\n return '{ \"run_list\": [ %s ] }' % ', '.join(swEntrys)", "def __str__(self):\n str_list = ['[']\n for i in self.data_list:\n str_list.append(str(i))\n str_list.append(', ')\n str_list.pop() # remove trailing space\n str_list.append(\"]\")\n\n return ''.join(str_list)", "def __str__(self):\n\t\tstrBuffer = \"[\"\n\t\ttemp = self.head\n\t\tcount = 
0\n\n\t\twhile temp.getNext() != None:\n\t\t\t#print temp.getData()\n\t\t\tstrBuffer += temp.getData() + \", \"\n\t\t\tcount += 1\n\t\t\tif count % 20 == 0:\n\t\t\t\tstrBuffer = strBuffer[:-1] + '\\n' \n\t\t\ttemp = temp.getNext()\n\t\t\n\t\tstrBuffer += (temp.getData() + ']') #off by one fix\n\t\t\t\n\t\treturn strBuffer", "def SetList(self, temp_list: list or tuple):\n self.Clear()\n for item in temp_list:\n self.insert(Tags.End.value, item)", "def shortypy(your_string, your_list):\n sub = \"\"\n count = your_string.count(\"{\")\n for braces in range(count):\n x = your_string.find(\"{\")\n ind = your_string[x + 1]\n sub += your_string[:x]\n sub += str(your_list[int(ind)])\n your_string = your_string[(x + 3):]\n sub += your_string\n return sub", "def string_factory(list_of_dicts):\n result = []\n for item in range(len(list_of_dicts)):\n result.append(template.format(**list_of_dicts[item]))\n return result", "def listToStringFormat(self, list) ->str:\n string = ''\n for element in list:\n string = string + str(element) + \"\\n\"\n return string", "def gen_tag_addlist(self) -> tp.List[xml.TagAddList]:\r\n if not self.tag_adds:\r\n robot_config = self.main_config['ros']['robots'][self.robot]\r\n prefix = robot_config['prefix']\r\n model_base = robot_config['model']\r\n model_variant = robot_config.get('model_variant', '')\r\n\r\n if model_variant != '':\r\n model = f\"{model_base}_{model_variant}\"\r\n else:\r\n model = model_base\r\n\r\n desc_cmd = f\"$(find xacro)/xacro $(find {model_base}_description)/urdf/{model}.urdf.xacro\"\r\n for s in self.sizes:\r\n exp_adds = xml.TagAddList()\r\n pos_i = random.randint(0, len(self.positions) - 1)\r\n\r\n exp_adds.append(xml.TagAdd(\".\",\r\n \"master\",\r\n {},\r\n True))\r\n exp_adds.append(xml.TagAdd(\"./master\",\r\n \"group\",\r\n {\r\n 'ns': 'sierra'\r\n },\r\n False))\r\n exp_adds.append(xml.TagAdd(\"./master/group/[@ns='sierra']\",\r\n \"param\",\r\n {\r\n 'name': 'experiment/n_robots',\r\n 'value': str(s)\r\n },\r\n False))\r\n\r\n for i in range(0, s):\r\n\r\n ns = f'{prefix}{i}'\r\n pos = self.positions[pos_i]\r\n pos_i = (pos_i + 1) % len(self.positions)\r\n spawn_cmd_args = f\"-urdf -model {model}_{ns} -x {pos.x} -y {pos.y} -z {pos.z} -param robot_description\"\r\n\r\n exp_adds.append(xml.TagAdd(\"./robot\",\r\n \"group\",\r\n {\r\n 'ns': ns\r\n },\r\n True))\r\n\r\n exp_adds.append(xml.TagAdd(f\"./robot/group/[@ns='{ns}']\",\r\n \"param\",\r\n {\r\n \"name\": \"tf_prefix\",\r\n \"value\": ns\r\n },\r\n True))\r\n\r\n # These two tag adds are OK to use because:\r\n #\r\n # - All robots in Gazebo are created using spawn_model\r\n # initially.\r\n #\r\n # - All robots in Gazebo will provide a robot description\r\n # .urdf.xacro per ROS naming conventions\r\n exp_adds.append(xml.TagAdd(f\"./robot/group/[@ns='{ns}']\",\r\n \"param\",\r\n {\r\n \"name\": \"robot_description\",\r\n \"command\": desc_cmd\r\n },\r\n True))\r\n\r\n exp_adds.append(xml.TagAdd(f\"./robot/group/[@ns='{ns}']\",\r\n \"node\",\r\n {\r\n \"name\": \"spawn_urdf\",\r\n \"pkg\": \"gazebo_ros\",\r\n \"type\": \"spawn_model\",\r\n \"args\": spawn_cmd_args\r\n },\r\n True))\r\n\r\n self.tag_adds.append(exp_adds)\r\n\r\n return self.tag_adds", "def write_template_body2(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n basin = template_filename.split('/')[-1].split('_')[1].replace('.php', '')\n template_file = open(template_filename, 'a')\n template_file.write('domains.push({\\n')\n template_file.write(' displayName: \"All\",\\n')\n 
template_file.write(' name: \"'+basin+'\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write('variables.push({\\n')\n template_file.write(' displayName: \"Mean\",\\n')\n template_file.write(' name: \"<?php echo $LeadMean_name; ?>\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_AL.php\",\\n')\n template_file.write(' displayName: \"Atlantic\",\\n')\n template_file.write(' name: \"'+template_type+'_AL\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_CP.php\",\\n')\n template_file.write(' displayName: \"Central Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_CP\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_EP.php\",\\n')\n template_file.write(' displayName: \"Eastern Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_EP\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_WP.php\",\\n')\n template_file.write(' displayName: \"Western Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_WP\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//Initialize the page\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//function for keyboard controls\\n')\n template_file.write('document.onkeydown = keys;\\n')\n template_file.write('\\n')\n template_file.write(\n '//Decare object containing data about the currently displayed map\\n'\n )\n template_file.write('imageObj = {};\\n')\n template_file.write('\\n')\n template_file.write('//Initialize the page\\n')\n template_file.write('initialize();\\n')\n template_file.write('\\n')\n template_file.write(\n '//Format initialized run date & return in requested format\\n'\n )\n template_file.write('function formatDate(offset,format){\\n')\n template_file.write(' var newdate = String(cycle);\\n')\n template_file.write(' var yyyy = newdate.slice(0,4)\\n')\n template_file.write(' var mm = newdate.slice(4,6);\\n')\n template_file.write(' var dd = newdate.slice(6,8);\\n')\n template_file.write(' var hh = newdate.slice(8,10);\\n')\n template_file.write(\n ' var curdate = new Date(yyyy,parseInt(mm)-1,dd,hh)\\n'\n )\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write(' //Offset by run\\n')\n template_file.write(\n ' var newOffset = curdate.getHours() + offset;\\n'\n )\n template_file.write(' curdate.setHours(newOffset);\\n')\n template_file.write('\\n')\n template_file.write(\n ' var yy = String(curdate.getFullYear()).slice(2,4);\\n'\n )\n template_file.write(' yyyy = curdate.getFullYear();\\n')\n template_file.write(' mm = curdate.getMonth()+1;\\n')\n template_file.write(' dd = curdate.getDate();\\n')\n template_file.write(' if(dd < 10){dd = \"0\" + dd;}\\n')\n template_file.write(' hh = curdate.getHours();\\n')\n template_file.write(' if(hh < 10){hh = \"0\" + hh;}\\n')\n template_file.write('\\n')\n template_file.write(' var wkday = 
curdate.getDay();\\n')\n template_file.write(\n ' var day_str = [\"Sun\", \"Mon\", \"Tue\", \"Wed\", '\n +'\"Thu\", \"Fri\", \"Sat\"];\\n'\n )\n template_file.write('\\n')\n template_file.write(' //Return in requested format\\n')\n template_file.write(\" if(format == 'valid'){\\n\")\n template_file.write('//06Z Thu 03/22/18 (90 h)\\n')\n template_file.write(\n 'var txt = hh + \"Z \" + day_str[wkday] + \" \" + '\n +'mm + \"/\" + dd + \"/\" + yy;\\n'\n )\n template_file.write(' return txt;\\n')\n template_file.write(' }\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write('//Initialize the page\\n')\n template_file.write('function initialize(){\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Set image object based on default variables\\n'\n )\n template_file.write(' imageObj = {\\n')\n template_file.write(\n ' variable: \"<?php echo $LeadMean_name; ?>\",\\n'\n )\n template_file.write(' domain: \"'+basin+'\"\\n')\n template_file.write(' };\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Change domain based on passed argument, if any\\n'\n )\n template_file.write(' var passed_domain = \"\";\\n')\n template_file.write(' if(passed_domain!=\"\"){\\n')\n template_file.write(\n ' if(searchByName(passed_domain,domains)>=0){\\n'\n )\n template_file.write(\n ' imageObj.domain = passed_domain;\\n'\n )\n template_file.write(' }\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Change variable based on passed argument, if any\\n'\n )\n template_file.write(' var passed_variable = \"\";\\n')\n template_file.write(' if(passed_variable!=\"\"){\\n')\n template_file.write(\n ' if(searchByName(passed_variable,variables)>=0){\\n'\n )\n template_file.write(\n ' imageObj.variable = passed_variable;\\n'\n )\n template_file.write(' }\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Populate forecast hour and dprog/dt arrays for this '\n +'run and frame\\n'\n )\n template_file.write(\" populateMenu('variable');\\n\")\n template_file.write(\" populateMenu('domain');\\n\")\n template_file.write(\" populateMenu('maptype')\\n\")\n template_file.write('\\n')\n template_file.write(' //Populate the frames arrays\\n')\n template_file.write(' frames = [];\\n')\n template_file.write(\n ' for(i=minFrame;i<=maxFrame;i=i+incrementFrame)'\n +'{frames.push(i);}\\n'\n )\n template_file.write('\\n')\n template_file.write(\n ' //Predefine empty array for preloading images\\n'\n )\n template_file.write(' for(i=0; i<variables.length; i++){\\n')\n template_file.write(' variables[i].images = [];\\n')\n template_file.write(' variables[i].loaded = [];\\n')\n template_file.write(' variables[i].dprog = [];\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(' //Preload images and display map\\n')\n template_file.write(' preload(imageObj);\\n')\n template_file.write(' showImage();\\n')\n template_file.write('\\n')\n template_file.write(' //Update mobile display for swiping\\n')\n template_file.write(' updateMobile();\\n')\n template_file.write('\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write('var xInit = null;\\n')\n template_file.write('var yInit = null;\\n')\n template_file.write('var xPos = null;\\n')\n template_file.write('var yPos = null;\\n')\n template_file.write('\\n')\n template_file.write('</script>\\n')\n template_file.write('\\n')\n template_file.write('</body>\\n')\n template_file.write('</html>\\n')\n 
template_file.close()", "def dend():\n #\n # this is the omega\n inlist = list(\"end\" + \"\\n\") # WTF?\n #\n # change data into a list element\n outlist[1247:1250] = inlist # place data in the list in the correct place\n outstr = \"\".join(outlist)\n # print(outstr)\n # print(len(outstr))\n # of = open(\"workfile\", \"w\")\n # of.write(outstr)", "def mk_lst_trans_met(self):\n\t\telem_rnge_I = [[21,30],[39,44],[46,48],[74,76],[78,80]]\n\t\telem_rnge=[]\n\t\tfor i in elem_rnge_I:\n\t\t\tel_strt=i[0]\n\t\t\tel_end=i[1]\n\t\t\trnge_sect=range(el_strt,el_end+1)\n\t\t\telem_rnge.extend(rnge_sect)\n\t\telements=[]\n\t\tfor i in elem_rnge:\n\t\t\telement=Element.from_Z(i)\t# Indice -> pymatgen element object\n\t\t\telements.append(element)\n\t\treturn elements", "def generate_ordered_elements(revise_dos,ordered_list):\n my_ordered_elements=''\n for key in ordered_list:\n my_ordered_elements=my_ordered_elements+key+' '\n my_ordered_elements=my_ordered_elements[:len(my_ordered_elements)-1]\n return my_ordered_elements", "def rodape(lst_rodape):\n\n tmp=''\n tmp=''\n tmp+='\\t\\t\\t\\t<lines>3.3cm 2.2cm 19.5cm 2.2cm</lines>\\n'\n tmp+='\\t\\t\\t\\t<setFont name=\"Helvetica\" size=\"8\"/>\\n'\n tmp+='\\t\\t\\t\\t<drawString x=\"3.3cm\" y=\"2.4cm\">' + lst_rodape[2] + '</drawString>\\n'\n tmp+='\\t\\t\\t\\t<drawString x=\"18.4cm\" y=\"2.4cm\">Página <pageNumber/></drawString>\\n'\n tmp+='\\t\\t\\t\\t<drawCentredString x=\"11.5cm\" y=\"1.7cm\">' + lst_rodape[0] + '</drawCentredString>\\n'\n tmp+='\\t\\t\\t\\t<drawCentredString x=\"11.5cm\" y=\"1.3cm\">' + lst_rodape[1] + '</drawCentredString>\\n'\n\n return tmp", "def __rd_tpl_tail(self, fp):\n tlist = []\n tail = \"\"\n while True:\n line = fp.readline()\n if line == \"\":\n break\n tlist.append(line)\n for line in reversed(tlist):\n if line.strip() == \"\":\n tlist.pop()\n else:\n break\n for line in tlist:\n tail += line\n self.template['tail'] = tail\n return", "def _finalize() -> str:\n\t\t\t\t# Get the sub's replacement\n\t\t\t\tstart_pos = sub_element_holder.find(sub_type)\n\t\t\t\tend_pos = other_element_holder.find(\",\", start_pos)\n\t\t\t\tif end_pos == -1:\n\t\t\t\t\tend_pos = len(other_element_holder)\n\t\t\t\treplacement_element_type: str = other_element_holder[\n\t\t\t\t\tstart_pos:end_pos\n\t\t\t\t]\n\t\t\t\t# Ensure filter matches\n\t\t\t\tif replace_filter in replacement_element_type:\n\t\t\t\t\t# Check if replacement creates an equality\n\t\t\t\t\tif sub_element_holder.replace(sub_type, replacement_element_type) == other_element_holder:\n\t\t\t\t\t\t# Return a replacement with both types listed\n\t\t\t\t\t\treturn sub_element_holder.replace(sub_type, \"[{}, {}]\".format(\n\t\t\t\t\t\t\tsub_type,\n\t\t\t\t\t\t\treplacement_element_type,\n\t\t\t\t\t\t))\n\t\t\t\treturn \"\"", "def __str__(self):\n data_string = \"\"\n for list_el in self.data_list:\n for inner_list_el in list_el:\n data_string += str(inner_list_el)\n data_string += \"\\t\"\n data_string += \"\\n\"\n return data_string", "def create_not_included_list(codes):\n string = '\\\\begin{itemize}\\n'\n for code in codes:\n title = get_course_title_only(code)\n string += '\\\\item{' + title + '}\\n'\n string += '\\\\end{itemize}\\n'\n return string", "def _create_list_item(self, str):\n para = nodes.paragraph()\n para += nodes.strong('', str)\n\n item = nodes.list_item()\n item += para\n\n return item", "def presidente(lst_presidente):\n tmp=''\n tmp+='\\t\\t<para style=\"P3\" spaceAfter=\"35\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n 
tmp+='\\t\\t<para style=\"P4\"><b>' + str(lst_presidente) + '</b></para>\\n'\n tmp+='\\t\\t<para style=\"P4\">Presidente </para>\\n'\n return tmp", "def create_list(starting, ending):\n pass # remove this line when starting your function", "def _close_list(self):\n list_type = self.current_parent_element['attrs']['class']\n tag = LIST_TYPES[list_type]\n\n html = '</{t}>'.format(\n t=tag\n )\n self.cleaned_html += html\n self.current_parent_element['tag'] = ''\n self.current_parent_element['attrs'] = {}" ]
[ "0.57277954", "0.56510913", "0.5608768", "0.55836725", "0.54954815", "0.5477419", "0.54415685", "0.5341591", "0.5339922", "0.5274645", "0.52173305", "0.5185166", "0.51788926", "0.51774514", "0.5177045", "0.5169892", "0.516534", "0.5163845", "0.51523644", "0.51406246", "0.51371366", "0.5134035", "0.51306945", "0.5129415", "0.5119444", "0.510705", "0.5103431", "0.50821847", "0.506581", "0.5044766" ]
0.6110166
0
Builds the Teams Card Traditional tools for Json formatting do not preserve the template's syntax, So it has to be built manually For card_list, the sensor data is looped to create a table. This adds each sensor to the card and add a comma between them. Once the last run, the comma is skipped and the bracket is closed
def generate_template(self, card_list): with open(self.generator_output, 'w') as gen_file: print(Template.Top_prefix1+self.project+Template.Top_prefix2, file=gen_file) print(Template.sensor_prefix, file=gen_file) _run = len(card_list) _loop = 0 for e in card_list: data_info = Sensor_Data(e[0], e[1], e[2], e[3]) _loop += 1 if _loop != _run: print(str(data_info)+',', file=gen_file) else: print(data_info, file=gen_file) print(Template.sensor_suffix, file=gen_file) print(Template.Link_row_Template1+self.project+Template.Link_row_Template2, file=gen_file) print(Template.button_row_template1+'https://www.google.com/'+Template.button_row_template2, file=gen_file) print(Template.Bot_suffix, file=gen_file) return gen_file.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compile_data(self):\n with open(self.store_path, 'a') as file:\n file.write(']')\n with open(self.store_path) as file:\n list_of_lists = file.read()\n card_list = json.loads(list_of_lists)\n return self.generate_template(card_list)", "def construct_cards(tokens: List[Tuple[Token, str]]) -> List[str]:\n md = Render()\n headers: Tuple[str, str, str] = (\"\", \"\", \"\")\n cards: List[str] = []\n\n for token in tokens:\n if token[0] == Token.HEADER_1:\n headers = (token[1], \"\", \"\")\n elif token[0] == Token.HEADER_2:\n headers = (headers[0], token[1], \"\")\n elif token[0] == Token.HEADER_3:\n headers = (headers[0], headers[1], token[1])\n elif token[0] == Token.CARD:\n constructed_headers = Template.assemble_headers(headers)\n body = add_cloze(md.render(token[1]))\n\n if constructed_headers is None:\n cards.append(\n f'{Template.OPEN_CARD}' \n f'{body}'\n f'{Template.CLOSE_CARD}'\n )\n else:\n cards.append(\n f'{Template.OPEN_CARD}' \n f'{constructed_headers}'\n f'<hr>'\n f'{body}'\n f'{Template.CLOSE_CARD}'\n )\n\n return cards", "def doGenCards(self, characterName, cardJSONS: list):\r\n card_svgs = {}\r\n for (card) in cardJSONS:\r\n card_svg = self.ts.getNewCard()\r\n card = self.substituteVariables(card)\r\n for attribute, value in {attr:card[attr] for attr in card if (attr not in Constants.invisibleAttributes)}.items():\r\n node = card_svg.find(f\".//*[@id='{attribute}']\")\r\n if(node.tag == f\"{Constants.ns['svg']}text\"):\r\n if(attribute == 'name'):\r\n node.text = value.title()\r\n else:\r\n node.text = f\"{attribute.upper()}: {value}\"\r\n elif(attribute == 'description'):\r\n list(node)[0].text = value\r\n elif(attribute == 'tags'):\r\n self.genTags(list(node)[0], value)\r\n node = card_svg.find(f\".//*[@id='Character Name']\")\r\n node.text = characterName.upper()\r\n card_svgs[card[\"name\"]] = card_svg\r\n return card_svgs", "def generate(entities_to_proceed):\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"", "def parse_mtgjson_cards(json_data):\n output = []\n for data in json_data.values():\n cards = []\n for raw in data[\"cards\"]:\n c = Card(raw)\n c.image_url = MTGConstants.card_image_url_base.format(c.multiverse_id)\n c.set = data[\"code\"]\n c.set_name = data[\"name\"]\n cards.append(c)\n output = output + cards\n return output", "def formatted_list_timecard_line(timecard_dict):\n\n return '%s %s %s %s %s %s %s ' % (\n timecard_dict['id'],\n timecard_dict['contract']['client']['name'],\n timecard_dict['contract']['employee']['first'],\n timecard_dict['contract']['employee']['last'],\n timecard_dict['date'],\n dt.strptime(timecard_dict['period_start'], DATE_ISO_FORMAT).strftime(DATE_INPUT_FORMAT),\n timecard_dict['period_end']\n )", "def build_series_card(series_metadata_item):\n \n try:\n s_card = dict()\n s_card['title'] = 
'Indicator ' + series_metadata_item['IndicatorCode'] + ': ' + series_metadata_item['SeriesDesc'].replace('%','percent')\n s_card['layer_title'] = series_metadata_item['SeriesDesc'].replace('%','percent').replace(',',' ').replace('/',' ')\n \n snippet = s_card['title']\n \n s_card['snippet'] = (snippet[:250] + '..') if len(snippet) > 250 else snippet\n s_card['description'] = \\\n '<p><strong>Series ' + series_metadata_item['SeriesCode'] + ': </strong>' + series_metadata_item['SeriesDesc'] + \\\n '/p>' + \\\n '<p><strong>Indicator ' + series_metadata_item['IndicatorCode'] + ': </strong>' + \\\n series_metadata_item['IndicatorDesc'] + \\\n '</p>' + \\\n '<p><strong>Target ' + series_metadata_item['TargetCode'] + ': </strong>' + \\\n series_metadata_item['TargetDesc'] + \\\n '</p>' + \\\n '<p><strong>Goal ' + series_metadata_item['GoalCode'] + ': </strong>' + \\\n series_metadata_item['GoalDesc'] + \\\n '</p>' + \\\n '<p><em>Release Version: ' + series_metadata_item['SeriesRelease'] + ' </em>'+ \\\n '</p>' \n \n series_tags = series_metadata_item['Tags'][:]\n series_tags.append(series_metadata_item['SeriesRelease'])\n \n s_card['tags'] = series_tags\n \n return s_card\n except:\n print('Unexpected error:', sys.exc_info()[0]) \n return None", "def card_format(card):\n pretty_output = '%s pts: %d powers: %s' % (\n card['name'],\n card['points'],\n ', '.join(card['powers'])\n )\n pretty_output += ' desc: %s' % card['description']\n return pretty_output", "def get_cards_as_string(self):\n return '' \\\n ' {}\\n' \\\n ' {}\\n' \\\n ' {}\\n' \\\n ' {}\\n' \\\n ' {}\\n'.format(*self.get_cards_high_to_low())", "def writeDataCards(opt,sigExp,bkgExp,shapesURL):\n\n #create a card per category\n dcList=[]\n for icat in range(len(opt.categs)):\n cat='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n dcTxt='%s/shapes-parametric.datacard_%s.dat'%(opt.output,cat)\n dcList.append(dcTxt)\n with open(dcTxt,'w') as dc:\n dc.write('#\\n')\n dc.write('# datacard was automatically generated with generateWorkspace.py\\n')\n dc.write('# the options passed are printed below\\n')\n dc.write('# %s\\n'%opt)\n dc.write('#\\n')\n dc.write('imax *\\n')\n dc.write('jmax *\\n')\n dc.write('kmax *\\n')\n dc.write('-'*50+'\\n')\n dc.write('shapes * * {0} $PROCESS_{1} $PROCESS_$SYSTEMATIC\\n'.format(shapesURL,cat))\n dc.write('shapes data_obs * {0} $PROCESS_{1}\\n'.format(shapesURL,cat))\n dc.write('-'*50+'\\n')\n dc.write('bin %s\\n'%cat)\n dc.write('observation -1\\n')\n dc.write('-'*50+'\\n')\n dc.write('%15s %15s %15s\\n'%('bin',cat,cat))\n dc.write('%15s %15s %15s\\n'%('process','sig','bkg'))\n dc.write('%15s %15s %15s\\n'%('process','0', '1'))\n dc.write('%15s %15s %15s\\n'%('rate','%3.2f'%sigExp[icat], '%3.2f'%bkgExp[icat]))\n dc.write('-'*50+'\\n')\n \n #float the background normalization as well as the signal\n dc.write('mu_bkg{0} rateParam {0} bkg 1\\n'.format(cat))\n\n #uncertainties\n dc.write('lumi %8s %15s %15s\\n'%('lnN','1.027','-'))\n dc.write('%s_sigShape %8s %15s %15s\\n'%(cat,'shape','1','-'))\n dc.write('%s_bkgShape %8s %15s %15s\\n'%(cat,'shape','-','1'))\n dc.write('{0} autoMCStats 0.0 1\\n'.format(cat))\n \n print '\\tshapes available @',shapesURL\n print '\\tgenerated the following datacards',dcList", "def generate_card(template, data):\n\t# Look for a loop\n\tloop_start = re.search(\"\\t*{{( *)forall (\\w+)( *)}}\\n\", template)\n\t# If we found a loop, process it\n\tif loop_start:\n\t\t# Find the end of the for loop\n\t\tloop_start_begin = loop_start.start()\n\t\tloop_start_end = 
loop_start.end()\n\t\tloop_close = re.search(\"\\t*{{( *)endfor( *)}}\\n\", template[loop_start_end:])\n\n\t\tloop_close_begin = loop_start_end\n\t\tloop_close_end = loop_start_end\n\n\t\t# Find the end of the outermost for loop\n\t\twhile loop_close:\n\t\t\tloop_close_begin = loop_close_end + loop_close.start()\n\t\t\tloop_close_end += loop_close.end()\n\t\t\tloop_close = re.search(\"\\t*{{( *)endfor( *)}}\\n\", template[loop_close_end:])\n\n\t\t# Split up the template\n\t\tbeginning = template[:loop_start_begin]\n\t\tmiddle = template[loop_start_end:loop_close_begin]\n\t\tend = template[loop_close_end:]\n\n\t\t# Recursively process the middle and concatenate to the beginning\n\t\t# Pull out the data for the loop from the dictionary\n\t\tkey = loop_start.group(2)\n\t\tdict_ = data[key]\n\t\tfor element in dict_:\n\t\t\tbeginning += generate_card(middle, element)\n\n\t\t# Add back on the end\n\t\ttemplate = beginning + end\n\n\t# Process any remaining simple replacements\n\ttemplate = replace_params(template, data)\n\n\treturn template", "def card_content(card_ids, credential_ids, company_id):\n count = 0\n length = len(card_ids)\n values_cards = ''\n values_creds = ''\n for (a, b) in zip(card_ids, credential_ids):\n count += 1\n values_cards += '(' + a + ', ' + str(company_id) + ', ' + b + ')'\n values_creds += '('+b+')'\n if count != length:\n values_cards += ', '\n values_creds += ', '\n values = {'values_card': values_cards, 'values_cred': values_creds}\n return values", "def _createCardDB(cards, pilotTexts, upgradeTexts, modificationTexts, titleTexts):\n # TODO cardDB should be an object by now instead of a dict\n card_db = {}\n initialisms_db = {}\n ship_db = {}\n\n for name, ship in cards['ships'].items():\n ship_db[name] = '{} ({}/{}/{}/{})'.format(name, ship.get('attack') or 0, ship['agility'], ship['hull'], ship['shields'])\n card_db[cleanName(name)] = '**' + ship_db[name] + '**\\n\\r\\n'\n\n # fill templates\n for pilot in cards['pilotsById']:\n if not cleanName(pilot['name']) in card_db:\n card_db[cleanName(pilot['name'])] = ''\n card_db[cleanName(pilot['name'])] += '**' + '{}'.format(pilot['name']) + '**'\n if 'unique' in pilot:\n card_db[cleanName(pilot['name'])] += ' *'\n card_db[cleanName(pilot['name'])] += '\\n\\r\\n'\n if 'limited' in pilot:\n card_db[cleanName(pilot['name'])] += '^^*limited*\\n\\n'\n if 'ship_override' in pilot:\n card_db[cleanName(pilot['name'])] += '^^Ship: {} ({}/{}/{}/{})'.format(pilot['ship'], pilot['ship_override'].get('attack') or 0, pilot['ship_override']['agility'], pilot['ship_override']['hull'], pilot['ship_override']['shields']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^') + '\\n\\n'\n else:\n card_db[cleanName(pilot['name'])] += ('^^Ship: {}\\n\\n'.format(ship_db[pilot['ship']])).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n card_db[cleanName(pilot['name'])] += '^^Skill: {}\\n\\n^^Points: {}\\n\\n'.format(pilot['skill'], pilot['points']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if pilot['name'].replace('\"','') in pilotTexts:\n card_db[cleanName(pilot['name'])] += ('^^' + pilotTexts[pilot['name'].replace('\"','')]['text'] + '\\n\\n').replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n card_db[cleanName(pilot['name'])] += '\\n\\n'\n log.info('Added %s', card_db[cleanName(pilot['name'])])\n\n for upgrade in cards['upgradesById']:\n if not cleanName(upgrade['name']) in card_db:\n card_db[cleanName(upgrade['name'])] = ''\n card_db[cleanName(upgrade['name'])] += '**' + 
'{}'.format(upgrade['name']) + '**'\n if 'unique' in upgrade:\n card_db[cleanName(upgrade['name'])] += ' *'\n card_db[cleanName(upgrade['name'])] += '\\n\\r\\n'\n if 'limited' in upgrade:\n card_db[cleanName(upgrade['name'])] += '^^*limited*\\n\\n'\n if 'faction' in upgrade:\n card_db[cleanName(upgrade['name'])] += '^^Faction: {}\\n\\n'.format(upgrade['faction']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if 'slot' in upgrade:\n card_db[cleanName(upgrade['name'])] += '^^Type: {}\\n\\n'.format(upgrade['slot']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if 'attack' in upgrade:\n card_db[cleanName(upgrade['name'])] += '^^Attack: {}\\n\\n'.format(upgrade['attack']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if 'range' in upgrade:\n card_db[cleanName(upgrade['name'])] += '^^Range: {}\\n\\n'.format(upgrade['range']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if 'points' in upgrade:\n card_db[cleanName(upgrade['name'])] += '^^Points: {}\\n\\n'.format(upgrade['points']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if upgrade['name'].replace('\"','') in upgradeTexts:\n card_db[cleanName(upgrade['name'])] += ('^^' + upgradeTexts[upgrade['name'].replace('\"','')]['text'] + '\\n\\n').replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n card_db[cleanName(upgrade['name'])] += '\\n\\n'\n log.info('Added %s', card_db[cleanName(upgrade['name'])])\n\n for modification in cards['modificationsById']:\n if not cleanName(modification['name']) in card_db:\n card_db[cleanName(modification['name'])] = ''\n card_db[cleanName(modification['name'])] += '**' + '{}'.format(modification['name']) + '**'\n if 'unique' in modification:\n card_db[cleanName(modification['name'])] += ' *'\n card_db[cleanName(modification['name'])] += '\\n\\r\\n'\n if 'limited' in modification:\n card_db[cleanName(modification['name'])] += '^^*limited*\\n\\n'\n if 'ship' in modification:\n card_db[cleanName(modification['name'])] += '^^Ship: {}\\n\\n'.format(modification['ship']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if 'points' in modification:\n card_db[cleanName(modification['name'])] += '^^Points: {}\\n\\n'.format(modification['points']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if modification['name'].replace('\"','') in modificationTexts:\n card_db[cleanName(modification['name'])] += ('^^' + modificationTexts[modification['name'].replace('\"','')]['text'] + '\\n\\n').replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n card_db[cleanName(modification['name'])] += '\\n\\n'\n log.info('Added %s', card_db[cleanName(modification['name'])])\n\n for titleText in cards['titlesById']:\n if not cleanName(titleText['name']) in card_db:\n card_db[cleanName(titleText['name'])] = ''\n card_db[cleanName(titleText['name'])] += '**' + '{}'.format(titleText['name']) + '**'\n if 'unique' in titleText:\n card_db[cleanName(titleText['name'])] += ' *'\n card_db[cleanName(titleText['name'])] += '\\n\\r\\n'\n if 'limited' in titleText:\n card_db[cleanName(titleText['name'])] += '^^*limited*\\n\\n'\n if 'ship' in titleText:\n card_db[cleanName(titleText['name'])] += '^^Ship: {}\\n\\n'.format(titleText['ship']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if 'points' in titleText:\n card_db[cleanName(titleText['name'])] += '^^Points: {}\\n\\n'.format(titleText['points']).replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n if titleText['name'].replace('\"','') in 
titleTexts:\n card_db[cleanName(titleText['name'])] += ('^^' + titleTexts[titleText['name'].replace('\"','')]['text'] + '\\n\\n').replace('(','&#40;').replace('(','&#41;').replace(' ',' ^^')\n card_db[cleanName(titleText['name'])] += '\\n\\n'\n log.info('Added %s', card_db[cleanName(titleText['name'])])\n\n # Create initialisms\n for upgrade in cards['upgradesById']:\n if len(upgrade['name'].split()) > 1:\n log.info('Adding %s initialism for %s', cleanName(''.join(title[0] for title in upgrade['name'].split())), upgrade['name'])\n initialisms_db[cleanName(''.join(title[0] for title in upgrade['name'].split()))] = card_db[cleanName(upgrade['name'])]\n\n for modification in cards['modificationsById']:\n if len(modification['name'].split()) > 1:\n log.info('Adding %s initialism for %s', cleanName(''.join(title[0] for title in modification['name'].split())), modification['name'])\n initialisms_db[cleanName(''.join(title[0] for title in modification['name'].split()))] = card_db[cleanName(modification['name'])]\n\n for titleText in cards['titlesById']:\n if len(titleText['name'].split()) > 1:\n log.info('Adding %s initialism for %s', cleanName(''.join(title[0] for title in titleText['name'].split())), titleText['name'])\n initialisms_db[cleanName(''.join(title[0] for title in titleText['name'].split()))] = card_db[cleanName(titleText['name'])]\n\n for name, text in initialisms_db.items():\n card_db[name] = text\n\n return card_db", "def as_string(self):\n # Remove cards which have no definition\n cards = [card for card in self if card.definition is not None]\n # Remove cards which have no word\n cards = [card for card in cards if card.word]\n return \"\\n\".join([card.get_flashcard() for card in cards])", "def marshall(self):\n try:\n data = [\"x02\"] #start token\n data.extend(ac.getCarState(0, acsys.CS.CurrentTyresCoreTemp)) #0-3 - Core tyre temperatures, Degrees celcius\n data.extend(info.physics.tyreWear) #4-7 #tyre wear\n data.extend(ac.getCarState(0, acsys.CS.DynamicPressure)) #8-11 pressure of each tyre in PSI\n data.extend(ac.getCarState(0, acsys.CS.TyreDirtyLevel)) #12-15 amount of dirt on each tyre\n data.append(ac.getCarState(0, acsys.CS.SpeedMS)) #16 speed in metres/sec\n data.append(ac.getCarState(0, acsys.CS.Gear)) #17 gear number\n data.append(ac.getCarState(0, acsys.CS.BestLap)) #18 best lap time in ms\n data.append(ac.getCarState(0, acsys.CS.RPM)) #19 rpm\n data.append(ac.getCarState(0, acsys.CS.LapCount)) #20 lap count\n data.append(ac.getCarState(0, acsys.CS.LapInvalidated)) #21 is lap invalid? 0-no, 1-yes\n data.append(ac.getCarState(0, acsys.CS.LapTime)) #22 current lap time in ms\n data.append(ac.getCarState(0, acsys.CS.LastLap)) #23 last lap in ms\n data.append(ac.getCarState(0, acsys.CS.PerformanceMeter)) #24 delta time in ms from best lap?? 
(haven't checked)\n data.append(ac.getCarState(0, acsys.CS.Steer)) #25 steering rotation in radians\n data.append(ac.getCarName(0)) #26 name of car being driven by player\n data.append(ac.getTrackName(0)) #27 track name\n\n data.append(\"x04\") #end token\n except Exception as e:\n ac.console(\"{}\".format(e))\n return \",\".join(str(v) for v in data).encode()", "def make_deck_from_string(string, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n deck_list = []\n while len(string) >= 14:\n x = 'card_' + string[7:9] + '_' + string[10:12]\n card = eval (x)\n if card.card_type == 'monster':\n deck_list.append(Monster(name = card.name, set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n attack= card.attack, health= card.health,lv_type= card.lv_type,lv_active_level= card.lv_active_level, special_effect= card.special_effect))\n elif card.card_type == 'tactic':\n deck_list.append(Tactic(name = card.name, set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n lv_type= card.lv_type,lv_active_level= card.lv_active_level, special_effect= card.special_effect))\n elif card.card_type == 'item':\n deck_list.append(Item(name = card.name, set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n lv_type= card.lv_type,lv_active_level= card.lv_active_level, special_effect= card.special_effect))\n elif card.card_type == 'character':\n deck_list.append(Character(name = card.name,set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n health= card.health,skill_1_lv = card.skill_1_lv, skill_1_type = card.skill_1_type,skill_2_lv = card.skill_2_lv, skill_2_type = card.skill_2_type,skill_3_lv = card.skill_3_lv, skill_3_type = card.skill_3_type))\n\n string = string[14:]\n\n\n return deck_list", "async def deliver_card(list_of_lists: List[List[str]]) -> str:\n\n final_string = []\n for sublist in list_of_lists:\n final_string.append('\\u200A'.join(sublist))\n\n # add blank emoji to first line to accommodate compact mode w/o resizing emojis\n return '<:blank:589560784485613570>\\n' + '\\n'.join(final_string)", "def fill_cards_markup(self, script_manager):\n for card in self.cards:\n card.fill_markup(self._trello, script_manager)", "def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription", "def json_market_builder(self, customerID, marketID) :\n json_result = '{\\n'\n json_result += '\\t \"_results\":[\\n'\n json_result += '\\t\\t{ \"customerID\": \"' + str(customerID)\n json_result += ', \"marketID\": \"' + str(marketID)\n json_result += '}\\n'\n json_result += '\\n\\t]\\n}'\n return json_result", "def summary_data(self):\n data = {\n \"total\": self.total,\n \"card_one_value\": self.cards[0].value,\n \"card_two_value\": self.cards[1].value,\n \"card_one_rank\": self.cards[0].rank,\n \"card_two_rank\": self.cards[1].rank,\n \"cards\": \" \".join([str(card) for card in self.cards]),\n \"soft\": int(self.soft),\n \"from_split\": int(self.from_split),\n \"blackjack\": int(self.blackjack),\n \"num_cards\": len(self.cards),\n \"start_total\": 
self.cards[0] + self.cards[1],\n \"wager\": int(self.wager),\n \"insurance\": int(self.insurance),\n \"surrender\": int(self.surrender),\n \"double_down\": int(self.double_down),\n \"num_aces\": self.num_aces,\n \"num_hard_aces\": self.num_hard_aces\n }\n return data", "def __str__(self):\n\t\t\n\t\tstring = \"{Jokers: \"\n\t\tfor card in self.jokers:\n\t\t\tstring += str(card)+\", \"\n\n\t\tstring += \"}\"\n\n\t\tfor i in range(len(self.grps)):\n\t\t\tstring += \", {group \"+str(i+1)+\": \"\n\t\t\tfor card in self.grps[i]:\n\t\t\t\tstring += str(card)+\", \"\n\t\t\tstring += \"}\"\n\t\treturn string", "def createObsTable(df):\n \n textarea = ''\n \n for idx in range(len(df)):\n # Convert the dataframe ra and dec into Sky Coordinates\n c = SkyCoord(df['ra'].iloc[idx]*u.degree, df['dec'].iloc[idx]*u.degree)\n # Convert RA and DEC into hour-minute-second and degree-minute-second\n ra_hms = c.ra.hms\n dec_dms = c.dec.dms\n # Get the observation time and convert it into a standard format\n date_obs = df['date_obs'].iloc[idx].decode()#[2:-1]\n time_obj = Time(date_obs, format='iso', scale='utc')\n # Convert observation time and sky coords into a string\n if dec_dms.d != 0:\n name = (\" %07i %s %s %s.%s %02i %02i %06.3f%+03i %02i %05.2f W84\\n\" %\n (int(df['visit_id'].iloc[idx]), date_obs[:4], date_obs[5:7], \n date_obs[8:10], str(time_obj.mjd)[6:11],\n ra_hms.h, ra_hms.m, ra_hms.s,\n dec_dms.d, np.abs(dec_dms.m), np.abs(dec_dms.s)))\n else:\n if copysign(1, dec_dms.d) == -1.0:\n dec_dms_d = '-00'\n else:\n dec_dms_d = '+00'\n name = (\" %07i %s %s %s.%s %02i %02i %06.3f%s %02i %05.2f W84\\n\" %\n (df['visit_id'].iloc[idx], date_obs[:4], date_obs[5:7],\n date_obs[8:10], str(time_obj.mjd)[6:11],\n ra_hms.h, ra_hms.m, ra_hms.s,\n dec_dms_d, np.abs(dec_dms.m), np.abs(dec_dms.s)))\n textarea += name\n \n return textarea", "def __str__(self):\n res = []\n for card in self.cards:\n res.append(str(card))\n return \"\\n\".join(res)", "def _getCSVForPerField(self, statistic):\n\n rows = []\n\n chart_json = simplejson.loads(statistic.chart_json)\n description = chart_json['description'] \n header = []\n for item in description:\n header.append(item[-1].encode('utf-8'))\n rows.append(header)\n\n final_stat = simplejson.loads(statistic.final_json)\n for choice, result in final_stat.iteritems():\n row = []\n row.append(unicode(choice).encode('utf-8'))\n for item in result:\n row.append(unicode(item).encode('utf-8'))\n rows.append(row)\n\n return rows", "def device_details_json():\n return [\n {\n \"dateutc\": 1547094300000,\n \"winddir\": 344,\n \"windspeedmph\": 1.6,\n \"windgustmph\": 2.2,\n \"maxdailygust\": 3.4,\n \"tempf\": 34,\n \"hourlyrainin\": 0,\n \"eventrainin\": 0,\n \"dailyrainin\": 0,\n \"weeklyrainin\": 0,\n \"monthlyrainin\": 0,\n \"totalrainin\": 0,\n \"baromrelin\": 30.38,\n \"baromabsin\": 24.89,\n \"humidity\": 49,\n \"tempinf\": 69.6,\n \"humidityin\": 30,\n \"uv\": 0,\n \"solarradiation\": 0,\n \"feelsLike\": 34,\n \"dewPoint\": 16.87,\n \"date\": \"2019-01-10T04:25:00.000Z\",\n },\n {\n \"dateutc\": 1547094000000,\n \"winddir\": 344,\n \"windspeedmph\": 0,\n \"windgustmph\": 0,\n \"maxdailygust\": 3.4,\n \"tempf\": 34,\n \"hourlyrainin\": 0,\n \"eventrainin\": 0,\n \"dailyrainin\": 0,\n \"weeklyrainin\": 0,\n \"monthlyrainin\": 0,\n \"totalrainin\": 0,\n \"baromrelin\": 30.38,\n \"baromabsin\": 24.89,\n \"humidity\": 50,\n \"tempinf\": 69.4,\n \"humidityin\": 29,\n \"uv\": 0,\n \"solarradiation\": 0,\n \"feelsLike\": 34,\n \"dewPoint\": 17.34,\n \"date\": 
\"2019-01-10T04:20:00.000Z\",\n },\n ]", "def __init__(self, cards_on_table=None, cards=[], hands_list=[]):\n self.cards = cards\n self.cards_on_table = cards_on_table\n self.hands_list = hands_list", "def __str__(self):\n res = []\n for card in self.deckcards:\n res.append(str(card))\n return '\\n'.join(res)", "def __prepare_torsions_contents(torsions: Optional[dict],\n elements: list) -> list:\n\n torsions_contents = []\n\n number_of_torsions = len(torsions) if torsions is not None else 0\n\n torsions_contents.append(\n ' {:^2}'.format(number_of_torsions) +\n ' ! Nr of torsions;' +\n 'at1;at2;at3;at4;;V1;V2;V3;V2(BO);vconj;n.u;n\\n')\n\n if number_of_torsions:\n\n for key, values in torsions.items():\n\n num = ReactiveForceFieldWriter.__get_num_from_str(elements,\n key)\n\n torsions_contents.append(\n ' ' + num + ' ' * 2 +\n str(values['value']).lstrip('[').rstrip(']') +\n '\\n')\n\n return torsions_contents", "def printAll(self, cards, output):\r\n for (card, num) in cards.items():\r\n self.ts.addCards(card, num)\r\n self.ts.writeCards(output)" ]
[ "0.6514272", "0.5683417", "0.561787", "0.5517833", "0.54892814", "0.54420215", "0.5386302", "0.5369797", "0.53519976", "0.5306699", "0.529736", "0.5160479", "0.5063059", "0.50323474", "0.49998054", "0.49891", "0.49889284", "0.49734932", "0.49520928", "0.49148855", "0.4858352", "0.48573944", "0.48540673", "0.48449042", "0.48299438", "0.4822644", "0.48168272", "0.48084313", "0.479315", "0.47868478" ]
0.62450767
1
Convert string date in format 'Aug 2020' to '20200801'
def reformat_date(date_str: str) -> str: if date_str is None or date_str == '': return None [month_key, year] = date_str.split() return f'{year}-{MONTHS[month_key]}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_date_column(datestring):\n return datetime.datetime.strptime(datestring.strip(), \"%b-%Y\").date()", "def convert_date(dt_str, letter_date):\r\n if letter_date:\r\n rev_date = datetime.datetime.strptime(dt_str, '%b-%y').strftime('%Y-%m') # convert date to yymm string format\r\n rev_date_pts = rev_date.split(\"-\")\r\n year_num = int(rev_date_pts[0])\r\n if year_num > 1999:\r\n year_num = year_num - 100\r\n year_str = str(year_num)\r\n rev_date_pts[0] = year_str\r\n revised = \"-\".join(rev_date_pts)\r\n\r\n else:\r\n revised = datetime.datetime.strptime(dt_str, '%d-%m-%Y').strftime(\r\n '%Y-%m-%d') # convert date to YY-mm string format\r\n\r\n return revised", "def to_yearmonth(yearmonthdate_str):\n # yearmonth = int(yearmonth_str[:7].replace('-', ''))\n yearmonth = int(yearmonthdate_str[:4] + yearmonthdate_str[5:7])\n return yearmonth", "def preprocess_date(date_):\n if 'JAN' in date_:\n date_ = date_.replace('JAN', '01')\n elif 'FEB' in date_:\n date_ = date_.replace('FEB', '02')\n elif 'MAR' in date_:\n date_ = date_.replace('MAR', '03')\n elif 'APR' in date_:\n date_ = date_.replace('APR', '04')\n elif 'MAY' in date_:\n date_ = date_.replace('MAY', '05')\n elif 'JUN' in date_:\n date_ = date_.replace('JUN', '06')\n elif 'JUL' in date_:\n date_ = date_.replace('JUL', '07')\n elif 'AUG' in date_:\n date_ = date_.replace('AUG', '08')\n elif 'SEP' in date_:\n date_ = date_.replace('SEP', '09')\n elif 'OCT' in date_:\n date_ = date_.replace('OCT', '10')\n elif 'NON' in date_:\n date_ = date_.replace('NON', '11')\n elif 'DEC' in date_:\n date_ = date_.replace('DEC', '12')\n if date_[-2:] > '17':\n date_ = date_[:6] + '19' + date_[-2:]\n else:\n date_ = date_[:6] + '20' + date_[-2:]\n return datetime.strptime(date_, '%d-%m-%Y')", "def convert_date(date):\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)", "def filter_project_date(s):\n return datetime.strptime(s, '%Y-%m-%d').strftime('%B %Y')", "def date_to_month(date):\r\n return re.sub(r'(\\d{4}-\\d{2})-\\d{2}', r'\\1', date)", "def modis_to_from_pydatetime(date):\n \n if isinstance(date, (str, unicode)): \n return dt.datetime.strptime(date[1:], '%Y%j').date()\n return dt.datetime.strftime(date, 'A%Y%j')", "def rebuildDate(date):\n parts = date.split(\" \")\n parts[1] = parts[1][:-1]\n eDate = parts[2] + '-' + parts[0] + '-' + parts[1]\n return eDate", "def str_date(date):\n d = str(date).split('-')\n string_date = \"\"\n for i in range(len(d)):\n string_date += d[i]\n return string_date", "def reformat_date(mdy_date_string):\n month, day, year = mdy_date_string.split('/')\n return f\"{year}-{month}-{day}\"", "def str_day_month(s):\n # TODO: Fix the -06:00 time zone offset\n if s:\n d = convert_from_iso(s)\n return datetime.datetime.strftime(d, \"%B %d|%A\").strip(\"0\")\n else:\n # Couldn't parse, return original.\n return s", "def str_to_date(str_input):\n date = str_input.split('-')\n return dt.date(int(date[0]), int(date[1]), int(date[2]))", "def string_to_date(date_string):\n\n return date(int(date_string[:4]),\n int(date_string[5:7]),\n int(date_string[8:10]))", "def format_aa_date(data, format_string='%Y-%m'):\n raw_date = data.split('~')[0]\n return datetime.strptime(raw_date, format_string)", "def as_date(inp):\n \n out = datetime.datetime.strptime(str(inp), \"%Y%m\")\n out = out.replace(day = 28) + datetime.timedelta(days=4)\n \n return out - datetime.timedelta(days = out.day)", "def convert_str2date(date):\n import datetime\n date = str(date)\n year = int(date[0:4])\n month = 
int(date[4:6])\n day = int(date[6:8])\n return datetime.datetime(year,month,day)", "def formatDate(string):\n splitStr = re.split('-',string)\n return int(splitStr[0]+'12'+splitStr[2])", "def str_2_date(str_date):\n str_format = \"%m/%d/%y\"\n return datetime.strptime(str_date, str_format)", "def convert_str_date(date, current_pattern, output_pattern):\n assert isinstance(date, str)\n assert isinstance(current_pattern, str)\n assert isinstance(output_pattern, str)\n\n dt = datetime.datetime.strptime(date, current_pattern).date()\n\n dt = dt.strftime(output_pattern)\n return dt", "def reformat_date(mdy_date_string):\n date = mdy_date_string.split('/')\n return f\"{date[2]}-{date[0]}-{date[1]}\" # difficult to read", "def _str_to_date(self, date):\n return datetools.date_parser(date)", "def _get_date_from_str(date_input):\r\n return datetime.datetime.strptime(date_input.strip(), \"%Y-%m-%d\").replace(tzinfo=pytz.UTC)", "def datestr_to_yearday(date_str):\n date = datetime.datetime.strptime(date_str, '%b %d %Y')\n day_of_year = get_month_day_offset(date.month, date.year) + date.day\n return '%d.%03d' % (date.year, day_of_year)", "def convert(date):\n converted_date = datetime.datetime.strptime(date, \n \"%Y-%m-%d\").date()\n return converted_date", "def date(string): \n return string[17:19]+\"/\"+string[15:17]+\"/\"+string[11:15]", "def str2date(date):\n return datetime.datetime.strptime(date, \"%m/%d/%Y\").date()", "def compute_date(date_text):\n dt = None\n if date_text and len(date_text) == 8:\n try:\n dt = datetime.datetime.strptime(date_text, '%m%d%Y')\n except ValueError:\n pass\n return dt", "def convert_date(date):\n date = get_nummeric_only(date) \n \n \n if len(date) == 8:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n \n date_time = dt.datetime(year,month,day)\n \n return date_time\n \n if len(date) == 12 or len(date) == 14:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n hour = int(date[8:10])\n minute = int(date[10:12])\n \n date_time = dt.datetime(year,month,day, hour, minute)\n \n return date_time\n else:\n return 0", "def convert_date(year: str, week: str):\n date = datetime.fromisocalendar(int(year), int(week), 1)\n return date.strftime(\"%m/%d/%YZ\")" ]
[ "0.63704276", "0.6331814", "0.63243455", "0.619683", "0.61956924", "0.59415126", "0.58787394", "0.5870436", "0.58346206", "0.58210987", "0.57984483", "0.57928306", "0.57660526", "0.5744133", "0.5704916", "0.56917703", "0.56885225", "0.5663735", "0.5614924", "0.5588107", "0.5576284", "0.55649745", "0.55514926", "0.55237764", "0.5509453", "0.55006635", "0.54948884", "0.54948384", "0.5468333", "0.5459249" ]
0.6506631
0
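The reformat_date document above leaves its MONTHS lookup undefined; the sketch below supplies a plausible abbreviation-to-number table so the record runs end to end (the table and the sample call are assumptions, not part of the record). Note that the function as written returns a 'YYYY-MM' string rather than the 'YYYYMMDD' form the query mentions.

MONTHS = {
    'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
    'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
    'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12',
}

def reformat_date(date_str: str) -> str:
    # Guard against empty input, then split "Aug 2020" into month key and year.
    if date_str is None or date_str == '':
        return None
    [month_key, year] = date_str.split()
    return f'{year}-{MONTHS[month_key]}'

print(reformat_date('Aug 2020'))  # -> 2020-08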
add quote to string
def add_quote(item): if type(item) == str: return "\'" + item + "\'" else: return item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quot(string):\r\n return string.replace('\"', \"'\")", "def _quote(self, arg):\n arg = arg.replace('\\\\', '\\\\\\\\')\n arg = arg.replace('\"', '\\\\\"')\n return '\"%s\"' % arg", "def _quote(v):\n return '\"' + v + '\"' if ' ' in v else v", "def shQuote(text):\n\treturn \"'%s'\" % text.replace(\"'\", r\"'\\''\")", "def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)", "def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)", "def csv_quote_escape(self, the_string):\n the_string = the_string.replace('\"', r'\"\"')\n\n the_string = '\"' + the_string + '\"'\n\n return the_string", "def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")", "def escape_quotes(self, val):\n if val.startswith(self.quote) and val.endswith(self.quote):\n # make sure any previously escaped quotes are not re-escaped\n middle = val[1:-1].replace(\"\\\\\" + self.quote, self.quote)\n middle = middle.replace(self.quote, \"\\\\\" + self.quote)\n val = \"%s%s%s\" % (self.quote, middle, self.quote)\n\n return val", "def _escapeString(self, value):\n if '\"' in value and \"'\" in value:\n substrings = value.split(\"\\\"\")\n result = [\"concat(\"]\n for substring in substrings:\n result.append(\"\\\"%s\\\"\" % substring)\n result.append(\", '\\\"', \")\n result = result[0:-1]\n if value.endswith('\"'):\n result.append(\", '\\\"'\")\n return \"\".join(result) + \")\"\n\n if '\"' in value:\n return \"'%s'\" % value\n return \"\\\"%s\\\"\" % value", "def quote(s):\n # Based on shlex.quote. Bun unlike shlex, it quotes every string and\n # not just the ones that contain unsafe characters.\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def qstring(self, s):\n\n if '\"' in s or ' ' in s or '\\\\' in s:\n return '\"' + s.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"') + '\"'\n else:\n return s", "def quote(s):\n\n\ts = \"'\" + s.replace(\"'\", \"\"\"'\"'\"'\"\"\") + \"'\"\n\n\t#get rid of gratuitous leading and trailing empty strings\n\tif s.startswith(\"''\"): s = s[2:]\n\tif s.endswith(\"''\"): s = s[:-2]\n\n\treturn s", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shellquote(s):\n return '\"' + s.replace(\"'\", \"'\\\\''\") + '\"'", "def SingleQuote(s):\n return pipes.quote(s)", "def standardise_quotes(self, val):\n if val.startswith(self.altquote) and val.endswith(self.altquote):\n middle = val[1:-1]\n val = \"%s%s%s\" % (self.quote, middle, self.quote)\n\n val = self.escape_quotes(val)\n\n return val", "def quote(m):\n return '\"' + m + '\"'", "def _escape_string(s, surrounding_quote='\"'):\n s = s.replace('\\\\', '\\\\\\\\')\n if surrounding_quote == '\"':\n s = s.replace('\"', r'\\\"')\n if surrounding_quote == \"'\":\n s = s.replace(\"'\", r\"\\'\")\n return s", "def quote(value):\n return DoubleQuotedScalarString(value)", "def encodeLiteral(self, string):\r\n return string.replace(\"'\",\"''\")", "def urlQuote(string):\r\n return quote(string.encode(\"utf-8\"))", "def Quote(s):\n if not nonnormal_char_re.search(s):\n return s # no quoting necessary\n slist = []\n for char in s:\n if nonnormal_char_re.search(char):\n slist.append(\"\\\\x%02x\" % ord(char))\n else:\n slist.append(char)\n return '\"%s\"' % \"\".join(slist)", "def quoted(val: str) -> str:\n return f'\"{val}\"' if ' ' in val else val", "def QuotedEscaped (s):\n return 
repr(s)", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return \"'\" + s + \"'\"\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def escapeDoubleQuoteInSQLString(string, forceDoubleQuote=True):\n string = string.replace('\"', '\"\"')\n\n if forceDoubleQuote:\n string = '\"' + string + '\"'\n return string", "def argument_quote(argument):\n argument = argument.replace('\"', '\"\"')\n if ' ' in argument:\n argument = argument.replace(\"'\", \"''\")\n argument = \"'\" + argument + \"'\"\n return argument", "def embeded_triple_quotes():\n pass", "def quote(value):\n single = value.find(\"'\")\n double = value.find('\"')\n multiline = value.find('\\n') != -1\n if multiline or ((single != -1) and (double != -1)):\n if value.find('\"\"\"') == -1 and value[0] != '\"' and value[-1] != '\"':\n s = '\"\"\"%s\"\"\"' % value\n else:\n s = \"'''%s'''\" % value\n elif (single != -1) and (double == -1):\n s = '\"%s\"' % value\n else:\n s = \"'%s'\" % value\n return s" ]
[ "0.82810366", "0.7859846", "0.7793372", "0.77169293", "0.7604381", "0.7604381", "0.7577342", "0.75539434", "0.75435525", "0.7453465", "0.7444847", "0.7441447", "0.7387246", "0.7360612", "0.7244319", "0.72439015", "0.7235581", "0.71914256", "0.7133501", "0.7128411", "0.70995504", "0.70525205", "0.70134044", "0.70098263", "0.7007605", "0.70062554", "0.69718754", "0.6963915", "0.6934899", "0.6927298" ]
0.7902357
1
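A minimal usage sketch for the add_quote document above; the two sample calls are illustrative and simply exercise the string and non-string branches of the record's code.

def add_quote(item):
    # Wrap plain strings in single quotes; return every other type unchanged.
    if type(item) == str:
        return "'" + item + "'"
    else:
        return item

print(add_quote('hello'))  # -> 'hello' (surrounded by single quotes)
print(add_quote(42))       # -> 42 (not a string, passed through)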
Goes up the indicated number of levels and returns the equivalent of calling locals() in that scope
def parent_vars(level, extra_vars = None): try: 1/0 except: frame = sys.exc_traceback.tb_frame # Go up in the frame stack for i in range(level+1): frame = frame.f_back loc, glob = frame.f_locals, frame.f_globals if extra_vars != None: loc = loc.copy() for key in extra_vars.keys(): loc[key] = extra_vars[key] return loc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stack():\n return currentframe().f_back.f_locals.setdefault(SN, [])", "def get_local(self, where: Any, depth: int = 0) -> Any:\n return self.scope_stack[depth][where]", "def _resolve_this(self, levels):\n if hasattr(\n sys, \"_getframe\"\n ): # implementation detail of CPython, speeds up things by 100x.\n desired_frame = sys._getframe(3)\n contracted = desired_frame.f_locals[\"self\"]\n else:\n call_frame = inspect.stack(0)[3]\n contracted = call_frame[0].f_locals[\"self\"]\n ref = contracted.ref.split(\".\")\n\n # (0=module, 1=module's parent etc.)\n level_offset = levels - 1\n traversed = self\n for i in range(len(ref) - level_offset):\n traversed = getattr(traversed, ref[i])\n return traversed", "def retr():\n stack = currentframe().f_back.f_locals.setdefault(SN, [])\n return stack[-1]", "def getLevel(unique_name):", "def fetch_locals(self, upcount=1):\n\n frame = inspect.currentframe()\n i = upcount\n while True:\n if frame.f_back is None:\n break\n frame = frame.f_back\n i -= 1\n if i == 0:\n break\n\n for k, v in frame.f_locals.items():\n self.__dict__[k] = v", "def _traverse_stack_for(t: type):\n for fr in inspect.stack():\n frame = fr.frame\n try:\n locals = frame.locals\n except AttributeError:\n # idk\n continue\n else:\n for object in locals.values():\n if type(object) is t:\n return object\n finally:\n # prevent reference cycles\n del fr", "def get(self, name, level=None):\n level = level or self.local_variables\n assert isinstance(level, EnvironmentLevel)\n\n level, index = self.__locate__(name, level)\n if not level or index < 0:\n return None # TODO throw?\n else:\n return level.expressions[index]", "def lvl_algo(next_level):\n total_xp_needed = (next_level * next_level)\n return total_xp_needed", "def extractLocals(trcback):\n\n\toutput = []\n\tstack = extractStack(getInnerMostFrame(trcback))\n\tfor frame, fileName, lineNumber, name, context, index in stack:\n\t\targsNames, nameless, keyword = extractArguments(frame)\n\t\targuments, namelessArgs, keywordArgs, locals = OrderedDict(), [], {}, {}\n\t\tfor key, data in frame.f_locals.iteritems():\n\t\t\tif key == nameless:\n\t\t\t\tnamelessArgs = map(repr, frame.f_locals.get(nameless, ()))\n\t\t\telif key == keyword:\n\t\t\t\tkeywordArgs = dict((arg, repr(value)) for arg, value in frame.f_locals.get(keyword, {}).iteritems())\n\t\t\telif key in argsNames:\n\t\t\t\targuments[key] = repr(data)\n\t\t\telse:\n\t\t\t\tlocals[key] = repr(data)\n\t\toutput.append(((name, fileName, lineNumber), (arguments, namelessArgs, keywordArgs, locals)))\n\treturn output", "def __getitem__(self, item):\r\n current = self\r\n while current is not None:\r\n if item in current.locals:\r\n return current.locals[item]\r\n current = current.parent", "def getLevels():", "def get_level_profile(n, l):\r\n c, p, q = (n, 1, 0)\r\n for i in range(l):\r\n c, p, q = get_next_level(c, p, q)\r\n return (c, p, q)", "def get_initial_level(self, meta, raven_vars, dispatch, t):\n res = self.get_resource()\n request = {res: None}\n inputs = {'request': request,\n 'meta': meta,\n 'raven_vars': raven_vars,\n 'dispatch': dispatch,\n 't': t}\n return self._initial_stored.evaluate(inputs, target_var=res)[0][res]", "def my_previous_function():\n global level\n if level ==2:\n level -= 1\n elif level == 3:\n level -=2", "def print_vars():\n try:\n frame = sys._getframe()\n except ValueError:\n print(f\"Value error\")\n return\n\n prev_frame = frame.f_back\n if prev_frame is not None:\n local_vars = prev_frame.f_locals\n for local_name, local_val in 
local_vars.items():\n print(f\"{local_name}: {type(local_val).__module__ == 'builtins'}\")", "def get_level(level_name):\n return LEVELS[level_name.upper()]", "def do_showlocals(self, line):\n if(Rsp.state != STOPPED):\n self.output = \"Command only possible during STOPPED-state.\"\n return\n curfunc = get_func(Rsp.pc) \n self.output = \"Funktion:%s\"%curfunc\n stackmap = funcmap[curfunc].stacklocals\n regmap = funcmap[curfunc].reglocals\n for var in stackmap:\n self.output += \"%s:%s\\n\"%(var, typemap[stackmap[var].type].name) \n for var in regmap:\n self.output += \"%s:%s\\n\"%(var, typemap[regmap[var].type].name)", "def probe_stack(depth = 10):\n if depth == 0:\n return\n probe_stack(depth - 1)", "def depth_from_indentation(function):\n def wrap(start, values):\n #print 'Depth %d | %d %s' %(self._depth, start, values)\n #self._depth = start\n self._current_node = function(values)\n #print self._current_node\n return ''\n\n return wrap", "def format_locals(sys_exc_info):\n\n current_tb = sys_exc_info[-1]\n while current_tb:\n next_tb = current_tb.tb_next\n if not next_tb:\n frame_locals = current_tb.tb_frame.f_locals\n return pprint.pformat(frame_locals)\n current_tb = next_tb", "def test_function(n, m,level=10):\n # call another function so the stack is bigger\n if level > 0:\n return test_function(n,m,level=level-1)\n else:\n return test_function2(m, n)", "def stack(context=1):\r\n return getouterframes(sys._getframe(1), context)", "def get_stkvar(*args):\n return _ida_frame.get_stkvar(*args)", "def resolve(self, expr: loxExprAST.Expr, depth: int) -> None:\n self.locals[expr] = depth", "def resolve_top_level(*_):\n # pylint: disable=attribute-defined-outside-init\n data = TopLevel()\n subleaf1 = SubLeaf()\n subleaf1.value = \"subleaf1\"\n subleaf2 = SubLeaf()\n subleaf2.value = \"subleaf2\"\n leaf = Leaf()\n leaf.leaflets = [subleaf1, subleaf2]\n leaf.value = \"some leaf value\"\n data.leaf = leaf\n data.name = \"top level name\"\n return data", "def get_levels(self):\n return self.levels[self.game]", "def get_level_name(target_level, cwd=None):\n if cwd is None:\n cwd = os.getcwd()\n\n this_level = level(cwd)\n this_idx = levels.index(this_level)\n target_idx = levels.index(target_level)\n i = this_idx\n cw = cwd\n pp = \"\"\n while i >= target_idx:\n cw, pp = os.path.split(cw)\n i -= 1\n return pp", "def depth_from_match(function):\n def wrap(start, values):\n #print 'Depth %d | %d %s' %(self._depth, start, values)\n #print self._current_node\n self._depth = start\n self._current_node = function(values)\n #print self._current_node\n return ''\n\n return wrap", "def init_locals(self):\n pass" ]
[ "0.6399998", "0.6136883", "0.5983501", "0.59237546", "0.5805225", "0.5802206", "0.57395476", "0.5731738", "0.5708003", "0.5674699", "0.5579075", "0.5545951", "0.549061", "0.5461867", "0.54421484", "0.54293317", "0.5416329", "0.538953", "0.5357531", "0.53261524", "0.52594477", "0.5244545", "0.5200878", "0.5193431", "0.5179439", "0.5150772", "0.5134791", "0.5083639", "0.5058928", "0.50513536" ]
0.72129375
0
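The parent_vars document above depends on sys.exc_traceback, which exists only in Python 2; the sketch below is a Python 3 restatement of the same frame walk using inspect (closer to the fetch_locals and print_vars negatives than to the record itself), with made-up caller functions for the demonstration.

import inspect

def parent_vars(level, extra_vars=None):
    # Start at the caller's frame, go `level` frames further up, and return
    # a copy of that scope's locals, optionally merged with extra_vars.
    frame = inspect.currentframe().f_back
    for _ in range(level):
        frame = frame.f_back
    scope = dict(frame.f_locals)
    if extra_vars is not None:
        scope.update(extra_vars)
    return scope

def inner():
    return parent_vars(1)  # level=1 reaches the locals of inner's caller

def outer():
    secret = 'visible from inner'
    return inner()

print(outer()['secret'])  # -> visible from inner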
Takes a chunk of HTML, looks for a select, and outputs a list of options of that select. Looks only at value='', not at the pretty display.
def get_select_options(html, select_name): # a regular expression to match the select block pattern = re.compile('<select *name="%s"[^>]*>(.*?)</select>' % select_name, re.MULTILINE | re.IGNORECASE | re.DOTALL) m = pattern.search(html) if (m == None): # we have no match, try another pattern pattern = re.compile('<select *name=%s[^>]*>(.*?)</select>' % select_name, re.MULTILINE | re.IGNORECASE | re.DOTALL) m = pattern.search(html) select_block = m.group() # extract the options from the select block pattern = re.compile('<option[^>]*value="(.*?)">(.*?)</option>', re.IGNORECASE) options = pattern.findall(select_block) return options
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def htmlSelect(labelText, parName, args, choiceList, hint=None, descriptionSeparator='::',\n labelAttr='', attr=''):\n snippet = htmlLabel(labelText,parName,labelAttr)\n default = args[parName] if parName in args else ''\n if not isinstance(default,list):\n default = [default]\n snippet += '<select name=\"%s\"%s>\\n' % (parName,sep(attr))\n if hint:\n snippet += '<option value=\"\">%s</option>\\n' % hint\n for c in choiceList:\n p = c.split(descriptionSeparator)\n if len(p)==2:\n (desc,val) = p\n else:\n (desc,val) = (c,c)\n if val in default:\n snippet += '<option selected=\"yes\" value=\"%s\">%s</option>\\n' % (val,desc)\n else:\n snippet += '<option value=\"%s\">%s</option>\\n' % (val,desc)\n snippet += '</select>\\n'\n return snippet", "def get_section_choices(sections):\n ret = []\n if sections == None:\n return ret\n sections = string.splitfields(decode_html(sections), '\\n')\n for s in sections :\n s = string.strip(s)\n ret.append((s, s))\n return ret\n # if s != '':\n # yield(encode_html(s), s)", "def find_opts_linux(soup, header):\n\n # Get the source line of the header\n header_el = soup.find(id=header)\n if header_el is None:\n return set()\n header_source_line = soup.find(id=header).sourceline\n\n # Get the element where the options are described\n opts_el = [pre for pre in soup.find_all('pre') if pre.sourceline == header_source_line][0]\n\n opts_lines = opts_el.text.split('\\n')\n opts_lines = [line.lstrip().split(maxsplit=1)[0] for line in opts_lines if line]\n opts = [line for line in opts_lines if line[0] == '-' and line != '-']\n\n # Remove false positives\n opts = {o for o in opts if not o[-1] in NON_OPTS_CHARS}\n\n return opts", "def Options(current, mValues):\n s = \"\"\n pairs = mValues.items()\n pairs.sort(lambda x,y: cmp(x[1], y[1]))\n try:\n for pair in pairs:\n s += '\\n<option %svalue=\"%s\">%s</option>' % \\\n (pair[1] == current and 'selected=\"selected\" ' or '', pair[1], pair[0])\n except Exception, e:\n logging.info(\"Options error %r\" % e)\n return \"<!-- Select Error-->\"\n return safestring.SafeString(s)", "def render_to_html(env_spec_list):\n if not env_spec_list:\n return []\n html_output = \"\"\n\n for env_spec_entry in env_spec_list:\n if env_spec_entry[\"choices\"] is None:\n ret_str = render_label(env_spec_entry)\n ret_str += render_input(env_spec_entry)\n else:\n ret_str = render_label(env_spec_entry)\n ret_str += f'<select id=\"env_spec_{env_spec_entry[\"name\"].lower()}\" name=\"{env_spec_entry[\"name\"].lower()}\">\\n'\n\n for choice in env_spec_entry[\"choices\"]:\n ret_str += render_choice(\n choice, choice == env_spec_entry[\"default_value\"]\n )\n ret_str += \"</select>\\n\"\n\n if env_spec_entry[\"comment\"] is not None:\n ret_str += f\"<small>{env_spec_entry['comment']}</small>\\n\"\n\n html_output += ret_str\n return html_output", "def test_render_none(self):\n self.check_html(\n self.widget(choices=((\"\", \"Unknown\"),) + self.beatles),\n \"beatles\",\n None,\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"\">Unknown</option>\n <option value=\"J\">John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\">George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def _get_select_options(self, d):\n\t\top = d.options.split('\\n')\n\t\tif len(op) > 1 and op[1][:4].lower() == 'sql:':\n\t\t\t# Execute the sql query\n\t\t\tquery = op[1][4:].replace('__user',\n\t\t\t\t\t\twebnotes.session.get('user'))\n\t\telse:\n\t\t\t# Extract DocType and Conditions\n\t\t\t# and execute the 
resulting query\n\t\t\tdt = op[0][5:].strip()\n\t\t\tcond_list = [cond.replace('__user',\n\t\t\t\twebnotes.session.get('user')) for cond in op[1:]]\n\t\t\tquery = \"\"\"\\\n\t\t\t\tSELECT name FROM `tab%s`\n\t\t\t\tWHERE %s docstatus!=2\n\t\t\t\tORDER BY name ASC\"\"\" % (dt,\n\t\t\t\tcond_list and (\" AND \".join(cond_list) + \" AND \") or \"\")\n\t\ttry:\n\t\t\topt_list = webnotes.conn.sql(query)\n\t\texcept:\n\t\t\t# WARNING: Exception suppressed\n\t\t\topt_list = []\n\n\t\treturn opt_list", "def load_select_options(self, doclist):\n\t\tfor d in doclist:\n\t\t\tif (d.doctype == 'DocField' and d.fieldtype == 'Select' and\n\t\t\t\td.options and d.options[:5].lower() == 'link:'):\n\t\t\t\t\n\t\t\t\t# Get various options\n\t\t\t\topt_list = self._get_select_options(d)\n\n\t\t\t\topt_list = [''] + [o[0] or '' for o in opt_list]\n\t\t\t\td.options = \"\\n\".join(opt_list)", "def test_option_selection(self):\r\n\r\n # Create options 0-4 and select option 2\r\n self.context['options_value'] = [2]\r\n self.context['options'] = [\r\n {'id': id_num,\r\n 'choice': 'correct',\r\n 'description': '<p>Unescaped <b>HTML {0}</b></p>'.format(id_num)}\r\n for id_num in range(0, 5)]\r\n\r\n xml = self.render_to_xml(self.context)\r\n\r\n # Expect that each option description is visible\r\n # with unescaped HTML.\r\n # Since the HTML is unescaped, we can traverse the XML tree\r\n for id_num in range(0, 5):\r\n xpath = \"//span[@data-id='{0}']/p/b\".format(id_num)\r\n self.assert_has_text(xml, xpath, 'HTML {0}'.format(id_num), exact=False)\r\n\r\n # Expect that the correct option is selected\r\n xpath = \"//span[contains(@class,'selected')]/p/b\"\r\n self.assert_has_text(xml, xpath, 'HTML 2', exact=False)", "def test_dbpa003_select(dash_duo):\n app = Dash()\n\n options = {\n \"OptionA\": \"Option 1\",\n \"OptionB\": \"Option 2\",\n \"OptionC\": \"Option 3\",\n }\n\n value = \"OptionB\"\n\n with_keywords = Select(\n options=options,\n value=value,\n id=\"with-keywords\",\n )\n without_keywords = Select(options, value, id=\"without-keywords\")\n\n app.layout = html.Div([with_keywords, without_keywords])\n\n dash_duo.start_server(app)\n\n # Check values\n assert [\n a.get_attribute(\"value\")\n for a in dash_duo.wait_for_element(\n \"#with-keywords\"\n ).find_elements_by_tag_name(\"option\")\n ] == [\n a.get_attribute(\"value\")\n for a in dash_duo.wait_for_element(\n \"#without-keywords\"\n ).find_elements_by_tag_name(\"option\")\n ]\n\n # Check labels\n assert [\n a.text\n for a in dash_duo.wait_for_element(\n \"#with-keywords\"\n ).find_elements_by_tag_name(\"option\")\n ] == [\n a.text\n for a in dash_duo.wait_for_element(\n \"#without-keywords\"\n ).find_elements_by_tag_name(\"option\")\n ]", "def test_render_value_label(self):\n self.check_html(\n self.widget(choices=self.beatles),\n \"beatles\",\n [\"John\"],\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"J\">John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\">George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def create_select(qualifier, lines, select_id=None):\n options = {} #{ option : [Label]}\n for label in lines.keys():\n option = qualifier(label)\n if (option not in options):\n options[option] = []\n options[option].append(label)\n option_list = list(options.keys())\n option_list.sort()\n print '<select class=\"lines\"',\n if select_id is not None:\n print 'id=%s' % qa(select_id)\n print 'multiple=\"true\" size=\"10\" onchange=\"updateSvg();\">'\n for option in option_list:\n 
print '<option value=' + qa('[' + \n reduce(lambda x,y:x+json.dumps(str(y))+',',options[option],\"\")[0:-1]\n + ']') + '>'+qe(option)+'</option>'\n print '</select>'", "def parse_options(options):\r\n # convert single quotes inside option values to html encoded string\r\n options = re.sub(r\"([a-zA-Z])('|\\\\')([a-zA-Z])\", r\"\\1&#39;\\3\", options)\r\n options = re.sub(r\"\\\\'\", r\"&#39;\", options) # replace already escaped single quotes\r\n # parse the set of possible options\r\n lexer = shlex.shlex(options[1:-1].encode('utf8'))\r\n lexer.quotes = \"'\"\r\n # Allow options to be separated by whitespace as well as commas\r\n lexer.whitespace = \", \"\r\n\r\n # remove quotes\r\n # convert escaped single quotes (html encoded string) back to single quotes\r\n tokens = [x[1:-1].decode('utf8').replace(\"&#39;\", \"'\") for x in lexer]\r\n\r\n # make list of (option_id, option_description), with description=id\r\n return [(t, t) for t in tokens]", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def print_options(val, cur_matches):\n print val\n\n #skip one to print none at end\n for i,v in enumerate(cur_matches[1:]):\n print \"[%i] %s : %s \"%(i+1, v[0], v[1])\n print \"[%i] %s : %s \" % (0, cur_matches[0][0], cur_matches[0][1])\n\n print \n print 'Choice?'", "def options(self):\n\n select = self._get_selenium_select()\n options = []\n\n if select:\n\n for option in select.options:\n options.append(option.text.encode('ascii', 'ignore'))\n\n return options", "def do_select(self, line):\n xpath, option = split_args(line)\n e = self._find_element_by_xpath(xpath)\n select = Select(e)\n select.select_by_value(option)", "def get_portions(self):\n try:\n portions = self.soup.find(class_=\"portions\")\n self.portions = portions.find(\"option\", selected=True).text.strip()\n except Exception:\n current_app.logger.error(f\"Could not extract portions: {traceback.format_exc()}\")\n self.portions = \"\"", "def select(action, object_='', options=[], selection=None):\n html = u'<select '\n html += u'name=\"'+action+object_+'\" '\n if action and object_:\n html += u'onchange=\"submitLink(\\''+action+'\\', \\''+object_+'\\');\"'\n html += u'>\\n'\n for option, value in options:\n html += u' <option value=\"'+unicode(value)+'\" '\n if value == selection:\n html += u'selected=\"selected\" '\n html += u'>'\n html += option\n html += u'</option>\\n'\n html += u'</select>\\n'\n return html", "def options(self):\n if self._ast:\n for option in self._ast[1]:\n yield option", "def extract_classes(soup):\r\n select = soup.find('select', id='dnn_ctr11396_TimeTableView_ClassesList')\r\n return {option['value']: option.text for option in select.findChildren('option')}", "def select(input, output, fields, delimiter, encoding, verbose, format_in, format_out, zipfile, filter):\n if verbose:\n enableVerbose()\n options = {}\n options['delimiter'] = delimiter\n options['fields'] = fields\n options['output'] = output\n options['encoding'] = encoding\n options['format_in'] = format_in\n options['format_out'] = format_out\n options['zipfile'] = zipfile\n options['filter'] = filter\n acmd = Selector()\n acmd.select(input, options)\n pass", "def form_SequenceOfStructuresWithSelects(request):\n substructure = schemaish.Structure()\n substructure.add( 'a', schemaish.String() )\n substructure.add( 'b', schemaish.String() )\n\n schema = 
schemaish.Structure()\n schema.add( 'myList', schemaish.Sequence( substructure ))\n\n form = formish.Form(schema, 'form')\n\n options = [('a',1),('b',2),('c',3)]\n form['myList.*.b'].widget = formish.SelectChoice(options)\n\n form.defaults = {'myList': [{'a':'foo','b':'b'}]}\n return form", "def test_multiple_options_same_value(self):\n self.check_html(\n self.widget(choices=self.numeric_choices),\n \"choices\",\n [\"0\"],\n html=(\n \"\"\"<select multiple name=\"choices\">\n <option value=\"0\" selected>0</option>\n <option value=\"1\">1</option>\n <option value=\"2\">2</option>\n <option value=\"3\">3</option>\n <option value=\"0\" selected>extra</option>\n </select>\"\"\"\n ),\n )", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def _parse_v_option(self, line):\n if self._regex_helper.search_compiled(W._re_v_option, line):\n self.current_ret['RESULT'].append(self._regex_helper.group(\"V_OPTION\"))\n raise ParsingDone", "def get_group_options(self, table_id):\n table_url = self.base_url + \"/table?table=\" + str(table_id)\n html_text = self.fetch(table_url)\n restrict_group_select = re.search(r'<select id=\"restrictToGroup\">([\\s\\S]*?)<\\/select>', html_text)[0]\n options = re.findall(r'\"(\\d*)\">([^<]*)', restrict_group_select)\n return options", "def render_options(self, *args):\n selected_choices_arg = 1 if VERSION < (1, 10) else 0\n\n # Filter out None values, not needed for autocomplete\n selected_choices = [c for c in args[selected_choices_arg] if c]\n\n if self.url:\n all_choices = copy.copy(self.choices)\n self.choices += [ (c, c) for c in selected_choices ]\n self.filter_choices_to_render(selected_choices)\n\n html = super(WidgetMixin, self).render_options(*args)\n\n if self.url:\n self.choices = all_choices\n\n return html", "def _build_dropdown(options):\n return [(x, x) for x in options]", "def _split_into_body_and_options(\n section_content: str,\n) -> Tuple[str, Optional[str], Dict[int, bool]]:\n lines = section_content.strip().splitlines()\n\n skipif_expr = None\n flag_settings = {}\n i = 0\n for line in lines:\n stripped = line.strip()\n if _OPTION_SKIPIF_RE.match(stripped):\n skipif_match = _OPTION_SKIPIF_RE.match(stripped)\n assert skipif_match is not None\n skipif_expr = skipif_match.group(1)\n i += 1\n elif _OPTION_DIRECTIVE_RE.match(stripped):\n directive_match = _OPTION_DIRECTIVE_RE.match(stripped)\n assert directive_match is not None\n option_strings = directive_match.group(1).replace(\",\", \" \").split()\n for option in option_strings:\n if (\n option[0] not in \"+-\"\n or option[1:] not in doctest.OPTIONFLAGS_BY_NAME\n ):\n raise ValueError(f\"doctest has an invalid option {option}\")\n flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]]\n flag_settings[flag] = option[0] == \"+\"\n i += 1\n elif stripped == \":hide:\":\n i += 1\n else:\n break\n\n if i == len(lines):\n raise ValueError(\"no code/output\")\n\n body = \"\\n\".join(lines[i:]).lstrip()\n if not body:\n raise ValueError(\"no code/output\")\n\n if i and lines[i].strip():\n # no newline between option block and body\n raise ValueError(f\"invalid option block: {section_content!r}\")\n\n return body, skipif_expr, flag_settings" ]
[ "0.58957684", "0.5724786", "0.5716659", "0.57085514", "0.568741", "0.56390756", "0.5558695", "0.5549973", "0.54741323", "0.5450411", "0.5365093", "0.5349225", "0.53060424", "0.5284998", "0.5204723", "0.5121547", "0.51064837", "0.50677323", "0.5058194", "0.5057655", "0.50476235", "0.50248927", "0.49957404", "0.49950442", "0.49734133", "0.4931061", "0.49275583", "0.49027115", "0.48950797", "0.4894131" ]
0.5956624
0
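A quick, self-contained run of the get_select_options document above; the HTML snippet and the 'colour' select name are made up for illustration.

import re

def get_select_options(html, select_name):
    # Locate the <select name="..."> block, falling back to an unquoted name attribute.
    pattern = re.compile('<select *name="%s"[^>]*>(.*?)</select>' % select_name,
                         re.MULTILINE | re.IGNORECASE | re.DOTALL)
    m = pattern.search(html)
    if m is None:
        pattern = re.compile('<select *name=%s[^>]*>(.*?)</select>' % select_name,
                             re.MULTILINE | re.IGNORECASE | re.DOTALL)
        m = pattern.search(html)
    select_block = m.group()
    # Collect (value, label) pairs from each <option value="...">label</option>.
    pattern = re.compile('<option[^>]*value="(.*?)">(.*?)</option>', re.IGNORECASE)
    return pattern.findall(select_block)

html = '<select name="colour"><option value="r">Red</option><option value="g">Green</option></select>'
print(get_select_options(html, 'colour'))  # -> [('r', 'Red'), ('g', 'Green')]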
Make a string safe for a CSV field
def csv_safe(string): # let's backslash all the quotation marks anyways string = str(string) string = string.replace('"','\\"') if "," not in string and "\n" not in string: return string return '"' + string + '"'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_friendly_string(input):\n #Convert to string and strip of whitespace\n s = strip(str(input))\n #Newlines are bad\n s = replace(s, '\\n', \" \")\n s = replace(s, '\\r', \" \")\n #Replace double quotes with double-double\n s = replace(s, '\"', '\"\"')\n\n #Add quotes if needed\n found = [(find(s,x)>0) for x in ' \",']\n if any(found):\n s = '\"' + s + '\"'\n\n# #Add quotes if needed\n# for x in s:\n# if not x in (string.digits + string.ascii_letters):\n# #Found a non-alphanumeric character\n# s = '\"' + s + '\"'\n# break\n \n return s", "def CsvEscape(text):\n if not text:\n return ''\n if text.find('\"') > -1:\n text = text.replace('\"', '\"\"')\n if (text == '' or text.find(',') > -1 or text.find('\"') > -1 or\n text.find('\\n') > -1 or text.find('\\r') > -1 or text[0] == ' ' or\n text[-1] == ' '):\n text = '\"%s\"' % text\n return text", "def clean_csv_value(value):\n\n # replace None with \\N\n if value is None:\n return r'\\N'\n # convert value to string and add extra '\\' to '\\n'\n return str(value).replace('\\n', '\\\\n')", "def csv_quote_escape(self, the_string):\n the_string = the_string.replace('\"', r'\"\"')\n\n the_string = '\"' + the_string + '\"'\n\n return the_string", "def _validate_value(value : Any) -> str:\n if isinstance(value, str):\n value = value.replace(\"'\", \"''\")\n return f'\"{value}\"'\n return f\"{value}\"", "def _scrub(self, string):\n if not string.isalnum():\n raise ValueError(\"Table name cannot include non-alphanumerics.\")\n return string", "def cleaned_string(val):\r\n return urllib.quote_plus(smart_str(val))", "def sanitaze(field):\n return re.sub('[^0-9a-zA-Z]+', '-', str(field))", "def __call__(self, value): # noqa: D102\n if not isinstance(value, str):\n raise ValueError(f\"Input value must be a string. '{value}' is not.\")\n\n raw_value = value\n for c in self.remove_characters:\n value = value.replace(c, \"\")\n if not bool(re.match(f\"^[{self.allowed_characters},]+$\", value)):\n raise ValueError(\n f\"Input must only contain values '{self.allowed_characters},'. '{raw_value}' does not.\"\n )\n if not bool(\n re.match(\n f\"^([{self.allowed_characters}],)+[{self.allowed_characters}]$\", value\n )\n ):\n raise ValueError(\n f\"Input must have format '(?,?,?,?)'. '{raw_value}' does not.\"\n )\n if not all([c in value for c in self.required_characters]):\n raise ValueError(\n f\"Input must contain {self.required_characters}. 
'{raw_value}' does not.\"\n )\n return raw_value", "def default_cleaner_fn(fld):\n if (isinstance(fld, str)):\n return re.sub(\"[\\\"\\'\\\\\\\\]\", \"\", fld) # remove quotes and backslashes\n else:\n return fld", "def formatFromCSV(String):\r\n #CSV files strip all leading 0s from numbers\r\n if(len(String)) == 4:\r\n return String\r\n elif(len(String)) == 3:\r\n return (\"0\" + String).strip()\r\n elif(len(String)) == 2:\r\n return (\"00\" + String).strip()\r\n elif(len(String)) == 1:\r\n return (\"000\" + String).strip()", "def test_init_with_field_dict_and_custom_field_separator(self):\n fields = {\n 'Column 1': 'a=${aaa}',\n 'Column 2': 'b=${bbb}',\n 'Column 3': 'c=${ccc}',\n }\n csv_formatter = CSVFormatter(fields=fields, sep=\" || \")\n csv = csv_formatter.format_records(self.records)\n\n csv_expected = textwrap.dedent(\"\"\"\\\n #Column 1 || Column 2 || Column 3\n a=foobar_01 || b=8 || c=4898FE19\n a=foobar_02 || b=160 || c=5825D187\n a=foobar_03 || b=99 || c=3648A436\n \"\"\")\n\n assert csv == csv_expected", "def csv_to_field_Urls(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(url_splitter)\n entity.string = splitter.split(value)", "def ToCsv(self):\n\n def csv_helper(the_dict, the_field):\n if the_field not in the_dict:\n return \"\"\n value = the_dict[the_field]\n if value is None:\n return \"\"\n if isinstance(value, set):\n value = \"{}\".format(value)\n #yes, I want to fallback to the previous case\n\n\n if isinstance(value, str):\n value = value.replace(\"\\\"\",\"\\\"\\\"\")\n value = value.replace(\"\\r\",\"\")\n #value = value.replace(\"\\n\",\"\\\\n\")\n return \"\\\"{}\\\"\".format(value)\n return value\n\n output = \"\"\n first = True\n for one_field in self.CSV_FIELDS:\n if first:\n first = False\n template = \"{}{}\"\n else:\n template = \"{},{}\"\n output = template.format(output, csv_helper(self.__dict__, one_field))\n return output", "def format_field(self, value, format_spec):\n value = super(FilenameFormatter, self).format_field(value, format_spec)\n if self.lowercase:\n value = value.lower()\n if not self.nonwordchars:\n value = re.sub('[^\\w\\s]+', '', value)\n value = re.sub('\\s+', self.word_delimiter, value)\n return value", "def test_csv_simple_input(self):\n\n # Mix of integer and string data. 
Ensure that commas and\n # quotes are escaped properly.\n data = [\n {\n 'name': 'Normal string',\n 'item_num': 1,\n },\n {\n 'name': 'String, with, commas',\n 'item_num': 2,\n },\n {\n 'name': 'String with \" quote',\n 'item_num': 3,\n },\n ]\n\n table = TableReportForTesting(data)\n response = table.as_csv(HttpRequest())\n self.assertEqual(response.status_code, 200)\n # Expect cells containing commas to be escaped with quotes.\n content = response.content\n if PY3:\n content = content.decode(settings.DEFAULT_CHARSET).replace('\\x00', '')\n self.assertEqual(\n content,\n 'Name,Item Num\\r\\n'\n 'Normal string,1\\r\\n'\n '\"String, with, commas\",2\\r\\n'\n '\"String with \"\" quote\",3\\r\\n')", "def _sanitise_fields(self, record):\n sanitised = {}\n for k, v in record.items():\n new_key = k.replace('(', '_').replace(')', '_')\n sanitised[new_key] = v\n return sanitised", "def safe_quoted_string(value):\n validate_safe_string(value)\n return u'\\'{}\\''.format(value)", "def test_csvfile_different_types(fs: FakeFilesystem) -> None:\n contents = '''\"a\"\n1\n2.0\n\"test\"'''\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }", "def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)", "def _normalize_column(column):\n if not isinstance(column, str):\n msg = \"expected column of type 'str', got {0!r} instead\"\n raise TypeError(msg.format(column.__class__.__name__))\n column = column.strip()\n column = column.replace('\"', '\"\"') # Escape quotes.\n if column == '':\n column = '_empty_'\n return '\"' + column + '\"'", "def _normalize_column(column):\n if not isinstance(column, str):\n msg = \"expected column of type 'str', got {0!r} instead\"\n raise TypeError(msg.format(column.__class__.__name__))\n column = column.strip()\n column = column.replace('\"', '\"\"') # Escape quotes.\n if column == '':\n column = '_empty_'\n return '\"' + column + '\"'", "def validate_csv_seq(sequence):\n if sequence.find(',') != -1 or sequence.find(';') != -1:\n return True\n else:\n return False", "def _sanitize_param(self, param):\n if param:\n # Can't send unicode.\n param = str(param)\n return param", "def __clean_string(cls, text):\n if text.startswith(\"(\"):\n text = text[1:]\n if text.endswith(\")\"):\n text = text[:-1]\n if text.endswith(\",\"):\n text = text[:-1]\n if len(text) > 2 and cls.__is_quote(text[0]) and \\\n cls.__is_quote(text[-1]):\n text = text[1:-1]\n return text", "def test_init_with_format_string(self):\n fmt_str = \"a=${aaa}, b: ${bbb}, c has value ${ccc}\"\n csv_formatter = CSVFormatter(fmt_str=fmt_str)\n csv = csv_formatter.format_records(self.records)\n\n csv_expected = textwrap.dedent(\"\"\"\\\n a=foobar_01, b: 8, c has value 4898FE19\n a=foobar_02, b: 160, c has value 5825D187\n a=foobar_03, b: 99, c has value 3648A436\n \"\"\")\n\n assert csv == csv_expected", "def test_csv_with_unicode(self):\n\n data = [\n {\n 'name': 'Normal string',\n 'item_num': 1,\n },\n {\n 'name': u'String with ' + unichr(0x16c) + ' char',\n 'item_num': 2,\n },\n ]\n\n table = TableReportForTesting(data)\n response = table.as_csv(HttpRequest())\n self.assertEqual(response.status_code, 200)\n # Expect csv content to be utf-8 encoded.\n content = response.content\n result 
= ('Name,Item Num\\r\\n'\n 'Normal string,1\\r\\n'\n 'String with ' + unichr(0x16c) + ' char,2\\r\\n')\n if PY3:\n content = content.decode(settings.DEFAULT_CHARSET).replace('\\x00', '')\n else:\n result = result.encode(settings.DEFAULT_CHARSET)\n self.assertEqual(content, result)", "def test_return_csv_string(self):\n\n csv_formatter = CSVFormatter(fmt_str=\"${aaa},${bbb},${ccc}\", header=\"# Custom header line\")\n\n csv_expected = textwrap.dedent(\"\"\"\\\n # Custom header line\n foobar_01,8,4898FE19\n foobar_02,160,5825D187\n foobar_03,99,3648A436\n \"\"\")\n\n csv = csv_formatter.to_csv(self.records, path_or_buf=None)\n assert csv == csv_expected", "def csv_to_field_CampaignLanguages(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(r';')\n entity.string = splitter.split(value)", "def check_valid_csv_data(self, row):\n obj = re.match(re.compile('^[0-9]{4}\\,[A-Z]{1}[a-z]{2}\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Data String must be like `1990` `Jan` Check Sample file\")" ]
[ "0.6742102", "0.6715928", "0.66002315", "0.58941966", "0.57422", "0.568915", "0.5677563", "0.5639043", "0.55888426", "0.557945", "0.5571272", "0.55682826", "0.5516632", "0.5515312", "0.5504741", "0.5504615", "0.54903525", "0.548838", "0.5464011", "0.5452441", "0.543062", "0.543062", "0.5415972", "0.5358819", "0.5343704", "0.53414863", "0.5331015", "0.5296478", "0.52949035", "0.5291223" ]
0.7284532
0
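A short demonstration of the csv_safe document above on three made-up inputs; note that it backslash-escapes embedded quotes rather than doubling them as the quote-doubling helpers among the negatives do.

def csv_safe(string):
    # Backslash-escape double quotes, then wrap the field in quotes only when
    # it contains a comma or a newline.
    string = str(string)
    string = string.replace('"', '\\"')
    if "," not in string and "\n" not in string:
        return string
    return '"' + string + '"'

print(csv_safe('plain value'))    # -> plain value
print(csv_safe('comma, inside'))  # -> "comma, inside"
print(csv_safe('say "hi"'))       # -> say \"hi\"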
Make a string safe for HTML with double quotes
def html_dq_safe(string): if not string: return string string = string.replace('"','&quot;') return string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_html_safe(s):\n return s.replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")", "def html_escape(s):\n s = html.escape(s, False)\n s = s.replace('\"', \"&quot;\")\n return s", "def escape_html(s):\n\treturn s. \\\n\t\treplace(\"<\", \"&lt;\"). \\\n\t\treplace(\">\", \"&gt;\"). \\\n\t\treplace(\"&\", \"&amp;\"). \\\n\t\treplace(\" \", \"&nbsp;\"). \\\n\t\treplace(\"\\t\", \"&nbsp;&nbsp;&nbsp;&nbsp;\")", "def escape(cls, html):\n return (\"%s\" % (html)).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def escape(s):\r\n return str(s).replace('<', '&lt;').replace('>', '&gt;')", "def safestr(s):\n return quote(str(s), '')", "def htmlquote(text):\r\n text = text.replace(\"&\", \"&amp;\") # Must be done first!\r\n text = text.replace(\"<\", \"&lt;\")\r\n text = text.replace(\">\", \"&gt;\")\r\n text = text.replace(\"'\", \"&#39;\")\r\n text = text.replace('\"', \"&quot;\")\r\n return text", "def htmlstr(self, unsafe) :\n\t\tunsafe = string.replace(unsafe, '&', '&amp;')\n\t\tunsafe = string.replace(unsafe, '<', '&lt;')\n\t\treturn string.replace(unsafe, '>', '&gt;')", "def safeHTML(s):\n parser = StrippingParser()\n parser.feed(s)\n parser.close()\n parser.cleanup()\n return parser.result", "def _safe(text):\n return text.replace(\"'\", \"''\").replace(\"\\\\\", \"\\\\\\\\\")", "def safe_quoted_string(value):\n validate_safe_string(value)\n return u'\\'{}\\''.format(value)", "def test_single_quotes_returned(self):\n test_string = \"<p style=\\\"font-weight: bold;\\\">Test</p>\"\n cleaned = sanitizeFeedback(test_string)\n self.assertIn(\"'\", cleaned)\n self.assertEqual(cleaned, \n \"<p style='font-weight: bold;'>Test</p>\"\n )", "def escape(html):\n if not isinstance(html, unicode):\n if not isinstance(html, str):\n html = unicode(html)\n else:\n html = unicode(html, 'utf-8')\n return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")", "def safe(e):\n if PY2 and isinstance(e, unicode):\n return quote(e.encode('utf-8'), safe='')\n else:\n return quote(str(e), safe='')", "def htmlescape(s):\n if isinstance(s, htmltext):\n return s\n else:\n s = stringify(s)\n # inline _escape_string for speed\n s = s.replace(\"&\", \"&amp;\") # must be done first\n s = s.replace(\"<\", \"&lt;\")\n s = s.replace(\">\", \"&gt;\")\n s = s.replace('\"', \"&quot;\")\n return htmltext(s)", "def QuotedEscaped (s):\n return repr(s)", "def _html_esc(string):\n repls = {\n '<': 'lt',\n '>': 'gt',\n '&': 'amp',\n '\"': 'quot',\n }\n\n def repl(matchobj):\n return \"&%s;\" % repls[matchobj.group(0)]\n\n regex = \"([%s])\" % ''.join(repls.keys())\n return re.sub(regex, repl, string)", "def _escape(html):\n return encoding.force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def escape(s, quote=True):\n if s:\n s = s.replace(\"&\", \"&amp;\") # Must be done first!\n s = s.replace(\"<\", \"&lt;\")\n s = s.replace(\">\", \"&gt;\")\n if quote:\n s = s.replace('\"', \"&quot;\")\n s = s.replace('\\'', \"&#x27;\")\n return s", "def convertHTML(self, text):\n return text.replace('&#39;', \"'\")", "def html_quote(v):\n if v is None:\n return ''\n return cgi.escape(str(v), 1)", "def DoubleQuote(s):\n if not s:\n return '\"\"'\n elif all(c in _SafeShellChars for c in s):\n return s\n else:\n return '\"' + s.replace('\"', 
'\\\\\"') + '\"'", "def quot(string):\r\n return string.replace('\"', \"'\")", "def _encode_html(data: str) -> str:\n return html.escape(data)", "def clean_tag(data):\n # TODO: make this a method of Tag?\n return escape_html(data).replace('\"', '&quot;').replace(\"'\", '&#39')", "def html_escape(text): \n html_escape_table = {\n \"&\": \"&amp;\",\n '\"': \"&quot;\",\n \"'\": \"&apos;\",\n \">\": \"&gt;\",\n \"<\": \"&lt;\",\n }\n return \"\".join(html_escape_table.get(c,c) for c in text)", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def htmlencode(s):\n \ts = s.replace(\"&\", \"&amp;\")\n\ts = s.replace(\"<\", \"&lt;\")\n\ts = s.replace(\">\", \"&gt;\")\n\ts = s.replace(\"\\\"\",\"&quot;\")\n\ts = s.replace(\"'\", \"&apos;\")\n\treturn s", "def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)" ]
[ "0.7772578", "0.7563124", "0.7387473", "0.71487874", "0.7135516", "0.7122315", "0.7078092", "0.70605904", "0.7015321", "0.70024043", "0.6998787", "0.69906825", "0.6930037", "0.6901274", "0.68498677", "0.682636", "0.6826105", "0.6747699", "0.67402124", "0.67118645", "0.66713095", "0.6657349", "0.6654809", "0.6596323", "0.65714854", "0.6515001", "0.65053236", "0.6481663", "0.648135", "0.6446609" ]
0.784569
0
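
The negatives in the record above are all hand-rolled HTML-escaping helpers. For comparison only, a minimal sketch using the standard library; the helper name is illustrative and not taken from the dataset:

    import html

    def escape_for_html(s):
        # html.escape replaces &, <, > and, with quote=True, both " and '.
        return html.escape(s, quote=True)
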
Parses a URL and errors out if its not scheme http or https or has no net location
def url_check(url): url_tuple = urlparse.urlparse(url) if url_tuple[0] == 'http' or url_tuple[0] == 'https' and url_tuple[1] != "": return url else: raise Exception('bad url')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_url (url):\n # Parse URL provided\n v = urlparse(url)\n\n # Verify if protocol (http, https, ftp) and hostname are present \n # in the URL provided.\n if v.scheme and v.hostname:\n \n # Get URL base and hostname to form the correct URL base\n u = v.scheme + '://' + v.hostname + '/'\n return u\n\n else:\n # Not a valid URL\n return False", "def parse_url(url):\n loc = urlparse(url)\n\n # if the scheme (http, https ...) is not available urlparse wont work\n if loc.scheme == \"\":\n url = \"http://\" + url\n loc = urlparse(url)\n return loc", "def validate_base_url(base_url):\n parsed_url = urllib.parse.urlparse(base_url)\n if parsed_url.scheme and parsed_url.netloc:\n return parsed_url.geturl()\n else:\n error_message = \"base_url must contain a valid scheme (protocol \" \\\n \"specifier) and network location (hostname)\"\n raise ciscosparkapiException(error_message)", "def _validate_url(url):\n if urlparse.urlparse(url).scheme not in VALID_SCHEMES:\n _fail(url, \"Invalid URL\")", "def url_checker(url):\n if url.startswith(http_req):\n url_name = url[7:]\n # print('URL check passed. Using http')\n return url_name\n if url.startswith(https_req):\n url_name = url[8:]\n # print('URL check passed. Using https')\n return url_name\n else:\n print('URL check failed. not valid http or https URL')\n print(f'Bad URL:{url}')\n sys.exit()\n # return False", "def validate_url(self, v):\n u = urlparse.urlparse(v)\n if u.scheme.lower() not in ('http', 'https'):\n raise ValueError('URL scheme must be either http:// or https://')\n if not u.netloc:\n raise ValueError('URL must specify a network location.')\n return u.scheme.lower() == 'https'", "def validate_url(url: str):\n try:\n return urlparse(url)\n except KeyboardInterrupt:\n return None", "def validate_url(path):\n parsed = urlparse(path)\n return bool(parsed.scheme) and bool(parsed.netloc)", "def _parse_url(url):\r\n if \":\" not in url:\r\n raise ValueError(\"url is invalid\")\r\n\r\n scheme, url = url.split(\":\", 1)\r\n\r\n parsed = urlparse(url, scheme=\"http\")\r\n if parsed.hostname:\r\n hostname = parsed.hostname\r\n else:\r\n raise ValueError(\"hostname is invalid\")\r\n port = 0\r\n if parsed.port:\r\n port = parsed.port\r\n\r\n is_secure = False\r\n if scheme == \"ws\":\r\n if not port:\r\n port = 80\r\n elif scheme == \"wss\":\r\n is_secure = True\r\n if not port:\r\n port = 443\r\n else:\r\n raise ValueError(\"scheme %s is invalid\" % scheme)\r\n\r\n if parsed.path:\r\n resource = parsed.path\r\n else:\r\n resource = \"/\"\r\n\r\n if parsed.query:\r\n resource += \"?\" + parsed.query\r\n\r\n return (hostname, port, resource, is_secure)", "def is_valid(url):\n parsed = urlparse(url)\n return bool(parsed.netloc) and bool(parsed.scheme)", "def is_valid(url):\n parsed = urlparse(url)\n return bool(parsed.netloc) and bool(parsed.scheme)", "def _validate_base_url(url: str) -> None:\n parse_result = urlparse(url)\n if parse_result.scheme not in ('http', 'https'):\n raise ValueError(\n f'Only HTTP[S] URLs are permitted. Actual URL: {url!r}')\n if url.endswith('/'):\n raise ValueError('Base (DICOMweb service) URL cannot have a trailing '\n f'forward slash: {url!r}')", "def validate_url(url):\n url_verify = ''\n\n try:\n url_verify = urlopen(url)\n except HTTPError:\n get_user_response(message='Error validating URL: {}'.format(url))\n\n return url_verify", "def validate_url(url):\n\n # Minimal URL validation with urlparse. 
This is extremely lenient, we might\n # want to use something like https://github.com/kvesteri/validators instead.\n parsed_url = urlparse(url)\n\n if not parsed_url.scheme:\n parsed_url = urlparse(\"http://\" + url)\n\n if not re.match(\"https?\", parsed_url.scheme):\n raise ValueError('Links must have an \"http\" or \"https\" prefix')\n\n if not parsed_url.netloc:\n raise ValueError(\"Links must include a domain name\")\n\n return parsed_url.geturl()", "def check_url(url=None, parse_url=None):\n return False", "def _is_url(s: str) -> bool:\n\n return urlparse(s).netloc != \"\"", "def parse_url(url):\n (scheme, netloc, path, params, query, frag) = urlparse(url)\n\n # We only support web services\n if not scheme in ('http', 'https'):\n raise InvalidUrl('Scheme must be one of http or https')\n\n is_ssl = scheme == 'https' and True or False\n\n # Verify hostnames are valid and parse a port spec (if any)\n match = re.match('([a-zA-Z0-9\\-\\.]+):?([0-9]{2,5})?', netloc)\n\n if match:\n (host, port) = match.groups()\n if not port:\n port = is_ssl and '443' or '80'\n else:\n raise InvalidUrl('Invalid host and/or port: %s' % netloc)\n\n return (host, int(port), path.strip('/'), is_ssl)", "def is_url(url):\n if '://' not in url:\n return False\n proto, addr = url.split('://', 1)\n if proto.lower() not in ['tcp','pgm','epgm','ipc','inproc']:\n return False\n return True", "def validate_url(url: str) -> None:\n if not is_valid_url(url):\n raise ValueError(f\"Validation Error. Provided url '{url}' is not valid.\")\n try:\n response = requests.get(url)\n except Exception as e:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")\n else:\n if response.status_code != status.HTTP_200_OK:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")", "def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1", "def is_url(val):\n res = urlparse(val)\n return bool(res.scheme and res.netloc and res.params == \"\")", "def verify_url(url: str) -> bool:\n parsed_url = urlparse(url)\n return all([parsed_url.scheme, parsed_url.netloc])", "def _isurl(self, path):\n\n # We do this here to reduce the 'import numpy' initial import time.\n from urllib.parse import urlparse\n\n # BUG : URLs require a scheme string ('http://') to be used.\n # www.google.com will fail.\n # Should we prepend the scheme for those that don't have it and\n # test that also? 
Similar to the way we append .gz and test for\n # for compressed versions of files.\n\n scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)\n return bool(scheme and netloc)", "def is_valid_url(url: str) -> bool:\n try:\n result = urlparse(url)\n return all([result.scheme, result.netloc])\n except ValueError:\n return False", "def url_prepare(url):\n if 'http://' in url or 'https://' in url:\n return url\n try:\n if requests.get('https://' + url):\n return 'https://' + url\n except Exception as ex:\n pprint(ex)\n return 'http://' + url", "def url_error():\n try:\n from urllib.error import URLError\n except ImportError:\n from urllib2 import URLError # suppress(import-error)\n\n return URLError", "def url_validator_callback(url: str) -> str:\n if url is None:\n return url\n\n url = url.strip()\n try:\n result = urlparse(url)\n if result.scheme and result.netloc:\n return url\n except:\n pass\n raise typer.BadParameter(\"Please supply a valid url\")", "def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True", "def test_parse_malformed_url(self):\r\n url = u'http://whttp://lucumr.pocoo.org/2012/8/5/stateless-and-proud/'\r\n read = readable.ReadUrl.parse(url)\r\n self.assertEqual(read.status, 901)", "def is_url(url: str) -> bool:\n logger.info(url)\n result = urlparse(url)\n return all([result.scheme, result.netloc])" ]
[ "0.7514687", "0.74862754", "0.74089587", "0.7376512", "0.73097086", "0.7287078", "0.7267685", "0.72663707", "0.7234755", "0.7187554", "0.7187554", "0.7164459", "0.71450585", "0.71440923", "0.71121097", "0.7051559", "0.70260364", "0.69987065", "0.6996799", "0.69552004", "0.6935541", "0.6928888", "0.6912883", "0.68968123", "0.689071", "0.6879692", "0.6876471", "0.6865908", "0.6845711", "0.68404603" ]
0.81642485
0
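
The stored url_check relies on the Python 2 urlparse module, and its condition binds as http or (https and netloc != ""), so an http URL with an empty network location still passes. A Python 3 sketch that groups the scheme test explicitly (function name and error message are illustrative, not from the dataset):

    from urllib.parse import urlparse

    def check_url(url):
        # Accept only http/https URLs that also carry a network location.
        parts = urlparse(url)
        if parts.scheme in ("http", "https") and parts.netloc:
            return url
        raise ValueError("bad url: %r" % url)
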
Parses a URL and truncates it after the domain part
def url_truncate(url): url_tuple = urlparse.urlparse(url) return url_tuple[0] + '://' + url_tuple[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def domain_parse(url):\n url = url.lower()\n if not url.startswith('http://') and not url.startswith('https://'):\n url = '{schema}{host}'.format(schema='http://', host=url)\n url = urlparse(url)\n if not url.hostname:\n raise ValueError('Invalid domain provided')\n\n # Strip www prefix any additional URL data\n url = urlparse('{scheme}://{host}'.format(scheme=url.scheme, host=url.hostname.lstrip('www.')))\n return url", "def get_domain(self, full_url):\n clean_reg= re.compile(r'^((?:https?:\\/\\/)?(?:www\\.)?).*?(\\/.*)?$')\n match = re.search(clean_reg, full_url)\n beg, end = match.group(1), match.group(2)\n domain = string.replace(full_url, beg, '')\n domain = string.replace(domain, end, '')\n return domain", "def parse_domain(url):\n parsed_url = urllib.parse.urlparse(url)\n if not parsed_url.netloc:\n parsed_url = urllib.parse.urlparse('http://' + url)\n domain = parsed_url.netloc\n domain = domain.split('.')[-2:] # remove subdomains\n return '.'.join(domain)", "def getDomain(url):\n domain = string.replace(url,\"https://www.\",\"\")\n domain = string.replace(domain,\"http://www.\",\"\")\n domain = string.replace(domain,\"http://\",\"\")\n domain = string.replace(domain,\".com/\",\"\")\n domain = string.replace(domain,\".com\",\"\")\n return domain", "def make_clean_url(url):\n return urlparse.urldefrag(url)[0]", "def truncate_url(url):\n url = parse.unquote(url)\n if len(url) <= 60 :\n return url\n url = url[:-1] if url.endswith(\"/\") else url\n url = url.split(\"//\",1)[1].split(\"/\")\n url = \"%s/.../%s\"%(url[0],url[-1])\n return url[:60]+\"...\" if len(url) > 60 else url", "def parse_domain(url):\n domain_match = lib.DOMAIN_REGEX.match(url)\n if domain_match:\n return domain_match.group()", "def processUrl(url):\n domain = 'http://www.gsmarena.com/'\n if domain not in url:\n url = urllib.parse.urljoin(domain, url)\n return url", "def parse_url(url):\n newurl = urlparse(url)\n return \"{0}://{1}\".format(newurl.scheme, newurl.netloc)", "def parsing(url):\n\n url = urlparse(url).netloc\n a = url.split('.')\n if len(a) >= 3:\n a = a[:-(len(a) - 1)]\n else:\n a = a[:-1]\n x = ('.'.join(a))\n return x", "def __ParseUrl(url):\n return urlparse(url)", "def clean_url(url: str) -> str:\n r = urlparse(url)\n parts = list(r)\n # Add a / to the end of the path if it isn't there\n if not parts[2].endswith(\"/\"):\n parts[2] += \"/\"\n return urlunparse(parts)", "def domain(url):\n if isinstance(url, str):\n domain_match = re.match(r'https?://(?:www\\.)?([^/]+)\\.[^/]+', url)\n return domain_match.group(1) if domain_match else ''\n else:\n raise ParseError('Invalid input for domain(): {}'.format(url))", "def strip(url):\r\n split = list(urlsplit(url))\r\n split[4]=''\r\n return urlunsplit(split)", "def get_domain(url):\n a = urllib.parse.urlsplit(url)\n return str(a.scheme) + \"://\" + str(a.hostname)", "def clean_url(url):\n o = urlsplit(url)\n return \"{scheme}://{netloc}{path}\".format(\n scheme=o[0], netloc=o[1], path=o[2],\n )", "def clean_url(url):\n scheme, netloc, path, query, fragment = url_parse.urlsplit(url)\n path = url_parse.quote(path)\n url = url_parse.urlunsplit((scheme, netloc, path, query, fragment))\n return url", "def urlparse(url):\n\tunquote_url=urllib.parse.unquote(url)\n\treturn unquote_url", "def process_url(url):\n # only get url path, remove host,params.\n url = urlparse(url).path\n # url = list(url)\n # for i in range(len(url)):\n # if _is_punctuation(url[i]):\n # url[i] = \" \"\n # url = ''.join(url)\n # url = ' '.join(url.split())\n return url", "def 
get_root_domain(url):\n if url is None:\n return ''\n\n http = text.find_between(url, 'http://', '/')\n https = text.find_between(url, 'https://', '/')\n if http != '':\n if http.startswith('www.'):\n return http[4:]\n else:\n return http\n else:\n if https != '':\n if https.startswith('www.'):\n return https[4:]\n else:\n return https\n else:\n return ''", "def normalize_url(url: str) -> str:\n parts = urlparse(url)\n\n path = quote(parts.path)\n while '//' in path:\n path = path.replace(\"//\", \"/\")\n\n return urlunparse(parts._replace(path=path))", "def get_domain(url):\n assert url is not None\n protocol = get_protocol(url)\n find = re.search(r\"(^https?://)?([a-z]|[A-Z]|[0-9]|\\.)+/?\", url)\n result = None\n if find:\n result = find.group(0)\n if result.endswith(\"/\"):\n result = result[0:-1]\n result = result.replace(\"www.\", \"\")\n if protocol:\n result = result.replace(protocol, \"\")\n return result", "def url_get_domain(url):\n\n url_tuple = urlparse.urlparse(url)\n return url_tuple[1]", "def extractDomain(self, url):\n domain = ''\n pattern = re.compile(r'http[s]?://([^/]+)/', re.U | re.M)\n url_match = pattern.search(url)\n if(url_match and url_match.lastindex > 0):\n domain = url_match.group(1)\n\n return domain", "def derive_domain(url, base=None):\n parsed = urlparse(url)\n if parsed.netloc:\n return parsed.netloc\n\n try:\n url_start = url[0]\n except (TypeError, IndexError):\n raise ValueError(f'Invalid URL: {url}')\n\n if url_start == '/':\n try:\n base_start = base[0]\n except (TypeError, IndexError):\n raise ValueError('Base is required for relative URL')\n\n if base_start == '/':\n raise ValueError('Base may not be relative')\n\n return derive_domain(url=base)\n\n first_slash_index = url.find('/')\n if first_slash_index > 0:\n return url[:first_slash_index]\n\n return url", "def hostify_url(url):\n\tif url[0] == '/':\n\t\treturn HOST + url\n\telse:\n\t\treturn url", "def cut_url(url):\n if len(url) > 50:\n return f\"...{url[-45:]}\"\n return url", "def parse_url(url):\n url = urllib.parse.urlparse(url)\n query = urllib.parse.parse_qs(url.query)\n query_ = query.get('dn', query.get('title', ''))[0]\n if url.scheme == \"magnet\":\n return \"magnet:?xt={}\".format(query['xt'][0]), query_\n return \"http://{}{}{}\".format(*url[0:3]), query_", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def clean_url(url):\n return url[:url.find('?')]" ]
[ "0.7620176", "0.7324768", "0.7284738", "0.7240761", "0.7154701", "0.70812964", "0.7073174", "0.70719314", "0.706794", "0.7067837", "0.7048127", "0.7047652", "0.69829565", "0.6875338", "0.6874541", "0.6873694", "0.6865281", "0.6862966", "0.68619376", "0.68316275", "0.68275714", "0.6820332", "0.6817899", "0.67989576", "0.6798658", "0.67451656", "0.67269784", "0.67064476", "0.6681164", "0.66718256" ]
0.76441383
0
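
For reference, a Python 3 counterpart to the truncation above; the function name is illustrative only:

    from urllib.parse import urlparse

    def truncate_to_origin(url):
        # Keep only scheme://netloc, dropping path, params, query and fragment.
        parts = urlparse(url)
        return f"{parts.scheme}://{parts.netloc}"
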
Init yarp ports for communication /data_server/in and /data_server/out
def server_yarp(): """Depending on the command it gets a finger data or all fingers data and send it through the port /data_server/out""" y.Network.init() portsrv_in = y.BufferedPortBottle() portsrv_in.open("/data_server/in") portsrv_out = y.BufferedPortBottle() portsrv_out.open("/data_server/out") #util vars time = 2 #daemon code while(True): bottle_in = portsrv_in.read() bottle_out = portsrv_out.prepare() command = bottle_in.get(0).asString() print command if command == "getdata": finger = bottle_in.get(1).asInt() hand.hand_in = hand.update_input() enabledVal=hand.hand_in["Enabled"][finger] handData=[] handData.append(hand.hand_in["Pos"][finger*3 + 0]) handData.append(hand.hand_in["Pos"][finger*3 + 1]) handData.append(hand.hand_in["Pos"][finger*3 + 2]) handData.append(hand.hand_in["Velocity"][finger*3 + 0]) handData.append(hand.hand_in["Velocity"][finger*3 + 1]) handData.append(hand.hand_in["Velocity"][finger*3 + 2]) handData.append(hand.hand_in["Torque"][finger*3 + 0]) handData.append(hand.hand_in["Torque"][finger*3 + 1]) handData.append(hand.hand_in["Torque"][finger*3 + 2]) bottle_out.clear() bottle_out.addString("getdata") bottle_out.addInt(finger) bottle_out.addDouble(enabledVal) for i in range(0,9): bottle_out.addDouble(handData[i]) portsrv_out.write() elif command == "getall": hand.hand_in = hand.update_input() allData = str(hand.hand_in) bottle_out.clear() bottle_out.addString("getall") bottle_out.addString(allData) portsrv_out.write()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_data_port(self):\n err = None\n sock = None\n for res in socket.getaddrinfo(None, 0, socket.AF_INET, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):\n af, socktype, proto, canonname, sa = res\n try:\n sock = socket.socket(af, socktype, proto)\n sock.bind(sa)\n except OSError as _:\n err = _\n if sock:\n sock.close()\n sock = None\n continue\n break\n if sock is None:\n if err is not None:\n raise err\n else:\n raise OSError(\"getaddrinfo returns an empty list\")\n sock.listen(1)\n port = sock.getsockname()[1]\n host = self.sock.getsockname()[0]\n response = self._send_port_command(host, port)\n return sock, response", "def defineDataServer(*args, device: AnyStr=\"\", server: AnyStr=\"\", undefine: bool=True,\n **kwargs)->None:\n pass", "def connect_to_server(self):\n\t\tself.outside.start()\n\t\tself.outside.register(self.config.server_ip, self.config.server_port)\n\n\t\tself.thin.start()\n\t\tself.thin.register(self.config.server_ip, self.config.server_port)", "def __init__(self, *arg, **kw):\n #: Input connection mapping\n self._inputs = {}\n for name, port in six.iteritems(self.input_ports):\n self._inputs[name] = port(self, name=name)\n\n #: Output connection mapping\n self._outputs = {}\n for name, port in six.iteritems(self.output_ports):\n self._outputs[name] = port(self, name=name)\n\n #: data cache\n self._output_data = {}", "def init(self, HOST, PORT, BACKLOG):\n s = socket(AF_INET, SOCK_STREAM)\n s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n #s.setblocking(0)\n s.bind((HOST, PORT))\n s.listen(BACKLOG)\n # Add socket to list of available inputs\n self.server = s\n self.inputs.append(s)\n self.log(\"Bound socket to port: %s\", PORT)\n self.log(\"Sequence initialized to: %s\", self.seqNum)", "def server_init(log_set, conf_set, header_set, commands_w_set):\n global log_th, conf_th, header_th, command_w_th\n log_th = log_set\n conf_th = conf_set\n header_th = header_set\n command_w_th = commands_w_set\n sock_ip = conf_set.get_item(q_key='general').get('sock_ip')\n port = int(conf_set.get_item(q_key='general').get('port'))\n return ThreadedTCPServer((sock_ip, port), ThreadedTCPRequestHandler)", "def port_setup(robot_name, num_cameras):\n\tglobal local_in_port\n\tglobal local_out_port\n\tglobal local_GPS_port\n\tglobal local_Dest_port\n\n\tglobal local_in_port_name\n\tglobal local_out_port_name\n\tglobal local_GPS_port_name\n\tglobal local_Dest_port_name\n\n\tglobal local_Radio_in_port\n\tglobal local_Radio_out_port\n\n\tglobal ors_in_port_name\n\tglobal ors_out_port_name\n\tglobal ors_GPS_port_name\n\tglobal ors_Dest_port_name\n\tglobal ors_Radio_in_port_name\n\tglobal ors_Radio_out_port_name\n\n\t# Define the names for all the ports\n\tport_prefix = \"/ors/robots/\" + robot_name + \"/\"\n\tlocal_port_prefix = \"/atrv_client/\" + robot_name + \"/\"\n\tview_prefix = \"/img/\" + robot_name + \"/\"\n\n\tors_in_port_name = port_prefix + \"in\"\n\tors_out_port_name = port_prefix + \"out\"\n\n\tors_Dest_port_name = port_prefix + \"Motion_Controller/in\"\n\tors_GPS_port_name = port_prefix + \"GPS/out\"\n\n\tors_Radio_out_port_name = port_prefix + \"Radio/out\"\n\tors_Radio_in_port_name = port_prefix + \"Radio/in\"\n\n\tlocal_in_port_name = local_port_prefix + \"in/\"\n\tlocal_out_port_name = local_port_prefix + \"out/\"\n\n\tlocal_GPS_port_name = local_port_prefix + \"GPS/in/\"\n\tlocal_Dest_port_name = local_port_prefix + \"Motion_Controller/out/\"\n\n\tlocal_Radio_in_port_name = local_port_prefix + \"Radio/in\"\n\tlocal_Radio_out_port_name = local_port_prefix + \"Radio/out\"\n\n\t# 
Start the yarp network connection\n\tyarp.Network.init()\n\n\t# Open the client ports\n\tlocal_in_port = yarp.BufferedPortBottle()\n\tlocal_in_port.open(local_in_port_name)\n\tlocal_out_port = yarp.BufferedPortBottle()\n\tlocal_out_port.open(local_out_port_name)\n\n\tlocal_GPS_port = yarp.BufferedPortBottle()\n\tlocal_GPS_port.open(local_GPS_port_name)\n\tlocal_Dest_port = yarp.BufferedPortBottle()\n\tlocal_Dest_port.open(local_Dest_port_name)\n\n\tlocal_Radio_out_port = yarp.BufferedPortBottle()\n\tlocal_Radio_out_port.open(local_Radio_out_port_name)\n\tlocal_Radio_in_port = yarp.BufferedPortBottle()\n\tlocal_Radio_in_port.open(local_Radio_in_port_name)\n\n\t# Connect the client ports to the simulator ports\n\tyarp.Network.connect (local_out_port_name, ors_in_port_name)\n\tyarp.Network.connect (ors_out_port_name, local_in_port_name)\n\n\tyarp.Network.connect (ors_GPS_port_name, local_GPS_port_name)\n\tyarp.Network.connect (local_Dest_port_name, ors_Dest_port_name)\n\n\tyarp.Network.connect (local_Radio_out_port_name, ors_Radio_in_port_name)\n\tyarp.Network.connect (ors_Radio_out_port_name, local_Radio_in_port_name)\n\n\n\t# Connect the cameras to yarpview windows\n\tprint (\" * Initializing yarpview windows.\")\n\tfor id in range(int(num_cameras)):\n\t\t# Build the name of the camera\n\t\tcamera_name = \"Camera{0}\".format(id+1)\n\n\t\t# Prepare the ports to be used\n\t\timg_view_port = view_prefix + camera_name\n\t\tatrv_camera_port = port_prefix + camera_name\n\n\t\tyarp.Network.connect (atrv_camera_port, img_view_port)", "def setup_for_run(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.ip_address, self.port))\n self.server.listen(100)", "def init(self, address, port):\n \n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.event_dict = {}\n\n # Create a TCP/IP socket\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (address, port)\n print('connecting to {} port {}'.format(address, port))\n self.sock.connect(server_address)\n self.axis_data = {i:0 for i in range(7)}\n self.verbose = True", "def initServer( self ):\n self.createDict()\n self.queue = []\n if not self.regKey or not self.serNode: raise SerialDeviceError( 'Must define regKey and serNode attributes' )\n port = yield self.getPortFromReg( self.regKey )\n self.port = port\n try:\n serStr = yield self.findSerial( self.serNode )\n self.initSerial( serStr, port )\n except SerialConnectionError, e:\n self.ser = None\n if e.code == 0:\n print 'Could not find serial server for node: %s' % self.serNode\n print 'Please start correct serial server'\n elif e.code == 1:\n print 'Error opening serial connection'\n print 'Check set up and restart serial server'\n else: raise\n yield self.populateDict()\n self.free = True\n self.setComp( None, 'common', 0.0 )", "def initialize(self, config: DataProviderConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.OUTPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = 5\n chn.sock_opts['sndhwm'] = int(self.batch_size / 2) + 5", "def __init__(self, host, server_port):\n # Set up the socket connection to the server\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.run(host, server_port)\n\n # TODO: Finish init 
process with necessary code\n #Vegard sier vi ikke skal skrive noe her", "def __init__(self):\n self.running = False\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.p1 = None\n self.p2 = None", "def initialize(self, config: DataConsumerConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.INPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = int(self.batch_size / 2) + 5\n chn.sock_opts['sndhwm'] = 5", "def init_server():\n\n global server_socket\n # creating a server socket\n server_socket = socket.socket()\n\n # binding the server socket to the wanted adress\n server_socket.bind(SERVER_ADDRESS)", "def __init__(self, server_addr, server_port):", "def start_server(input_array):\n print(responses.STATUS_SERVER_STARTING)\n\n # Instantiate a new server object which listens to the provided ports\n server = Server(input_array[0], input_array[1], input_array[2])\n\n # Create 3 UDP sockets\n server.create_udp_sockets()\n\n # Begin listening for packets\n while 1:\n server.begin_listening()\n\n print(responses.STATUS_CLOSING_SOCKETS)\n server.english_sc.close()\n server.maori_sc.close()\n server.german_sc.close()\n print(responses.STATUS_SERVER_SHUTDOWN)", "def set_up(self, host, port):\n self.socket.bind((host, port))\n self.socket.listen(5)\n while True:\n connectSocket , addr = self.socket.accept()\n sentence = connectSocket.recv(2048)\n sentence = sentence.decode()\n self.file(sentence)\n self.parser(connectSocket)", "def start(self, data):\n if \"Connector\" in data:\n self.connector = data[\"Connector\"]\n else:\n self.connector = Connector(data)\n data[\"Connector\"] = self.connector\n self.register_to_event(EVENT_GLOBAL_INTERRUPT, self.event_interrupt)", "def __init__(self):\n super().__init__(sys.argv)\n self.s1 = serverControl()\n self.c1 = clientControl(\"Markus\")\n self.c2 = clientControl(\"Hannes\")", "def start(self):\n self.port = self.conn.evalInServer(server_code.format(key=self.key))", "def __init__(self, port1, port2, port3):\n # Initialise ports\n self.ports = {\n \"English\": int(port1),\n \"Te reo Maori\": int(port2),\n \"German\": int(port3)\n }\n\n # Initialise sockets\n self.english_sc = None\n self.maori_sc = None\n self.german_sc = None", "def __init__(self, host, port):\n self.endline = '\\n'\n self.host = host\n self.port = port\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n self.sock.connect((host, port))\n\n self.current_data = 0\n self.current_3D_points = 0\n self.is_new_data = False", "def __init__(self):\n self.server_socket = None\n try:\n self.receive_video_socket = \\\n self.start_socket(IP, RECEIVE_VIDEO_PORT)\n print('started socket at ip {} port {}'\n .format(IP, RECEIVE_VIDEO_PORT))\n self.send_video_socket = \\\n self.start_socket(IP, SEND_VIDEO_PORT)\n print('started socket at ip {} port {}'\n .format(IP, SEND_VIDEO_PORT))\n self.receive_audio_socket = \\\n self.start_socket(IP, RECEIVE_AUDIO_PORT)\n print('started socket at ip {} port {}'\n .format(IP, RECEIVE_AUDIO_PORT))\n self.send_audio_socket = \\\n self.start_socket(IP, SEND_AUDIO_PORT)\n print('started socket at ip {} port {}'\n .format(IP, SEND_AUDIO_PORT))\n self.client_video_dict = {}\n self.client_audio_dict = {}\n\n except socket.error as e:\n print(\"socket creation fail: \", e)\n self.close_all()\n except Exception as e:\n print(\"server construct fail: \", e)\n self.close_all()", "def 
__init__(self):\n self._server = None\n self._address = \"\"\n self._port = 0", "def init(self, msg_in = None, client = None):\r\n self.name = 'Oasis_DL'\r\n\r\n self.circular_buffers[b'act_temperature'] = CBServer(size = (2,4320000), var_type = 'float64')\r\n self.circular_buffers[b'cmd_temperature'] = CBServer(size = (2,4320000), var_type = 'float64')\r\n self.circular_buffers[b'fault'] = CBServer(size = (2,10000), var_type = 'float64')\r\n\r\n self.description = ''\r\n\r\n self.task_dictionary[0] = {b'function':driver.get_actual_temperature,b'name':b'act_temperature'}\r\n self.task_dictionary[1] = {b'function':driver.set_target_temperature,b'name':b'cmd_temperature'}\r\n self.task_dictionary[2] = {b'function':driver.get_faults,b'name':b'fault'}\r\n \r\n\r\n\r\n self.task_dictionary[10] = {b'function':driver.set_lower_limit,b'name':b'set_lower_limit'}\r\n self.task_dictionary[11] = {b'function':driver.get_lower_limit,b'name':b'get_lower_limit'}\r\n self.task_dictionary[12] = {b'function':driver.set_upper_limit,b'name':b'set_upper_limit'}\r\n self.task_dictionary[13] = {b'function':driver.get_upper_limit,b'name':b'get_upper_limit'}\r\n\r\n flag = False\r\n message = None\r\n err = ''\r\n flag, message, err = driver.init(), '', ''\r\n if flag:\r\n self.lower_limit = driver.device_dict[b'lower_limit']\r\n self.upper_limit = driver.device_dict[b'upper_limit']\r\n\r\n response = {}\r\n response[b'flag'] = flag\r\n response[b'message'] = message\r\n response[b'error'] = err\r\n return response", "def __init__(self,device=0,Port=None,Server=None):\n self.Port = Port\n self.Server=Server\n if self.Server != None: # TCP Client mode\n self.NetInit()\n self.Transaction=self._NetTransaction\n else:\n try:\n self.I2cBus = SMBus(device)\n except :\n print 'Need python-smbus for I2C bus to work'\n print ''\n print 'To install: sudo apt-get install python-smbus'\n return None\n if self.Port != None: #TCP Server Mode\n self.ServerThread = threading.Thread(target=self.ListenerTread)\n self.ServerThread.start()", "def __init__(self, ip, port, beam_manager):\n self._beam_manager = beam_manager\n self._reference_target = None\n super(DelayConfigurationServer, self).__init__(ip,port)", "def __init__(self, host=\"127.0.0.1\", port=5037):\n self._devices = []", "def __init__(self, host=\"127.0.0.1\", port=5037):\n self._devices = []" ]
[ "0.64356095", "0.60305744", "0.60272783", "0.59800035", "0.59362817", "0.59055966", "0.5846435", "0.5844897", "0.57740307", "0.5770993", "0.5770788", "0.5761662", "0.56832856", "0.5680248", "0.5649125", "0.56416017", "0.5637095", "0.56346905", "0.5632841", "0.5629053", "0.5602887", "0.5598959", "0.5596424", "0.55928683", "0.555657", "0.555358", "0.5546168", "0.5539616", "0.5533526", "0.5533526" ]
0.638576
1
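
The YARP calls that appear in the record above (Network.init, BufferedPortBottle, open, read, prepare, the bottle add* methods, write) are enough for a minimal request/reply loop. A stripped-down sketch assuming the same Python bindings and port names as the record; the reply payload is a placeholder:

    import yarp

    yarp.Network.init()

    port_in = yarp.BufferedPortBottle()
    port_in.open("/data_server/in")
    port_out = yarp.BufferedPortBottle()
    port_out.open("/data_server/out")

    while True:
        request = port_in.read()            # blocks until the next bottle arrives
        command = request.get(0).asString()
        reply = port_out.prepare()
        reply.clear()
        reply.addString(command)            # echo the command back
        reply.addInt(0)                     # placeholder status value
        port_out.write()
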
a ppg/no ppg aware run wrapper
def run(): if ppg2.inside_ppg(): ppg2.run() else: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(_):\n pass", "def run():\n main()", "def run(ctx):\n pass", "def custom():\n run(\"example\")", "def run_module_ground_plan(args):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def makeprg(setup, loop):\n return loop", "def main():\n run_program()", "def no_arg():\n run_no_arg()", "def run():\n # main(sys.argv[1:])\n main()", "def run(self, stdout=None, stderr=None):", "def main():\n tng.api.runner()", "def both_ppg_and_no_ppg(request):\n\n if request.param:\n if request.cls is None:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (\".\" + request.node.name + str(request.param))\n )\n else:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (request.cls.__name__ + \".\" + request.node.name)\n )\n if target_path.exists(): # pragma: no cover\n shutil.rmtree(target_path)\n target_path = target_path.absolute()\n old_dir = Path(os.getcwd()).absolute()\n try:\n first = [False]\n\n def np(quiet=True, **kwargs):\n if not first[0]:\n Path(target_path).mkdir(parents=True, exist_ok=True)\n os.chdir(target_path)\n Path(\"cache\").mkdir()\n Path(\"results\").mkdir()\n Path(\"out\").mkdir()\n\n first[0] = True\n if not \"log_level\" in kwargs:\n kwargs[\"log_level\"] = 40\n if not \"cores\" in kwargs:\n kwargs[\"cores\"] = 1\n if not \"allow_short_filenames\" in kwargs:\n kwargs[\"allow_short_filenames\"] = True\n if not \"prevent_absolute_paths\" in kwargs:\n kwargs[\"prevent_absolute_paths\"] = False\n if not \"run_mode\" in kwargs:\n kwargs[\"run_mode\"] = ppg2.RunMode.NONINTERACTIVE\n\n g = ppg2.new(**kwargs)\n g.new = np\n g.new_pipegraph = np # ppg1 test case compatibility\n g.result_dir = Path(\"results\") # ppg test case compatibility\n return g\n\n def finalize():\n if hasattr(request.node, \"rep_setup\"):\n\n if request.node.rep_setup.passed and (\n hasattr(request.node, \"rep_call\")\n and (\n request.node.rep_call.passed\n or request.node.rep_call.outcome == \"skipped\"\n )\n ):\n try:\n shutil.rmtree(target_path)\n except OSError: # pragma: no cover\n pass\n\n request.addfinalizer(finalize)\n yield np()\n\n finally:\n os.chdir(old_dir)\n else:\n if request.cls is None:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (\".\" + request.node.name + str(request.param))\n )\n else:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (request.cls.__name__ + \".\" + request.node.name)\n )\n if target_path.exists(): # pragma: no cover\n shutil.rmtree(target_path)\n target_path = target_path.absolute()\n target_path.mkdir()\n old_dir = Path(os.getcwd()).absolute()\n os.chdir(target_path)\n try:\n\n def np():\n ppg2.global_pipegraph = None\n\n class Dummy:\n pass\n\n d = Dummy\n d.new = lambda: None\n d.new_pipegraph = lambda: None # ppg test case compatibility\n d.result_dir = Path(\"results\") # ppg test case compatibility\n ppg2.change_global_pipegraph(None)\n return d\n\n def finalize():\n if hasattr(request.node, \"rep_setup\"):\n\n if request.node.rep_setup.passed and (\n request.node.rep_call.passed\n or request.node.rep_call.outcome == \"skipped\"\n ):\n try:\n shutil.rmtree(target_path)\n except OSError: # pragma: no cover\n pass\n\n request.addfinalizer(finalize)\n ppg2.change_global_pipegraph(None)\n print('gloabl', 
ppg2.global_pipegraph)\n yield np()\n\n finally:\n os.chdir(old_dir)", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def multi_run_wrapper(args):\n\treturn img_preprocessing(*args)", "def run_psea(fname):\n ...", "def Run(self, args):\n pass", "def Run():\r\n pass", "def run(self, args):\n pass", "def main():\n # Remove the funny -psn_xxx_xxx argument (from py2app)\n if len(sys.argv) > 1 and sys.argv[1][:4] == '-psn':\n del sys.argv[1]\n\n if len(sys.argv) <= 1:\n phoshare.phoshare_ui.main()\n else:\n phoshare.phoshare_main.main()", "def process():\n pass" ]
[ "0.64849204", "0.62375826", "0.604792", "0.60420704", "0.5985371", "0.59746647", "0.58955795", "0.58955795", "0.58955795", "0.58955795", "0.58955795", "0.58955795", "0.58955795", "0.58955795", "0.57908344", "0.57514197", "0.57406765", "0.57267183", "0.570456", "0.5685785", "0.5682515", "0.56807065", "0.56807065", "0.56708807", "0.5660613", "0.56296146", "0.5618514", "0.561127", "0.5599796", "0.5594217" ]
0.74806374
0
Return a sequence of the names in a directory. If the filter is not None, include only those names for which the filter returns a true value.
def names(filter=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def names(path, filter=None):", "def get_filtered_dir_list(self) -> typing.List[str]:\n if self._regex is None:\n self.build_regex()\n\n return [\n elem\n for elem in self.super_dir()\n if self._regex.fullmatch(elem)\n ]", "def scan_directories(data_dir, file_filter):\n\n root = os.walk(data_dir)\n\n print('Scanning for files...')\n output = []\n\n for directory in root:\n\n files = directory[2]\n\n # Valid dataset contains video files of both halves and an accompanying label\n if file_filter(files):\n output.append(directory[0])\n\n print('Done')\n\n return output", "def __getFileList(self, path, filterRe):\n path = os.path.abspath(path)\n files = []\n for dirname, _, names in os.walk(path):\n files.extend([os.path.join(dirname, f)\n for f in names\n if re.match(filterRe, f)]\n )\n return files", "def ls(filter=None):", "def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files", "def test_get_only_names(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder, full_path=False, only_files=True, walk=True)\n need_result = []\n for i in range(1, 4):\n need_result.append('meme{}.jpg'.format(i))\n need_result.extend(['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n '1.txt',\n '2.txt',\n '3.txt',\n 'not_txt.not_txt',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ])\n self.assertEqual(sorted(result), sorted(need_result))", "def zip_names(zip):\n if hasattr(zip, 'names'):\n return zip.names()\n else:\n def zip_filter():\n # 'Fix' an issue where directories are also being listed...\n for name in zip.namelist():\n if name[-1] != '/':\n yield name\n return zip_filter()", "def ls(path, filter=None):", "def dir_filter(x):\n return os.path.isdir('logs/{}'.format(x))", "def _listfiles(dirname):\n for root, dirs, files in os.walk(dirname):\n fkey = os.path.basename(root)\n f = []\n for name in files:\n key = os.path.splitext(name)[0]\n listfilters[key] = os.path.join(root, name)\n f.append(key)\n if f:\n showfilters[fkey] = f", "def dir_filter(item):\n return not item.startswith(\"_\")", "def filter_files(path, string):\n try:\n listing = os.listdir(path)\n return [f for f in listing if string in f]\n except:\n raise ValueError(\"Error in upy.contrib.tree.menu @ filter_files()\")", "def find_filtered_folders(folder, regex_filter=None):\n found = []\n\n for name in os.listdir(folder):\n # Skip over special folders and hidden names\n name_len = len(name)\n if name_len >= 1 and name[0] == '.':\n continue\n\n full_name = os.path.join(folder, name)\n if os.path.isdir(full_name):\n if not regex_filter is None:\n match = re.search(regex_filter, full_name)\n if not match is None:\n found.append(name)\n else:\n found.append(name)\n\n found_len = len(found)\n return found if not found_len <= 0 else None", "def test_filter_files(self):\n expected = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1\", False),\n ]\n files = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir2/fichier2\", False),\n (\"/subdir2/fichier3\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1/fichier1\", False),\n (\"/subdir1/subsubdir1/\", False),\n ]\n self.assertEqual(\n list(self.path_translator.filter_files(files, \"/subdir1\")),\n expected)", "def dirname_filter ( 
self, dirname, _fnmatch=fnmatch.fnmatch ):\n return all (\n not _fnmatch ( dirname, pat ) for pat in self.DIRNAMES_IGNORE\n )", "def get_dirs_prefix(wdir, prefix, excludes=None, Lshow=True, Ldir=True):\n matched_dirs=[]\n for fname in os.listdir(wdir):\n # re.match finds only prefix\n if os.path.isdir(fname) and re.match(prefix, fname):\n if excludes:\n tag=False\n for ex in excludes:\n if re.search(ex, fname):\n tag=True\n break\n if not tag :\n matched_dirs.append(fname)\n print (fname)\n else:\n matched_dirs.append(fname)\n print (fname)\n return matched_dirs", "def glob1(self, dirname, pattern):\n names = self.listdir(dirname)\n if pattern[0] != '.':\n names = filter(lambda x: x[0] != '.',names)\n return fnmatch.filter(names, pattern)", "def filtered_walk(rootdir, filter_fn, include_dirs=None, exclude_dirs=None, get_dirs=False):\n flist = []\n dlist = []\n for root, dirs, files in os.walk(rootdir):\n if include_dirs and len(set(root.split(os.sep)).intersection(set(include_dirs))) == 0:\n ## Also try re.search in case we have patterns\n if re.search(\"|\".join(include_dirs), root):\n pass\n else:\n continue\n if exclude_dirs and len(set(root.split(os.sep)).intersection(set(exclude_dirs))) > 0:\n continue\n if exclude_dirs and re.search(\"|\".join(exclude_dirs), root):\n continue\n dlist = dlist + [os.path.join(root, x) for x in dirs]\n flist = flist + [os.path.join(root, x) for x in filter(filter_fn, files)]\n if get_dirs:\n return dlist\n else:\n return flist", "def list_and_filter(self, pattern, root_path):\n for path, dirs, files in os.walk(os.path.abspath(root_path)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)", "def get_result_filenames(self,directory):\n return [os.path.join(directory,name) for name in os.listdir(directory)\n if os.path.isfile(os.path.join(directory,name)) and\n os.path.splitext(name)[1].lower() == '.trf']", "def created_names(self, prefix):\n assert os.path.isdir(prefix)\n cwd = os.getcwd()\n os.chdir(prefix)\n names = tuple(sorted(filter(\n os.path.isdir,\n glob.glob(os.path.join(*('*' * self.depth))))))\n os.chdir(cwd)\n return names", "def _filter_files(file_dir: Union[str, Path], is_viya4: Optional[bool] = False) -> list:\n file_names = []\n file_names.extend(sorted(Path(file_dir).glob(\"*.json\")))\n if is_viya4:\n file_names.extend(sorted(Path(file_dir).glob(\"score_*.py\")))\n file_names.extend(sorted(Path(file_dir).glob(\"*.pickle\")))\n # Include H2O.ai MOJO files\n file_names.extend(sorted(Path(file_dir).glob(\"*.mojo\")))\n if file_names:\n return file_names\n else:\n raise FileNotFoundError(\n \"No valid model files were found in the provided file directory.\"\n )", "def filter_by_name_prefix(\n repos: Iterable[instarepo.github.Repo], string_filter: Optional[StringFilter]\n) -> Iterable[instarepo.github.Repo]:\n if (\n not string_filter\n or not string_filter.value\n or string_filter.mode == FilterMode.ALLOW\n ):\n return repos\n if string_filter.mode == FilterMode.ONLY:\n return (repo for repo in repos if repo.name.startswith(string_filter.value))\n elif string_filter.mode == FilterMode.DENY:\n return (repo for repo in repos if not repo.name.startswith(string_filter.value))\n else:\n raise ValueError(\"Invalid filter mode \" + string_filter.mode)", "def filter(self, f, include_directories=False):\n return self._filter(f=f, include_directories=include_directories)", "def _listDirectories(self, filter: str = None) -> None:\n self._addressBar.setText(str(self._currPath))\n self._resetMainFileView()\n fileIco = 
QIcon(':file_sm')\n folderIco = QIcon(':folder_sm')\n fileCutIco = QIcon(':file_sm_cut')\n folderCutIco = QIcon(':folder_sm_cut')\n for file in self._currPath.glob('*'):\n if self._isPathHidden(file):\n continue\n if filter and filter.lower() not in file.name.lower():\n continue\n if file.is_dir():\n size = QStandardItem('')\n type = QStandardItem('Folder')\n icon = folderIco if Path(file) not in self._fileClipboard else folderCutIco\n else:\n size = QStandardItem(self._prettifySize(file.stat().st_size))\n type = QStandardItem(f'{file.suffix.upper()}{\" \" if file.suffix else \"\"}File')\n icon = fileIco if Path(file) not in self._fileClipboard else fileCutIco\n item = QStandardItem(icon, file.name)\n mod_date_str = dt.datetime.fromtimestamp(file.stat().st_mtime).strftime('%d.%m.%Y %H:%M')\n mod_date = QStandardItem(mod_date_str)\n self._model.appendRow([item, type, mod_date, size])\n self._mainFileView.sortByColumn(1, Qt.DescendingOrder)", "def test_filter_dir():\n\n repo_list = filter_repos(fixtures.config_dict_expanded, repo_dir=\"*github_project*\")\n\n assert len(repo_list) == 1\n for r in repo_list:\n assert r['name'] == 'kaptan'", "def listdir_nohidden(path):\n\treturn glob.glob(os.path.join(path, '*'))", "def get_samples_file(foldername, filter=None):\n samples = []\n for file in os.listdir(foldername):\n if filter and file.find(filter) == -1:\n continue\n for sample in sfile(foldername + '/' + file, None).get_samples():\n samples.append(sample)\n return samples", "def get_filters(self, name=False):\n filtfile_list = self.get_value(\"FILTER_LIST\").split(\",\")\n if not name:\n return filtfile_list\n return [io.filterfile_to_filtername(filt) for filt in filtfile_list]" ]
[ "0.68968344", "0.6543039", "0.6297752", "0.617669", "0.6166819", "0.60795903", "0.5977339", "0.59732294", "0.596002", "0.5898916", "0.58962566", "0.5863355", "0.5831567", "0.5811614", "0.57734287", "0.5728591", "0.57237214", "0.56886095", "0.5684366", "0.5653274", "0.56437254", "0.56101996", "0.55913484", "0.55909306", "0.5549898", "0.55497336", "0.55284953", "0.54955506", "0.5494408", "0.5493815" ]
0.65556586
1
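
The stored document for this query is only a signature. A sketch of the described behaviour, assuming an explicit path argument (the stub presumably takes the directory from its enclosing object):

    import os

    def names(path, filter=None):
        # Every entry name in *path*; keep only those the filter accepts.
        entries = os.listdir(path)
        if filter is not None:
            entries = [name for name in entries if filter(name)]
        return entries
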
Return a sequence of information objects. Return item info objects (see lsinfo) for the files in a directory. If the filter is not None, include only those names for which the filter returns a true value.
def ls(filter=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files", "def ls(path, filter=None):", "def scan_directories(data_dir, file_filter):\n\n root = os.walk(data_dir)\n\n print('Scanning for files...')\n output = []\n\n for directory in root:\n\n files = directory[2]\n\n # Valid dataset contains video files of both halves and an accompanying label\n if file_filter(files):\n output.append(directory[0])\n\n print('Done')\n\n return output", "async def get_files_metadata(\n location_id: LocationID,\n uuid_filter: str = \"\",\n expand_dirs: bool = Query(\n True,\n description=(\n \"Automatic directory expansion. This will be replaced by pagination the future\"\n ),\n ),\n):", "def _listfiles(dirname):\n for root, dirs, files in os.walk(dirname):\n fkey = os.path.basename(root)\n f = []\n for name in files:\n key = os.path.splitext(name)[0]\n listfilters[key] = os.path.join(root, name)\n f.append(key)\n if f:\n showfilters[fkey] = f", "def list_image_files(dir, filter=None):\n for entry in os.listdir(dir):\n path = os.path.join(dir, entry)\n if os.path.isdir(path):\n for p in list_image_files(path, filter):\n yield p\n elif any((entry.lower().endswith(ext) for ext in image_exts)):\n if filter and not filter(path):\n continue\n yield path", "def _listDirectories(self, filter: str = None) -> None:\n self._addressBar.setText(str(self._currPath))\n self._resetMainFileView()\n fileIco = QIcon(':file_sm')\n folderIco = QIcon(':folder_sm')\n fileCutIco = QIcon(':file_sm_cut')\n folderCutIco = QIcon(':folder_sm_cut')\n for file in self._currPath.glob('*'):\n if self._isPathHidden(file):\n continue\n if filter and filter.lower() not in file.name.lower():\n continue\n if file.is_dir():\n size = QStandardItem('')\n type = QStandardItem('Folder')\n icon = folderIco if Path(file) not in self._fileClipboard else folderCutIco\n else:\n size = QStandardItem(self._prettifySize(file.stat().st_size))\n type = QStandardItem(f'{file.suffix.upper()}{\" \" if file.suffix else \"\"}File')\n icon = fileIco if Path(file) not in self._fileClipboard else fileCutIco\n item = QStandardItem(icon, file.name)\n mod_date_str = dt.datetime.fromtimestamp(file.stat().st_mtime).strftime('%d.%m.%Y %H:%M')\n mod_date = QStandardItem(mod_date_str)\n self._model.appendRow([item, type, mod_date, size])\n self._mainFileView.sortByColumn(1, Qt.DescendingOrder)", "def handle_full_info(files, directories, args):\n result_info = []\n if not files and len(directories) == 1:\n d = list(directories.keys())[0]\n result_info.extend(full_info(directories[d], args, d))\n log.debug(result_info)\n return result_info\n\n if files:\n result_info.extend(full_info(files, args))\n for d in directories:\n result_info.append(f'{d}:')\n result_info.extend(full_info(directories[d], args, d))\n log.debug(result_info)\n return result_info", "def __getFileList(self, path, filterRe):\n path = os.path.abspath(path)\n files = []\n for dirname, _, names in os.walk(path):\n files.extend([os.path.join(dirname, f)\n for f in names\n if re.match(filterRe, f)]\n )\n return files", "def names(path, filter=None):", "def scan_dir(self, directory=\".\"):\n for root, dirs, files in os.walk(directory, topdown=False):\n for name in files:\n for filetype in self.allowed_file_types:\n if name.split(\".\")[-1] == filetype:\n 
self.song_list.append(os.path.join(root, name))", "def getImmediateFiles(aDir):\n return [name for name in os.listdir(aDir)\n if os.path.isfile(os.path.join(aDir,name))]", "def FS_filter(self, at_data, *args, **kwargs) -> dict:\n\n b_status : bool = True\n l_file : list = []\n l_dirHits : list = []\n l_dir : list = []\n str_path : str = at_data[0]\n al_file : list = at_data[1]\n\n if len(self.args['fileFilter']):\n if self.args['fileFilterLogic'].upper() == 'OR':\n al_file = [x \\\n for y in self.args['fileFilter'].split(',') \\\n for x in al_file if y in x]\n else:\n for y in self.args['fileFilter'].split(','):\n al_file = [x for x in al_file if y in x]\n\n if len(self.args['dirFilter']):\n l_dirHits = [str_path \\\n for y in self.args['dirFilter'].split(',') \\\n if y in str_path]\n if self.args['dirFilterLogic'].upper() == 'AND':\n if len(l_dirHits) == len(self.args['dirFilter'].split(',')):\n for y in self.args['dirFilter'].split(','):\n l_dirHits = [x for x in l_dirHits if y in x]\n else:\n l_dirHits = []\n if len(l_dirHits):\n # Remove any duplicates in the l_dirHits: duplicates can occur\n # if the tokens in the filter expression map more than once\n # into the leaf node in the <str_path>, as a path that is\n #\n # /some/dir/in/the/space/1234567\n #\n # and a search filter on the dirspace of \"123,567\"\n [l_dir.append(x) for x in l_dirHits if x not in l_dir]\n else:\n # If no dir hits for this dir, then we zero out the\n # file filter\n al_file = []\n\n if len(al_file):\n al_file.sort()\n l_file = al_file\n b_status = True\n else:\n self.dp.qprint( \"No valid files to analyze found in path %s!\" %\n str_path, comms = 'warn', level = 5)\n l_file = None\n b_status = False\n return {\n 'status': b_status,\n 'l_file': l_file\n }", "def listFilesInDir(self, path, recursive=False, fileNameOnly=True, filter=None):\n self._checkActive()\n def _process(args, path, ttype, moddate=0, size=0, md5hash=\"\"):\n fileNameOnly, filter, pathsreturn = args \n if ttype == \"F\":\n if (filter is None) or fnmatch.fnmatch(path, filter):\n #fullpath=q.system.fs.joinPaths(path, fileNameOnly)\n if fileNameOnly:\n pathsreturn.append(q.system.fs.getBaseName(path))\n else:\n pathsreturn.append(path)\n pathsreturn=[]\n self.walk(_process, (fileNameOnly, filter, pathsreturn) , path, recursive=recursive) \n return pathsreturn", "def list_and_filter(self, pattern, root_path):\n for path, dirs, files in os.walk(os.path.abspath(root_path)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)", "def DirItems():\n return diritems", "def list_files(dir: str, valid_extensions: list = None, filter_fn=None) -> list:\n res = list(os.path.join(dir, x.name) for x in os.scandir(dir) if x.is_file())\n return [file_path for file_path in res if\n (valid_extensions is None or get_file_ext(file_path) in valid_extensions) and\n (filter_fn is None or filter_fn(file_path))]", "def showFiles(self):\n self.listFiles.clear()\n path = self.listDirs.currentItem().text()\n dirList = [item for item in os.listdir(path) if (item[0] != '.' 
and item[-1] != '~' and item[0] != '$')]\n # ^ consider all files/folders except hidden and temporary ones.\n self.listFiles.addItems(dirList)", "def lsinfo(path):", "def full_info(files: List[str], args, dir_: str ='.') -> List[str]:\n temp_info = []\n for item in files:\n f_info = {}\n f_st = os.stat(os.path.join(CURRENT_DIR, dir_, item))\n f_info['mpde'] = f'{stat.filemode(f_st.st_mode):10}'\n f_info['nlink'] = f'{f_st.st_nlink:>3}'\n f_info['uid'] = f'{f_st.st_uid:>3}'\n size = f_st.st_size\n if args.block_size:\n size = ceil(size / args.block_size)\n f_info['size'] = f'{size:>8}'\n date = dt.datetime.fromtimestamp(f_st.st_mtime)\n if (dt.datetime.now() - date).days / 30 > 6:\n date_format = '%b %d %Y'\n else:\n date_format = '%b %d %I:%M'\n f_info['time'] = f'{date.strftime(date_format)} '\n f_info['name'] = f'{item:<}'\n temp_info.append(\n ' '.join([f_info['mpde'], f_info['nlink'], f_info['uid'],\n f_info['size'], f_info['time'], f_info['name']])\n )\n temp_info.append('\\n')\n return temp_info", "def scan_dir(self, dir):\n import pathlib\n import magic\n\n for filename in find_all_files(dir):\n self.filelist.append({\n \"filename\": filename,\n \"mime\": magic.from_file(filename, mime=True),\n \"size_bytes\": os.path.getsize(filename),\n \"ext\": pathlib.Path(filename).suffix\n })", "def build_file_list(location, filters):\n f = []\n for (dir_path, dir_name, file_names) in os.walk(location):\n for file in file_names:\n f.append(os.path.join(dir_path, file))\n obj_list = map(lambda file: os.path.join(location, file), f)\n\n if type(filters) == list:\n for filter in filters:\n obj_list = [i for i in obj_list if filter in i]\n else:\n obj_list = [i for i in obj_list if filters in i]\n\n return obj_list", "def _GetSubFileEntries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory:\n tar_file = self._file_system.GetTARFile()\n if tar_file:\n for path_spec in self._directory.entries:\n location = getattr(path_spec, 'location', None)\n if location is None:\n continue\n\n kwargs = {}\n try:\n kwargs['tar_info'] = tar_file.getmember(location[1:])\n except KeyError:\n kwargs['is_virtual'] = True\n\n yield TARFileEntry(\n self._resolver_context, self._file_system, path_spec, **kwargs)", "def filter(self):\n self._printer('Standard Walk')\n count = Counter(length=3)\n for directory in self.directory:\n self._printer('Searching ' + directory)\n for root, directories, files in os.walk(directory, topdown=self.topdown):\n root = root[len(str(directory)) + 1:]\n self._printer(str(count.up) + \": Explored path - \" + str(root), stream=True)\n if self.filters.validate(root):\n # Check that non-empty folders flag is on and we're at the max directory level\n if self.filters.non_empty_folders and self.filters.get_level(root) == self.filters.max_level:\n # Check that the path is not an empty folder\n if os.path.isdir(directory + os.sep + root):\n # Get paths in folder without walking directory\n paths = os.listdir(directory + os.sep + root)\n\n # Check that any of the paths are files and not just directories\n if paths and any(os.path.isfile(os.path.join(directory, p)) for p in paths):\n self.add_path(directory, root)\n\n else:\n for filename in files:\n fullname = os.path.join(root, filename)\n if self.filters.validate(fullname):\n # Join the two strings in order to form the full filepath.\n self.add_path(directory, fullname)", "def listfiles(self, *path):\n dir = self.localpath(*path)\n files = []\n for root, dirs, fnms in os.walk(dir):\n for f in 
fnms:\n if f[-5:] == '.info' and os.path.exists(os.path.join(root, f[:-5])):\n try:\n _open_file_info(os.path.join(root, f))\n files.append(\n path + tuple(_split_path(\n os.path.relpath(os.path.join(root, f[:-5]), start=dir)\n )))\n except ValueError:\n pass\n return files", "def process_directory(dir_path, items):\n result = []\n for item in items:\n name = os.path.join(dir_path, item)\n if os.path.isfile(name) and not os.path.islink(name):\n for mask in masks:\n if fnmatch.fnmatch(name, mask):\n result.append(os.path.abspath(name))\n break\n return result", "def get_samples_file(foldername, filter=None):\n samples = []\n for file in os.listdir(foldername):\n if filter and file.find(filter) == -1:\n continue\n for sample in sfile(foldername + '/' + file, None).get_samples():\n samples.append(sample)\n return samples", "def _GetSubFileEntries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory:\n for path_spec in self._directory.entries:\n yield APMFileEntry(self._resolver_context, self._file_system, path_spec)", "def ls(self):\n return self._zip_file.infolist()", "def extract(apath, ffilter=[]):\n\n files = []\n\n def extract_recursive(curr_apath):\n \"\"\"Look into archive recursively to extract files considering ffilter\"\"\"\n\n handler = resolve_format(curr_apath)\n unpacker = HandlersFactory.get_handler(handler)\n _files = unpacker.files_list(curr_apath)\n\n for f in _files:\n if is_matched(f, ffilter=ffilter):\n _fpath = unpacker.extract(curr_apath, f)\n files.append(_fpath)\n if is_archive(f):\n _apath = unpacker.extract(curr_apath, f)\n extract_recursive(_apath)\n\n extract_recursive(apath)\n return files" ]
[ "0.62276477", "0.61657846", "0.6048434", "0.5977725", "0.59174025", "0.58965987", "0.57888776", "0.56596655", "0.5631385", "0.5593354", "0.5388888", "0.5364172", "0.53641665", "0.5358325", "0.53024876", "0.5293489", "0.52633387", "0.5261047", "0.52554226", "0.5251259", "0.52484536", "0.52426285", "0.5237178", "0.52329415", "0.5206662", "0.5202485", "0.51963997", "0.5191432", "0.51797116", "0.51636386" ]
0.6325731
0
Outputs the file at name to a stream. Data are copied starting from start. If end is not None, data are copied up to end.
def readfile(name, outstream, start=0, end=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writefile(name, instream, start=None, end=None, append=False):", "def writefile(path, instream, start=None, end=None, append=False):", "def readfile(path, outstream, start=0, end=None):", "def output(self, _in, out, **kwds):\n out.write(_in.read())", "def stream_to_file(self, path):\n # Using default state of files being overwritten for now\n if os.path.exists(path):\n os.remove(path)\n\n # Stream downloaded contents to file and show progress\n with open(path, 'wb') as f:\n for chunk in self.stream.iter_content(chunk_size=self.chunk_size):\n f.write(chunk)\n self.progress += int(len(chunk))\n self.prog_bar.show(self.progress)", "def _stream(self):\n logger.info('getting meta-data')\n while not self.handle.has_metadata():\n time.sleep(0.1)\n\n #self.handle.rename_file(0, 'test.mp4')\n\n while not self.handle.is_seed():\n stat = self.handle.status()\n\n print 'downloading %.2f%%'%(stat.progress * 100)\n sys.stdout.flush()\n\n time.sleep(1)", "def write_out_on_get_next(self, arg: Name):\n res = self.get_next(arg)\n while res and self.check_end_streaming(res) is False:\n self.write_out(res)\n res = self.get_next(arg)\n self.last_write_out()", "def write(stream, outfile, flush):\n try:\n # Writing bytes so we use the buffer interface (Python 3).\n buf = outfile.buffer\n except AttributeError:\n buf = outfile\n\n for chunk in stream:\n buf.write(chunk)\n if flush:\n outfile.flush()", "def _WriteStream(self, stream_name, stream_data):\n # TODO: this can raise an IOError e.g. \"Stale NFS file handle\".\n # Determine if this be handled more error resiliently.\n\n # Prevent zipfile from generating \"UserWarning: Duplicate name:\".\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self._zipfile.writestr(stream_name, stream_data)", "def addFileToStream(filename, outstream, header=None, outputIsDir=False):\n if outputIsDir:\n # Special case if output is a directory\n copyFragmentOutputDir(filename, outstream)\n return\n\n # if outstream is file,stream pair, get stream\n outstream=getStream(outstream)\n\n if header is not None:\n outstream.write(header)\n outstream.write('\\n')\n\n with open(filename, 'rt') as f:\n for line in f:\n outstream.write(line)", "def file_write(self, name: str, output: str) -> None:\n\n self._event_loop.call_soon_threadsafe(\n tpartial(self._file_write_in_thread, name, output)\n )", "def download(self, name, max_buf_length):\n full_path = self.path(name)\n fsize = self.__volume.getsize(full_path)\n current_offset = 0\n while current_offset < fsize:\n buf, l = self.safe_read_chunk(name, current_offset, max_buf_length)\n current_offset += l\n yield (buf, l, current_offset)", "def copy_file_to_stdout(file_):\n while True:\n block = file_.read(const.BUFFER_SIZE)\n if not block:\n break\n const.STDOUT.write(block)", "def _save_to_file(filename, data, start=0, header_size=None):\n if header_size is None:\n header_size = 0\n item_dtype = data.dtype\n # Open file as necessary\n opened = False\n if isinstance(filename, str):\n fd = open(filename, 'rb+')\n opened = True\n else:\n fd = filename\n # Seek to halo location and write\n offset = header_size + (start * item_dtype.itemsize)\n fd.seek(offset, os.SEEK_SET)\n data.tofile(fd)\n if opened:\n fd.close()", "def inout(input_, output_):\n while True:\n chunk = input_.read(1024)\n if not chunk:\n break\n output_.write(chunk)", "def _newstream(self, name):\n name = create_string_buffer(ccharp(name))\n # (const char* filename, environ_ns* env, const char* access)\n cnetica.NewFileStream_ns.argtypes = [c_char_p, 
c_void_p, c_char_p]\n cnetica.NewFileStream_ns.restype = c_void_p\n return cnetica.NewFileStream_ns(name, self.env, None) # file_p", "def emit(self, stream):\n # Create the header structure.\n\n SIZE_PLACEHOLDER = '0xZYXWVUTS'\n cabinet = etree.Element('FileCabinet')\n cabinet.set('HeaderSize', SIZE_PLACEHOLDER)\n\n files = etree.SubElement(cabinet, 'Files')\n offset = 0\n\n for info in self._files.values():\n f = etree.SubElement(files, 'File')\n f.set('Name', info.name)\n f.set('Size', str(info.size))\n f.set('Offset', str(offset))\n offset += info.size\n\n # Serialize and patch in the actual header size. With a\n # non-pathological XML serialization, the HeaderSize item will occur\n # within the first SIZE_REGION bytes while the first filename will\n # occur beyond the first SIZE_REGION bytes, meaning that we'll be\n # resistant if someone tries to break us by using a filename that\n # includes SIZE_PLACEHOLDER.\n\n SIZE_REGION = 90\n header = stringify_xml_doc(cabinet, indent=True)\n header = header.encode('utf-8')\n size_ascii = '0x{:08x}'.format(len(header)).encode('us-ascii')\n filled_size = header[:SIZE_REGION].replace(SIZE_PLACEHOLDER.encode('us-ascii'), size_ascii)\n header = filled_size + header[SIZE_REGION:]\n\n stream.write(header)\n\n # The rest is straightforward.\n\n for info in self._files.values():\n stream.write(info.contents)", "def copySubRangeOfFile(inputFile, fileStart, fileEnd, outputFileHandle):\n with open(inputFile, 'r') as fileHandle:\n fileHandle.seek(fileStart)\n data = fileHandle.read(fileEnd - fileStart)\n assert len(data) == fileEnd - fileStart\n outputFileHandle.write(data)", "def readfile(input_stream, offset, size):\n input_stream.seek(offset)\n dest = input_stream.read(size)\n if dest:\n return dest", "def export(self, stream):\n pass", "def __do_write(filestream, seq, header=None):\n if header is not None:\n filestream.write(header + '\\n') # double check newlines\n try:\n for line in chunks(seq, 70):\n filestream.write(line + '\\n')\n except Exception as e:\n print(e)", "def flush(self, name=None):\n if name is None:\n name = self.filename\n with open(name, 'w') as f:\n f.write(self.flushs())", "async def stream_file(self, file: Union[str, io.BufferedReader], **kwargs) -> None:\n await self.relay(\"stream_file\")(file, **kwargs)", "def open_output(name=None):\n return Output(name)", "def write(self, filename, data):\n raise NotImplementedError", "def write(self, filename, data, hdr):\n pass", "def copy_file(input, output):\n for f in input:\n while True:\n chunk = f.read(1024)\n if not chunk:\n break\n output.write(chunk)\n output.flush()", "def get_file(self, size):\n file = open(self.FILENAME, \"w\")\n file.seek(1024 * 1024 * size)\n file.write('\\x00')\n file.close()", "def savenet(self, name):\n file_p = self._newstream(name)\n # (const net_bn* net, stream_ns* file)\n cnetica.WriteNet_bn.argtypes = [c_void_p, c_void_p]\n cnetica.WriteNet_bn.restype = None\n cnetica.WriteNet_bn(self.net, file_p)", "def beginFileOutput(self):\n self._outputFilepath = self.dataSet[self._outputFileLabel]\n self._outputFile = open(self._outputFilepath, 'w')" ]
[ "0.73203975", "0.66282195", "0.6548188", "0.52931905", "0.5192701", "0.51389235", "0.50570816", "0.50302005", "0.49630135", "0.4937502", "0.49246177", "0.49229074", "0.49182338", "0.48734298", "0.48445198", "0.4824233", "0.4819832", "0.48137543", "0.47911045", "0.47882125", "0.47587287", "0.47488737", "0.4737642", "0.47336307", "0.47111204", "0.46874234", "0.46802023", "0.46792683", "0.4661831", "0.46460363" ]
0.731194
1
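The readfile record above keeps only the function signature as its document, while the query describes a copy-from-offset operation. A minimal sketch consistent with that docstring — assuming name is a path on the local filesystem, outstream is a writable binary file object, and end is exclusive; none of these details are stated in the record itself:

CHUNK = 64 * 1024  # arbitrary copy buffer size

def readfile(name, outstream, start=0, end=None):
    # Copy bytes from the file at `name` into `outstream`, starting at `start`
    # and stopping at `end` when `end` is given.
    with open(name, "rb") as f:
        f.seek(start)
        remaining = None if end is None else max(end - start, 0)
        while remaining is None or remaining > 0:
            chunk = f.read(CHUNK if remaining is None else min(CHUNK, remaining))
            if not chunk:
                break
            outstream.write(chunk)
            if remaining is not None:
                remaining -= len(chunk)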
Write data to a file. If start or end is not None, then only part of the file is written. The remainder of the file is unchanged. If start or end is specified, it must be nonnegative. If end is None, then the file is truncated after the data are written. If end is not None, parts of the file after end, if any, are unchanged. If end is not None and there isn't enough data in instream to fill out the file, then the missing data are undefined. If neither start nor end is specified, then the file contents are overwritten. If start is specified and the file doesn't exist or is shorter than start, the file will contain undefined data before start. If append is true, start and end are ignored.
def writefile(name, instream, start=None, end=None, append=False):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writefile(path, instream, start=None, end=None, append=False):", "def _save_to_file(filename, data, start=0, header_size=None):\n if header_size is None:\n header_size = 0\n item_dtype = data.dtype\n # Open file as necessary\n opened = False\n if isinstance(filename, str):\n fd = open(filename, 'rb+')\n opened = True\n else:\n fd = filename\n # Seek to halo location and write\n offset = header_size + (start * item_dtype.itemsize)\n fd.seek(offset, os.SEEK_SET)\n data.tofile(fd)\n if opened:\n fd.close()", "def append_end(self, data):\r\n with open(self.file_name, 'a', encoding='utf-8') as self.file:\r\n self.file.write(data)", "def write(self, data, meta):\n try:\n self.last_active = time.time()\n self.open_file(now=meta.time)\n prefix = format_prefix(meta).encode()\n\n if self.start_of_line:\n self.file.write(prefix)\n\n if data.endswith(b\"\\n\"):\n self.file.write(data[:-1].replace(b'\\n', b'\\n' + prefix) + b'\\n')\n self.start_of_line = True\n else:\n self.file.write(data.replace(b'\\n', b'\\n' + prefix))\n self.start_of_line = False\n\n self.last_write_was_error = False\n except Exception as e:\n if not self.last_write_was_error:\n print('Failed to write to', self.path, ':', e, file=sys.stderr)\n traceback.print_exc(file=sys.stdout)\n self.last_write_was_error = True", "def write(self, arg, **kwargs):\r\n if hasattr(arg, 'seek'):\r\n self._tofile(arg, **kwargs)\r\n else:\r\n with open(arg, 'wb') as fid:\r\n self._tofile(fid, **kwargs)", "def write(init_time, end_time, vpk, media, rms):\n global data_file\n\n # atualiza o arquivo de dados\n update()\n\n # Salva os dados no arquivo\n data_file.write(\n str(init_time) + ',' + str(end_time) + ',' +\n str(vpk) + ',' + str(media) + ',' + str(rms) + '\\n'\n )", "def write_data(filename, data, data_start_format, data_end_format,\n exclude=['(',')']):\n with open(filename, 'r') as data_file:\n file_data = data_file.readlines()\n write_lines = False\n count = 0\n data = [len(data)] + data\n for ind in range(len(file_data)):\n if data_start_format in file_data[ind]:\n write_lines = True\n ind += 1\n elif write_lines and len([i for i in exclude if i in file_data[ind]])==0:\n file_data[ind] = str(data[count]) + \"\\n\"\n count += 1\n elif write_lines and (data_end_format in file_data[ind]):\n write_lines = False\n break\n with open(filename, 'w') as data_file:\n data_file.writelines(file_data)\n print (\"File updated: \", filename)", "def writeData( self, file, bAddBeginOfDataChunk = True ):\n self.writeSpecificData( file, self.data, bAddBeginOfDataChunk = bAddBeginOfDataChunk )", "def get_between(self, start, end):\n now = datetime.now()\n now = datetime(now.year, now.month, now.day)\n \n assert isinstance(start, datetime), 'start need to be datetime instance'\n assert isinstance(end, datetime), 'end need to be datetime instance'\n assert start < end, 'start need to be less than end'\n assert end < now, 'end need to be less or equal than yesterday'\n assert start >= start_date, 'no data before \\\"2003-01-01\\\"'\n \n strftime = datetime.strftime\n self.db.DBFILE = \\\n strftime(start, date_str) + \"+\" + strftime(end, date_str)\n \n \n # write all the data in the file at once\n lst_dict = self._helper_get_between(start, end)\n self.db.save_iter(lst_dict)", "def write(self, filename, data):\n raise NotImplementedError", "def write(self, args, file_dat):\n assert self.checker_(file_dat)\n file_path = self.path(args)\n file_str = self.writer_(file_dat)\n autofile.write_file(file_path, file_str)", "def _WriteTaskStart(self, task_start):\n if 
self.storage_type != definitions.STORAGE_TYPE_TASK:\n raise IOError('Task start not supported by storage type.')\n\n stream_name = 'task_start.{0:06d}'.format(self._last_task)\n if self._HasStream(stream_name):\n raise IOError('Task start: {0:06d} already exists.'.format(\n self._last_task))\n\n task_start_data = self._SerializeAttributeContainer(task_start)\n\n data_stream = _SerializedDataStream(\n self._zipfile, self._temporary_path, stream_name)\n data_stream.WriteInitialize()\n data_stream.WriteEntry(task_start_data)\n data_stream.WriteFinalize()", "def upload_range( # type: ignore\n self, data, # type: bytes\n start_range, # type: int\n end_range, # type: int\n validate_content=False, # type: Optional[bool]\n timeout=None, # type: Optional[int]\n encoding='UTF-8',\n **kwargs\n ):\n # type: (...) -> Dict[str, Any]\n if self.require_encryption or (self.key_encryption_key is not None):\n raise ValueError(\"Encryption not supported.\")\n if isinstance(data, six.text_type):\n data = data.encode(encoding)\n\n content_range = 'bytes={0}-{1}'.format(start_range, end_range)\n content_length = end_range - start_range + 1\n try:\n return self._client.file.upload_range( # type: ignore\n range=content_range,\n content_length=content_length,\n optionalbody=data,\n timeout=timeout,\n validate_content=validate_content,\n cls=return_response_headers,\n **kwargs)\n except StorageErrorException as error:\n process_storage_error(error)", "def write(cls, file, data):\n file.write(data)", "def _write(self, data, length, error, move_start=True):\n idxs = self.get_indexes(self._end, length, self.maxsize)\n self.move_end(length, error, move_start)\n self._data[idxs] = data", "def write_data(infbfile,begin_N,dur_N,outfbfile):\n infbfile.seek_to_sample(begin_N)\n for i in range(begin_N,(begin_N+dur_N)):\n data = infbfile.read_sample()\n data.tofile(outfbfile)", "def write_to_file(start_runtime, contents, write_mode='a'):\n with open(f\"{start_runtime}.txt\", write_mode) as f:\n f.write(\"Filename\\t\\tMaxTrack\\tNumInst\\t\\tTimeSig\\t\\tTPB\\n\")\n f.write(contents)", "def generic_log_filter(logfile, start, end, inplace=True, outfile=None):\n\n if not os.path.exists(logfile):\n raise errors.filters.EFilterException(f\"Failed finding {logfile} to process\")\n\n if not inplace and not outfile:\n raise errors.filters.EFilterException(\"Non inplace saving requires specifying an output file\")\n\n capturing = False\n processed_file = \"\"\n with open(logfile) as _file:\n _LOG.info(f\"Processing file {logfile}\")\n for line in _file:\n\n # Find the start position\n if start in line:\n capturing = True\n\n # Capture the log line if we are within the capturing state\n if capturing:\n processed_file += line\n\n # Find the end position\n if capturing and end in line:\n break\n else:\n _LOG.info(f\"End of {logfile} not found, capturing everything past start position.\")\n\n # Don't overwrite file if nothing was found\n if not processed_file:\n _LOG.info(\"Couldn't find start/end for logfile, skipping processing.\")\n return\n\n # Overwrite the existing file\n _outfile = logfile\n if not inplace and outfile:\n _LOG.info(f\"Saving filtered {logfile} to {_outfile}\")\n _outfile = outfile\n\n _LOG.info(\"Saving filtered log\")\n with open(_outfile, \"w\") as text_file:\n text_file.write(processed_file)", "def write(self, data):\n return self._write(self.wfile, data)", "def write_start_file(output_dir, start_time=None, time_budget=None,\r\n task_name=None):\r\n ingestion_pid = os.getpid()\r\n start_filename = 'start.txt'\r\n 
start_filepath = os.path.join(output_dir, start_filename)\r\n with open(start_filepath, 'w') as f:\r\n f.write('ingestion_pid: {}\\n'.format(ingestion_pid))\r\n f.write('task_name: {}\\n'.format(task_name))\r\n f.write('time_budget: {}\\n'.format(time_budget))\r\n f.write('start_time: {}\\n'.format(start_time))\r\n logger.debug(\"Finished writing 'start.txt' file.\")", "def write_no_seek(meta_file, data_block):\n position = meta_file.tell()\n # seek 0 bytes from the end of file (2)\n meta_file.seek(0, 2)\n meta_file.write(data_block)\n meta_file.seek(position)", "def write_data_to_file(data1, data2, data3, data4, data5):\n with data_file as open('data_file.txt','w'):\n data_file.write(data1 +'\\n')\n data_file.write(data2 +'\\n')\n data_file.write(data3 +'\\n')\n data_file.write(data4 +'\\n')\n data_file.write(data5 +'\\n')", "def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False):\n\n from .timeseriesdata import TimeSeriesData, TimeSeriesDataArchive\n\n if not aggregation_type:\n aggregation_type = self.sensor_type.aggregation_type\n elif aggregation_type not in (i[0] for i in AGGREGATION_CHOICES):\n raise exceptions.IncorrectAggregationError(\"'{}' is not a valid aggregation\".format(aggregation_type))\n\n data = self._get_aggregated_data(\n data_start,\n data_end,\n AGGREGATE_TO_ONE_VALUE,\n aggregation_type,\n )\n\n logger.debug(\"to archive: %s\", data)\n\n archived = TimeSeriesDataArchive(\n start=data_start,\n end=data_end,\n value=data[0].value,\n sensor=self,\n aggregation_type=aggregation_type,\n )\n archived.save()\n\n logger.debug(\"archived %s to %s with %s: %s\", archived.start, archived.end, self.sensor_type.aggregation_type, archived.value)\n\n if delete:\n TimeSeriesData.objects.filter(\n sensor=self,\n ts__gte=data_start,\n ts__lt=data_end,\n ).delete()\n\n return archived", "def write_file(*args, **kwargs): # real signature unknown\n pass", "def clear_range( # type: ignore\n self, start_range, # type: int\n end_range, # type: int\n timeout=None, # type: Optional[int]\n **kwargs\n ):\n # type: (...) 
-> Dict[str, Any]\n if self.require_encryption or (self.key_encryption_key is not None):\n raise ValueError(\"Unsupported method for encryption.\")\n\n if start_range is None or start_range % 512 != 0:\n raise ValueError(\"start_range must be an integer that aligns with 512 file size\")\n if end_range is None or end_range % 512 != 511:\n raise ValueError(\"end_range must be an integer that aligns with 512 file size\")\n content_range = 'bytes={0}-{1}'.format(start_range, end_range)\n try:\n return self._client.file.upload_range( # type: ignore\n timeout=timeout,\n cls=return_response_headers,\n content_length=0,\n file_range_write=\"clear\",\n range=content_range,\n **kwargs)\n except StorageErrorException as error:\n process_storage_error(error)", "def readfile(name, outstream, start=0, end=None):", "def add_range(self, start, end) -> bool:\n start = _normalize_datetime(start)\n end = _normalize_datetime(end)\n assert end > start\n\n if self._start_time is None:\n self._start_time = start\n\n if start < self._start_time:\n delta = int((self._start_time - start).total_seconds() / 60)\n self._start_time = start\n self._backing_int = self._backing_int << delta\n\n start_idx = self._datetime_to_index(start)\n end_idx = self._datetime_to_index(end)\n idx_range = end_idx - start_idx\n range_mask = ((1 << (idx_range + 1)) - 1) << start_idx\n\n has_overlap = (self._backing_int & range_mask) > 0\n self._backing_int |= range_mask\n return has_overlap", "def end(self, end):\n if end is None:\n raise ValueError(\"Invalid value for `end`, must not be `None`\") # noqa: E501\n\n self._end = end", "def _write_staight_line(\n self,\n start: Point,\n end: Point,\n emission: float,\n info: EmissionInfo,\n source_group: int,\n section: int, # Section number of the line\n ):\n z_start = info.height\n z_end = info.height\n if info.height_over_buildings:\n z_start += self.grid.building_heights[self.grid.get_index(start.x, start.y)]\n z_end += self.grid.building_heights[self.grid.get_index(end.x, end.y)]\n\n with open(self.file_lines, \"a\") as f:\n # Write the line\n f.write(\n f\"unnamed,{section},{source_group},{start.x},{start.y},{z_start},\"\n f\"{end.x},{end.y},{z_end},{info.width},-{info.vertical_extension},0,0,\"\n f\"{emission},0,0,0,0\\n\"\n )", "def readfile(path, outstream, start=0, end=None):" ]
[ "0.75953966", "0.5372479", "0.5263298", "0.5215831", "0.518739", "0.51277757", "0.4973634", "0.49608546", "0.48990738", "0.48797482", "0.48632792", "0.48159334", "0.47887868", "0.4715249", "0.4707024", "0.46998763", "0.4688589", "0.46282938", "0.4616939", "0.46119153", "0.45464283", "0.4528774", "0.45170906", "0.45138517", "0.45138305", "0.45034188", "0.4502182", "0.44737503", "0.44729927", "0.44651812" ]
0.7525721
1
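The writefile record is likewise signature-only, but its docstring pins down fairly specific semantics: partial overwrite between start and end, truncation when end is None, and append mode ignoring both offsets. A sketch that follows those rules, again assuming a local path and a readable binary instream (these assumptions are not part of the record):

import os

CHUNK = 64 * 1024  # arbitrary copy buffer size

def writefile(name, instream, start=None, end=None, append=False):
    if append:
        # append=True ignores start and end, per the docstring
        with open(name, "ab") as f:
            for chunk in iter(lambda: instream.read(CHUNK), b""):
                f.write(chunk)
        return
    if start is None and end is None:
        # No range given: overwrite the whole file.
        with open(name, "wb") as f:
            for chunk in iter(lambda: instream.read(CHUNK), b""):
                f.write(chunk)
        return
    start = start or 0
    mode = "r+b" if os.path.exists(name) else "w+b"  # keep bytes outside the written range
    with open(name, mode) as f:
        f.seek(start)  # seeking past EOF leaves undefined (zero-filled) data before start
        remaining = None if end is None else max(end - start, 0)
        while remaining is None or remaining > 0:
            chunk = instream.read(CHUNK if remaining is None else min(CHUNK, remaining))
            if not chunk:
                break
            f.write(chunk)
            if remaining is not None:
                remaining -= len(chunk)
        if end is None:
            f.truncate()  # docstring: truncate after the written data when end is None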
Factory which creates and returns a Dog model. If the file name or rating is not given, then a random one is used.
def create_dog(create_random_file_name): def dog_factory(rating=None, file_name=None): file_name = ( create_random_file_name() if file_name is None else file_name ) rating = random.randint(0, 5000) if rating is None else rating return models.Dog.create(file_name=file_name, rating=rating) return dog_factory
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, animal_factory=None):\n self.pet_factory = animal_factory", "def __init__(self, animal_factory=None):\n self.pet_factory = animal_factory", "def create_model(mode: str, path_to_checkpoint = None) -> LightningModule:\n\n assert mode != None and mode != ''\n\n if mode == 'scratch':\n if path_to_checkpoint != None:\n model = DogsBreedClassifier.load_from_checkpoint(path_to_checkpoint)\n else:\n model = DogsBreedClassifier()\n elif mode == 'densenet':\n if path_to_checkpoint != None:\n model = DogsBreedClassifierDenseNet.load_from_checkpoint(path_to_checkpoint)\n else:\n model = DogsBreedClassifierDenseNet()\n else:\n if path_to_checkpoint != None:\n model = DogsBreedClassifierEfficientNet.load_from_checkpoint(path_to_checkpoint)\n else:\n model = DogsBreedClassifierEfficientNet()\n\n return model", "def person_object_factory():\n person = {\n 'lastName': rl_fake().last_name(),\n 'gender': random.choice(('M', 'F'))\n }\n\n # Make the person's name match their gender.\n person['firstName'] = rl_fake().first_name_male() if person['gender'] == 'M' else rl_fake().first_name_female()\n\n # These are all optional in the DB. Over time, we'll try all possibilities.\n if flip():\n person['birthday'] = rl_fake().date_of_birth(minimum_age=18).strftime('%Y-%m-%d')\n if flip():\n person['phone'] = rl_fake().phone_number()\n if flip():\n person['email'] = rl_fake().email()\n return person", "def default_factory(*args, **kwargs):\n obj = RandomGameEntity()\n obj.build(*args, **kwargs)\n return obj", "def create_model(opts):\n # G = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.conv_dim)\n # D = DCDiscriminator(conv_dim=opts.conv_dim)\n G = DCGenerator()\n D = DCDiscriminator()\n\n return G, D", "def create_reid_model(name, *args, **kwargs):\r\n if name not in __factory:\r\n raise KeyError(\"Unknown model:\", name)\r\n return __factory[name](*args, **kwargs)", "def create(**kwargs):\n\n generator = AbilityGeneratorFactory()\n abilities = generator.create(method=kwargs.get('generator',\n BEST_OF_THREE), profession=kwargs.get('profession'))\n return Dwarf(abilities, kwargs['st'])", "def model_from_gdsfactory(\n component: Component, dirpath=gf.CONFIG[\"sparameters\"], **kwargs\n) -> Model:\n kwargs.pop(\"function_name\", \"\")\n kwargs.pop(\"module\", \"\")\n component = gf.call_if_func(component, **kwargs)\n pins, f, s = sim.read_sparameters_lumerical(component=component, dirpath=dirpath)\n\n def interpolate_sp(freq):\n return interpolate(freq, f, s)\n\n Model.pin_count = len(pins)\n m = Model()\n m.pins = PinList([Pin(component=m, name=pins[i]) for i, _ in enumerate(pins)])\n m.__setattr__(\"sparams\", (f, s))\n m.s_parameters = interpolate_sp\n m.freq_range = (m.sparams[0][0], m.sparams[0][-1])\n m.wavelengths = speed_of_light / np.array(f)\n m.s = s\n return m", "def generate_food() -> FoodItem:\n presets = random.choice(FOOD_BANK)\n return FoodItem(presets['name'], presets['hp'], presets['msg'])", "def _random_model(self, input_size, output_size, task, config: dict) -> AbstractModel:\n return create_random_model(input_size, output_size, config, task)", "def load(cls, pickle_fp):\n with open(pickle_fp, \"rb\") as fp:\n my_instance = dill.load(fp)\n try:\n model_fp = os.path.splitext(pickle_fp)[0]+\".h5\"\n current_model = load_model(model_fp)\n setattr(my_instance.model, \"model\", current_model)\n except Exception:\n pass\n return my_instance", "def get_random_pet():\n resp = HTTP_request.get(' https://api.petfinder.com/v2/animals',\n params={\n \"limit\": 100,\n },\n 
headers={\"Authorization\": f\"Bearer {pet_finder_token}\"})\n\n pets = resp.json()[\"animals\"]\n\n random_pet = random.choice(pets)\n\n return {\"name\": random_pet[\"name\"], \"age\": random_pet[\"age\"], \"photo_url\": random_pet[\"photos\"][0][\"medium\"]}", "def create_goat(name):\n weight = random.randint(50, 100)\n insult_damage = 250 - weight\n\n return Goat(name, weight, insult_damage)", "def build_patch_discriminator(self, model_shape, filters=32, k_size=4, drop=False, rate=0.5, summary=False, model_file=None, name='gan_d_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n \"\"\"\n Create a Discriminator Model using hyperparameters values defined as follows\n \"\"\"\n init = RandomNormal(stddev=0.02)\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n c_dims = model_shape[2]\n\n input_shape = (n_rows, n_cols, c_dims)\n input_layer = Input(shape=input_shape, name=name+'input')\n\n d = self.Conv2D_Block(input_layer, filters, k_size=k_size, name=name+'1', bn=False)\n d = self.Conv2D_Block(d, 2*filters, k_size=k_size, name=name+'2')\n d = self.Conv2D_Block(d, 4*filters, k_size=k_size, name=name+'3')\n d = self.Conv2D_Block(d, 8*filters, strides=1, k_size=k_size, name=name+'4')\n d = self.Conv2D_Block(d, 8*filters, strides=1, k_size=k_size, name=name+'5')\n\n if drop:\n d = Dropout(rate=0.5, name=name+'_dropout')(d, training=True)\n logits = Conv2D(1, k_size, strides=1, padding='same', kernel_initializer=init, name=name+'logits')(d)\n out = Activation('sigmoid', name=name+'sigmoid')(logits)\n\n model = Model(inputs=[input_layer], outputs=[out, logits], name='Discriminator_'+name[-3:])\n if (summary):\n model.summary()\n return model", "def factory(self, name):\n\t\tif os.path.isdir(name): return Dir(name)\n\t\telse: return File(name)", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def _get_human_models_file(self):\n\n base_url = 'ftp.flybase.net'\n human_disease_dir = 'releases/current/precomputed_files/human_disease'\n from ftplib import FTP\n ftp = FTP(base_url) # connect to host\n ftp.login()\n ftp.cwd(human_disease_dir)\n l = ftp.nlst() # get list of files\n ftp.quit()\n f = None\n f_list = [\n i for i, x in enumerate(l)\n if re.match(r'allele_human_disease_model', x)]\n if len(f_list) == 0:\n logger.error(\"Can't find the human_disease_model file\")\n elif len(f_list) > 1:\n logger.error(\n \"There's >1 human disease model file, \" +\n \"and I don't know which to choose: %s\", str(l))\n else:\n f = l[f_list[0]]\n\n if f is not None:\n # cat the url together\n file_url = '/'.join(('ftp:/', base_url, human_disease_dir, f))\n self.files['disease_models']['url'] = file_url\n\n # while we're at it, set the version...\n m = re.match(\n r'allele_human_disease_model_data_fb_(\\d+_\\d+).tsv.gz', f)\n # allele_human_disease_model_data_fb_2015_03.tsv.gz\n if m:\n ver = 'FB' + m.group(1)\n self.version_num = ver\n\n return", "def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> FileEvidence:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return FileEvidence()", "def _create_base_model(self, modality):\n\n if modality == \"RGB\":\n in_channels = 3\n elif modality == \"Flow\":\n in_channels = 10\n elif modality == \"Audio\":\n in_channels = 1\n\n model_dir = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n )\n model_dir = os.path.join(model_dir, 
\"weights\")\n\n is_audio = True if modality == \"Audio\" else False\n\n if \"vgg\" in self.base_model_name:\n base_model = VGG(self.cfg.model.vgg.type, modality, in_channels)\n elif \"resnet\" in self.base_model_name:\n base_model = Resnet(self.cfg.model.resnet.depth, modality, in_channels)\n elif self.base_model_name == \"bninception\":\n pretrained = \"kinetics\" if modality == \"Flow\" else \"imagenet\"\n base_model = bninception(\n in_channels,\n modality,\n model_dir=model_dir,\n pretrained=pretrained,\n is_audio=is_audio,\n attend=self.use_attention,\n )\n\n return base_model", "def from_dict(cls, dikt) -> \"HealthRating\":\n return util.deserialize_model(dikt, cls)", "def __init__(self,\n model: Union[str, io.IOBase, DM, None] = None,\n name: Optional[str] = None,\n database = None):\n super().__init__(model=model, name=name, database=database)", "def _create_macho_dsym_from_uuid(project, cpu_name, uuid, fileobj,\n object_name):\n extra = {}\n if project is None:\n cls = GlobalDSymFile\n file_type = 'global.dsym'\n else:\n cls = ProjectDSymFile\n extra['project'] = project\n file_type = 'project.dsym'\n\n h = hashlib.sha1()\n while 1:\n chunk = fileobj.read(16384)\n if not chunk:\n break\n h.update(chunk)\n checksum = h.hexdigest()\n fileobj.seek(0, 0)\n\n try:\n rv = cls.objects.get(uuid=uuid, **extra)\n if rv.file.checksum == checksum:\n return rv\n except cls.DoesNotExist:\n pass\n else:\n # The checksum mismatches. In this case we delete the old object\n # and perform a re-upload.\n rv.delete()\n\n file = File.objects.create(\n name=uuid,\n type=file_type,\n headers={\n 'Content-Type': 'application/x-mach-binary'\n },\n )\n file.putfile(fileobj)\n try:\n with transaction.atomic():\n return cls.objects.create(\n file=file,\n uuid=uuid,\n cpu_name=cpu_name,\n object_name=object_name,\n **extra\n )\n except IntegrityError:\n file.delete()\n return cls.objects.get(uuid=uuid, **extra)", "def from_dict(cls, dikt) -> 'Galaxy':\n return util.deserialize_model(dikt, cls)", "def create_from_file(\n cls,\n model_file_path: str,\n index_file_path: Optional[str] = None) -> \"ImageSearcher\":\n options = ImageSearcherOptions(\n base_options=_BaseOptions(file_name=model_file_path),\n search_options=_SearchOptions(index_file_name=index_file_path))\n return cls.create_from_options(options)", "def read_json(cls, filename, **kwargs):\n # Open, json load, and close a json file\n f = open(filename, \"r\")\n data = json.load(f)\n f.close()\n\n # Grab all properties from data-structure\n necessary_args = [\"wildtype\", \"genotypes\", \"phenotypes\"]\n options = {\n \"genotypes\": [],\n \"phenotypes\": [],\n \"wildtype\": [],\n \"stdeviations\": None,\n \"mutations\": None,\n \"n_replicates\": 1,\n }\n # Get all options for map and order them\n for key in options:\n # See if options are in json data\n try:\n options[key] = data[key]\n except KeyError:\n pass\n # Override any properties with manually entered kwargs passed directly\n # into method\n options.update(kwargs)\n args = []\n for arg in necessary_args:\n val = options.pop(arg)\n args.append(val)\n # Create an instance\n gpm = cls(args[0], args[1], args[2], **options)\n return gpm", "def __init__(self,\n model: Union[str, io.IOBase, DM, None] = None,\n name: Optional[str] = None,\n database = None,\n **kwargs):\n assert name is None, 'name is not used by this class'\n assert database is None, 'database is not used by this class'\n super().__init__(model=model, name=name, database=database, **kwargs)", "def create_model(model_name, random_state, 
epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model", "def get_split(split_name, dataset_dir, file_pattern=None, reader=None):\n\tif split_name not in SPLITS_TO_SIZES and split_name[:-2] not in SPLITS_TO_SIZES:\n\t\traise ValueError('split name %s was not recognized.' % split_name)\n\n\tif not file_pattern:\n\t\tfile_pattern = _FILE_PATTERN\n\tfile_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\t#Allowing None in the signature so that the dataset_factory can use the default\n\tif reader is None:\n\t\treader = tf.TFRecordReader\n\n\tkeys_to_features = {\n\t\t'image/encoded': tf.FixedLenFeature(\n\t\t\t(), tf.string, default_value=''),\n\t\t'image/format': tf.FixedLenFeature(\n\t\t\t(), tf.string, default_value='jpeg'),\n\t\t'image/class/label': tf.FixedLenFeature(\n\t\t\t[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),\n\t}\n\titems_to_handlers = {\n\t\t'image': slim.tfexample_decoder.Image(),\n\t\t'label': slim.tfexample_decoder.Tensor('image/class/label'),\n\t}\n\n\tdecoder = slim.tfexample_decoder.TFExampleDecoder(\n\t\tkeys_to_features, items_to_handlers)\n\n\tlabels_to_names = None\n\tif dataset_utils.has_labels(dataset_dir):\n\t\tlabels_to_names=dataset_utils.read_label_file(dataset_dir)\n\t\n\tif split_name in SPLITS_TO_SIZES:\n\t\tnum_samples = SPLITS_TO_SIZES[split_name]\n\telif split_name[:-2] in SPLITS_TO_SIZES:\n\t\tnum_samples = SPLITS_TO_SIZES[split_name[:-2]]\n\n\treturn slim.dataset.Dataset(data_sources=file_pattern,\n\t\treader=reader,\n\t\tdecoder=decoder,\n\t\tnum_samples=num_samples,\n\t\titems_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n\t\tnum_classes=_NUM_CLASSES,\n\t\tlabels_to_names=labels_to_names)", "def get_factory(self):\n if self.factory is None:\n this_dir, this_filename = os.path.split(__file__)\n data_path = os.path.join(this_dir, \"RDKitPh4.fdef\")\n self.factory = ChemicalFeatures.BuildFeatureFactory(data_path)\n return self.factory" ]
[ "0.5565132", "0.5565132", "0.5531412", "0.52198267", "0.51935834", "0.5106767", "0.50903696", "0.5042248", "0.5031069", "0.5011618", "0.5001675", "0.499962", "0.4996231", "0.4969871", "0.4945932", "0.49347904", "0.49257568", "0.49164423", "0.48965567", "0.48892954", "0.48567277", "0.48371026", "0.4817609", "0.47941643", "0.47640377", "0.4757948", "0.4752797", "0.47514626", "0.4749779", "0.4736108" ]
0.78184766
0
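The create_dog document is a pytest-style fixture that returns a factory rather than a model instance, so a test can build as many Dog rows as it needs. A hypothetical use, assuming create_dog and create_random_file_name are registered as fixtures (e.g. in a conftest.py) and that models.Dog exposes the file_name and rating fields the factory sets:

def test_dog_factory_defaults(create_dog):
    dog = create_dog(file_name="fido.jpg")   # rating omitted -> random.randint(0, 5000)
    assert dog.file_name == "fido.jpg"
    assert 0 <= dog.rating <= 5000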
Factory which returns a random image file name.
def create_random_file_name(): def random_file_name_factory(): length = random.randint(10, 15) chars = string.ascii_letters + string.digits + "-_" return f"{''.join(random.choice(chars) for _ in range(length))}.jpg" return random_file_name_factory
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filename_generate(image_class, size=12, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):\n\tnew_filename = time.strftime(\"%d-%m-%Y_\")\n\tnew_filename = new_filename + ''.join(random.choice(chars) for _ in range(size))\n\tnew_filename = new_filename + \"_P\" + str(image_class)\n\treturn new_filename", "def get_a_picture_randomly(self):\n files = os.listdir(self.image_directory)\n if len(files) == 0:\n return None\n full_image_name = os.path.abspath(self.image_directory + random.choice(files))\n return full_image_name", "def random_filename():\n\n return ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))", "def generate_image_filename():\n now = datetime.now().strftime('%a-%w-%b-%H:%M:%S')\n return 'CCTV_{0}.jpg'.format(now)", "def random_image():\n img_dir = \"./static\"\n img_list = os.listdir(img_dir)\n img_path = os.path.join(img_dir, random.choice(img_list))\n return img_path", "async def filename_generator(self):\n chars = list(string.ascii_letters+string.digits)\n name = ''\n for i in range(random.randint(9, 25)):\n name += random.choice(chars)\n\n if name not in self.player['audio_files']:\n return name\n\n return await self.filename_generator()", "def random_filename_upload_to(path):\n\n def f(instance, filename):\n ext = filename.split('.')[-1]\n filename = '{0}.{1}'.format(uuid.uuid4().hex, ext)\n return os.path.join(path, filename)\n\n return f", "def getImagePath(self)->str:\n\n returnStr = '../../../../assets/image/{}.png'.format(randint(1,15))\n return returnStr", "def get_generated_image_name(full_image_url):\r\n\r\n logging.debug('get_generated_image_name({})'.format(full_image_url))\r\n\r\n image_name = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\r\n image_extension = full_image_url.split(\".\")[-1]\r\n image_name = image_name + \".\" + image_extension\r\n logging.debug('get_generated_image_name - image_name = {}'.format(image_name))\r\n return image_name", "def _gen_image_filename(instance, filename):\n # First, store the original filename in the model\n instance.original_filename = filename\n\n return _unique_path(instance.owner.pk, filename)", "def get_rand_img():\n import urllib\n import os\n import glob\n\n pics = glob.glob('/home/cody_techngs/PycharmProjects/ProjTest/ActiveAMT/ActiveAMT_FLASK/static/images/HITs/rand*')\n nums = []\n\n for pic in pics:\n nums.append(int(pic.split('rand_img')[1].split('.')[0]))\n\n unique_num = False\n new_rand_num = 0\n\n while not unique_num:\n new_rand_num = random.randrange(1, 2000)\n if new_rand_num not in nums:\n unique_num = True\n\n img_name = 'rand_img{}.jpg'.format(new_rand_num)\n dl_location = os.getcwd() + '/ActiveAMT/ActiveAMT_FLASK/static/images/HITs/' + img_name\n url = 'https://unsplash.it/400/300/?random'\n urllib.urlretrieve(url, dl_location)\n\n return 'static/images/HITs/{}'.format(img_name)", "def generate_file_name(self):\n self._session_iterator = None # New file invalidate old interator\n self._img_count += 1\n self._current_file = '{0}/frame_{1}.jpg'.format(self._relative_path,self._img_count)\n return self.current_file", "def make_img_name(file_ext='.png'):\r\n fn = []\r\n # format seqs and write out to temp file\r\n for i in range(0, 30):\r\n fn.append(choice(ALPHABET))\r\n return ''.join(fn) + file_ext", "def generate_file_filename(instance, filename):\n return _generate_filename(instance, filename, 'photos')", "def get_random_file():\n\n return random.choice(File.get_files())", "def get_image_path(instance, filename):\n filename, file_extension = 
path.splitext(filename)\n return path.join(str(uuid4()) + file_extension)", "def giverandomfilename(self,user,postfix=\"\"):\n return \"%s_%s_%s\" % (user.username.encode(\"ascii\",\"ignore\"),\n str(randint(10000,99999)),\n \"testfile%s.txt\" % postfix)", "def generate_image_name(self, image):\n return image.replace('shub://', '').replace('/', '-') + '.simg'", "def _get_image_name(self) -> str:\n dirname = os.path.basename(os.getcwd())\n default_image_name = f\"{dirname}_{self.config_name}\"\n image_name = self.config_options.get(\"image\", default_image_name)\n return image_name", "def append_random_number_to_filename(self, local_img_file):\n date = datetime.datetime.now()\n date_string = date.strftime(\"%m-%d-%Y\")\n return \"%s-glitched.%s\" % (local_img_file.split(\".\")[0], local_img_file.split(\".\")[1])", "def test_get_image_name(self):\n ssp = self._get_ssp_stor()\n\n def verify_image_name(name, checksum, expected):\n img_meta = image_meta.ImageMeta(name=name, checksum=checksum)\n self.assertEqual(expected, ssp._get_image_name(img_meta))\n self.assertTrue(len(expected) <= const.MaxLen.FILENAME_DEFAULT)\n\n verify_image_name('foo', 'bar', 'image_foo_bar')\n # Ensure a really long name gets truncated properly. Note also '-'\n # chars are sanitized.\n verify_image_name(\n 'Template_zw82enbix_PowerVM-CI-18y2385y9123785192364',\n 'b518a8ba2b152b5607aceb5703fac072',\n 'image_Template_zw82enbix_PowerVM_CI_18y2385y91'\n '_b518a8ba2b152b5607aceb5703fac072')", "def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"", "def _get_random_name(self, base_name):\n return base_name + '_' + self.__id_generator()", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def get_file_name(instance, filename):\n filename = make_unique_filename(filename)\n return os.path.join('uploads/profile_pics', filename)", "def generate_random_media_filepath(extension: str):\n\tfilename = f'{_generate_random_string(30)}{extension}'\n\treturn os.path.join(get_media_directory(), filename)", "def generate_random_name(filename):\n ext = filename.split('.')[-1]\n rns = [random.randint(0, len(LETTER_SET) - 1) for _ in range(3)]\n name = ''.join([LETTER_SET[rn] for rn in rns])\n return \"{new_fn}.{ext}\".format(new_fn=name, ext=ext)", "def generate_rand_name() -> str:\n suf = \"\".join(random.choices(string.ascii_uppercase + string.digits, k=6))\n return f\"exporters_{suf}\"", "def user_random_avatar():\n avatar_names = os.listdir(PATH)\n avatar_path = random.choice([avatar_image for avatar_image in avatar_names\n if os.path.isfile(os.path.join(PATH,avatar_image))])\n return PATH_RELATIVE+avatar_path", "def acquire_image_filename(self):\n\t\titem_key = self.contents_data[ITEM_KEY]\n\t\tif not item_key: return None\n\t\treturn \"portrait_\" + item_key + \".bmp\"" ]
[ "0.7617596", "0.71837944", "0.7169918", "0.7101182", "0.70534843", "0.698676", "0.6965958", "0.68606925", "0.68600625", "0.6857476", "0.67635787", "0.6722866", "0.6721171", "0.6692241", "0.66776305", "0.66626406", "0.66584057", "0.66395634", "0.66275424", "0.66211987", "0.6620777", "0.6585431", "0.65827733", "0.6575323", "0.6571974", "0.65718627", "0.65644217", "0.6544569", "0.6543904", "0.65313256" ]
0.8460935
0
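The companion create_random_file_name fixture is exercised the same way; each call to the returned factory yields a fresh ".jpg" name whose stem length comes from random.randint(10, 15). An illustrative test (the test name is made up):

def test_random_file_name_shape(create_random_file_name):
    name = create_random_file_name()
    stem, ext = name.rsplit(".", 1)
    assert ext == "jpg"
    assert 10 <= len(stem) <= 15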
return path of best checkpoint in a model_dir
def best_checkpoint(model_dir, model_name): filenames = glob.glob(os.path.join(model_dir,model_name+".*")) best = 0 best_ckpt = "" for fname in filenames: tmp = float(fname.replace(os.path.join(model_dir,model_name+"."),"")) if best < tmp: best = tmp best_ckpt = fname return best_ckpt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find_model(model_chkp_dir, mode='last'):\n\n if mode == 'last':\n file_name = sorted(os.listdir(model_chkp_dir))[-1]\n model_path = os.path.join(model_chkp_dir, file_name)\n\n elif mode == 'best':\n raise NotImplementedError\n\n return model_path", "def _find_last_checkpoint(self):\n highest_num, last_checkpoint = -np.inf, None\n for filename in os.listdir(self.logdir):\n # checkpoints look like logdir/model.ckpt-N\n # self._save_path is \"logdir/model.ckpt\"\n if os.path.basename(self._save_path) in filename:\n try:\n N = int(filename.split(\"-\")[1].split(\".\")[0])\n if N > highest_num:\n highest_num = N\n last_checkpoint = \"model.ckpt-\" + str(N)\n except ValueError:\n pass\n return os.path.join(self.logdir, last_checkpoint)", "def find_last(self):\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n f\"Could not find model directory under {self.model_dir}\")\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"OOD\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, f\"Could not find weight files in {dir_name}\")\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint", "def get_saved_model_path(training_ckpt_base):\n ckpt_dir = os.path.dirname(training_ckpt_base)\n # If using a checkpoint from the best_exporter, return its saved_model.\n if os.path.basename(ckpt_dir) == 'variables':\n return os.path.join(\n os.path.dirname(ckpt_dir),\n tf.saved_model.constants.SAVED_MODEL_FILENAME_PB)\n # If using a training checkpoint, still return the eval saved_model.\n else:\n saved_models_dir = os.path.join(ckpt_dir, 'export', 'best_exporter')\n saved_model_paths = tf.gfile.Glob(os.path.join(saved_models_dir, '*'))\n if saved_model_paths:\n return os.path.join(saved_model_paths[0],\n tf.saved_model.constants.SAVED_MODEL_FILENAME_PB)\n # Otherwise, there is not eval saved_model.\n else:\n return None", "def get_latest_saved_model(model_dir):\n saved_models = os.path.join(model_dir, 'best_models')\n saved_chkp = sorted([int(mdl) for mdl in os.listdir(saved_models)])\n latest = saved_chkp[-1]\n path = os.path.join(saved_models, '%d' % latest)\n\n # Next, find the full path to the saved model\n mdl_time = os.listdir(path)\n\n # Return the final path\n return os.path.join(path, mdl_time[-1])", "def find_checkpoint(load_dir, seen_step):\n ckpt = tf.train.get_checkpoint_state(load_dir)\n if ckpt and ckpt.model_checkpoint_path:\n global_step = extract_step(ckpt.model_checkpoint_path)\n if int(global_step) != seen_step:\n return int(global_step), ckpt.model_checkpoint_path\n return -1, None", "def find_last(self):\n # Get directory names. 
Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n return None, None\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"FCN_DenseNet\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n return dir_name, None\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return dir_name, checkpoint", "def get_best_model_path(fold: int) -> str:\n def parse_accuracy(filename: str) -> float:\n m = re.search(r\"__fold_\\d+_val_([01]\\.\\d+)\", filename)\n assert(m)\n return float(m.group(1))\n\n models = list(glob.glob(\"../models/*__fold_%d_val_*.hdf5\" % fold))\n accuracy = list(map(parse_accuracy, models))\n best = accuracy.index(max(accuracy))\n\n print(\"fold=%d best_model=%s\" % (fold, models[best]))\n return models[best]", "def poll_checkpoint_folder(\n checkpoint_folder: str, previous_ckpt_ind: int\n) -> Optional[str]:\n assert os.path.isdir(checkpoint_folder), (\n f\"invalid checkpoint folder \" f\"path {checkpoint_folder}\"\n )\n models_paths = list(\n filter(os.path.isfile, glob.glob(checkpoint_folder + \"/*\"))\n )\n models_paths.sort(key=os.path.getmtime)\n ind = previous_ckpt_ind + 1\n if ind < len(models_paths):\n return models_paths[ind]\n return None", "def get_best_known_model(cls, model_dir) -> Tuple[Optional[Path], int]:\n return cls._get_first_model(model_dir, sort='total_score', desc=False)", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n 
f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def _checkpoint_dir(job_log_dir: str) -> str:\n return os.path.join(job_log_dir, 'checkpoints')", "def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)", "def get_checkpoint():\n if ((FLAGS.run_dir or FLAGS.checkpoint_file) and\n FLAGS.bundle_file and not should_save_generator_bundle()):\n raise sequence_generator.SequenceGeneratorException(\n 'Cannot specify both bundle_file and run_dir or checkpoint_file')\n if FLAGS.run_dir:\n train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')\n return train_dir\n elif FLAGS.checkpoint_file:\n return os.path.expanduser(FLAGS.checkpoint_file)\n else:\n return None", "def get_latest_checkpoint_path(dirpath: str) -> Optional[str]:\n\n ret = None\n rank = get_global_rank()\n # Do all filesystem reads from rank 0 only\n if rank == 0:\n ret = _latest_checkpoint_path(dirpath)\n\n # If not running in a distributed setting, return as is\n if not (dist.is_available() and dist.is_initialized()):\n return ret\n\n # Otherwise, broadcast result from rank 0 to all ranks\n pg = PGWrapper(dist.group.WORLD)\n path_container = [ret] if rank == 0 else [None]\n pg.broadcast_object_list(path_container, 0)\n val = path_container[0]\n return val", "def latest_checkpoint(model_dir, model_name):\n ckpt_info_path = Path(model_dir) / \"checkpoints.json\"\n if not ckpt_info_path.is_file():\n return None\n with open(ckpt_info_path, 'r') as f:\n ckpt_dict = json.loads(f.read())\n if model_name not in ckpt_dict['latest_ckpt']:\n return None\n latest_ckpt = ckpt_dict['latest_ckpt'][model_name]\n ckpt_file_name = Path(model_dir) / latest_ckpt\n if not ckpt_file_name.is_file():\n return None\n\n return str(ckpt_file_name)", "def _get_checkpoint(self):\n ckpt = tf.train.get_checkpoint_state(self.model)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = ckpt.model_checkpoint_path\n else:\n raise RuntimeError('No checkpoint file found')\n return ckpt_path", "def test_model_checkpoint_path(tmpdir, logger_version, expected):\n tutils.reset_seed()\n model = EvalModelTemplate()\n logger = TensorBoardLogger(str(tmpdir), version=logger_version)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n overfit_pct=0.2,\n max_epochs=5,\n logger=logger\n )\n trainer.fit(model)\n\n ckpt_version = Path(trainer.ckpt_path).parent.name\n assert ckpt_version == expected", "def get_best_model_file_save_path(self):\n \n if self.best_model_file_saved_at_least_once:\n \n return self.absolute_model_file_save_path\n \n # create the base name path if not exists\n \n absolute_dirname = os.path.dirname(self.absolute_model_file_save_path)\n\n if not os.path.exists(absolute_dirname):\n\n os.makedirs(absolute_dirname)\n \n # update the model with respective path\n \n self.sql_model_instance.model_path = self.relative_model_file_save_path\n \n self.db.session.add(self.sql_model_instance)\n self.db.session.commit()\n \n # change the variable state to True\n \n self.best_model_file_saved_at_least_once = True\n \n return self.absolute_model_file_save_path", "def output_dir(self):\n return os.path.join(self.checkpoint_dir, self.model_dir)", "def get_most_recent_checkpoint(model_folder):\n checkpoints = [a.stem for a in model_folder.glob(\"*.index\")]\n latest_checkpoint = sorted(checkpoints, key=lambda x: -int(x.split('-')[-1]))[0]\n return latest_checkpoint", "def 
latest_savedmodel_path_from_base_path(base_path):\n\n protein_export_base_path = os.path.join(base_path, 'export/protein_exporter')\n\n suffixes = [\n x for x in tf.io.gfile.listdir(protein_export_base_path)\n if 'temp-' not in x\n ]\n\n if not suffixes:\n raise ValueError('No SavedModels found in %s' % protein_export_base_path)\n\n # Sort by suffix to take the model corresponding the most\n # recent training step.\n return os.path.join(protein_export_base_path, sorted(suffixes)[-1])", "def get_checkpoint_path(self) -> str:\n return self._j_checkpoint_storage.getCheckpointPath().toString()", "def load_checkpoint_train(cpdir, model, optimizer):\n start_epoch = 0\n start_global_step = 0\n if cpdir is not None:\n start_global_step, start_epoch = load_checkpoint(\n cpdir, model, optimizer)\n start_global_step += 1\n start_epoch += 1\n return start_global_step, start_epoch", "def get_model_path():\n misc_path = pkg_resources.resource_filename('sst', 'misc/')\n return os.path.abspath(os.path.join(misc_path, 'model.pickle'))", "def get_last_saved_model(cls, model_dir) -> Tuple[Optional[Path], int]:\n return cls._get_first_model(model_dir, sort='step', desc=True)", "def get_checkpoint_path(self, checkpoint_id, name, path=''):\n\t\tself.log.debug(\"getting checkpoint path %s, %s\", name, path)\n\t\tbasename, _ = os.path.splitext(name)\n\t\tfilename = u\"{name}-{checkpoint_id}{ext}\".format(\n\t\t\tname=basename,\n\t\t\tcheckpoint_id=checkpoint_id,\n\t\t\text=self.filename_ext,\n\t\t)\n\n\t\t# Checkpoints are stored in relative directories\n\t\t# e.g. given:\n\t\t# folder1/notebook1.py\n\t\t# ... the checkpoint is at ...\n\t\t# folder1/<checkpoint directory>/notebook1.py\n\t\tbase_path = os.path.join(path, self.checkpoint_dir)\t\t \n\t\tfull_path = self._get_os_path( filename, path=base_path)\n\t\tself.log.debug(\"checkpoint path + filename %s\" % full_path)\n\t\treturn full_path", "def get_model_dir(experiment_dir, model_id):\r\n model_dirs = glob.glob(os.path.join(experiment_dir, str(model_id) + \"-*\"), recursive=False)\r\n return None if len(model_dirs) == 0 else model_dirs[0]", "def get_model_path(directory):\n\n path = directory + \"/model-0.h5\"\n\n # Model name\n models = [f for f in os.listdir(directory) if f.endswith(\"h5\")]\n\n if len(models) > 0:\n # get greater version\n max_v = max([m.split(\"-\")[1] for m in models])\n m = [model for model in models if model.endswith(max_v)][0]\n path = directory + \"/\" + m\n\n return path", "def _save_model(self, checkpoint_dir):\n # Check whether the specified path exists or not\n isExist = os.path.exists(checkpoint_dir)\n\n if not isExist:\n # Create a new directory because it does not exist\n os.makedirs(checkpoint_dir)\n\n filename = self._get_checkpoint_name()\n path = checkpoint_dir + filename\n\n # Serialize the model checkpoint in to a Python Pickle file\n with open(path, 'wb') as f:\n pickle.dump(self._model, f)\n return path" ]
[ "0.77047443", "0.7275439", "0.7243931", "0.7167202", "0.7096826", "0.70633096", "0.70490897", "0.7047686", "0.6949422", "0.69273454", "0.69082046", "0.6876256", "0.6825796", "0.6814628", "0.6799923", "0.67915136", "0.67452896", "0.6720997", "0.6715767", "0.6666559", "0.6664646", "0.6615617", "0.6465116", "0.6429091", "0.6421637", "0.64079577", "0.63999915", "0.63659054", "0.6365554", "0.63595706" ]
0.7602403
1
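The best_checkpoint document assumes checkpoint files are named "<model_name>.<float score>" and returns the path with the largest numeric suffix, whereas latest_checkpoint in the next record consults a checkpoints.json index instead. A small illustration of the naming convention best_checkpoint relies on — the file names are invented, and best_checkpoint from the record above is assumed to be in scope:

import os
import tempfile

with tempfile.TemporaryDirectory() as model_dir:
    for score in ("0.8123", "0.8457"):
        # create empty files following the "<model_name>.<score>" pattern
        open(os.path.join(model_dir, f"mymodel.{score}"), "w").close()
    print(best_checkpoint(model_dir, "mymodel"))   # path ending in "mymodel.0.8457"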
return path of latest checkpoint in a model_dir
def latest_checkpoint(model_dir, model_name): ckpt_info_path = Path(model_dir) / "checkpoints.json" if not ckpt_info_path.is_file(): return None with open(ckpt_info_path, 'r') as f: ckpt_dict = json.loads(f.read()) if model_name not in ckpt_dict['latest_ckpt']: return None latest_ckpt = ckpt_dict['latest_ckpt'][model_name] ckpt_file_name = Path(model_dir) / latest_ckpt if not ckpt_file_name.is_file(): return None return str(ckpt_file_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find_last_checkpoint(self):\n highest_num, last_checkpoint = -np.inf, None\n for filename in os.listdir(self.logdir):\n # checkpoints look like logdir/model.ckpt-N\n # self._save_path is \"logdir/model.ckpt\"\n if os.path.basename(self._save_path) in filename:\n try:\n N = int(filename.split(\"-\")[1].split(\".\")[0])\n if N > highest_num:\n highest_num = N\n last_checkpoint = \"model.ckpt-\" + str(N)\n except ValueError:\n pass\n return os.path.join(self.logdir, last_checkpoint)", "def get_latest_saved_model(model_dir):\n saved_models = os.path.join(model_dir, 'best_models')\n saved_chkp = sorted([int(mdl) for mdl in os.listdir(saved_models)])\n latest = saved_chkp[-1]\n path = os.path.join(saved_models, '%d' % latest)\n\n # Next, find the full path to the saved model\n mdl_time = os.listdir(path)\n\n # Return the final path\n return os.path.join(path, mdl_time[-1])", "def find_last(self):\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n f\"Could not find model directory under {self.model_dir}\")\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"OOD\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, f\"Could not find weight files in {dir_name}\")\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint", "def get_most_recent_checkpoint(model_folder):\n checkpoints = [a.stem for a in model_folder.glob(\"*.index\")]\n latest_checkpoint = sorted(checkpoints, key=lambda x: -int(x.split('-')[-1]))[0]\n return latest_checkpoint", "def find_last(self):\n # Get directory names. 
Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n return None, None\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"FCN_DenseNet\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n return dir_name, None\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return dir_name, checkpoint", "def get_latest_checkpoint_path(dirpath: str) -> Optional[str]:\n\n ret = None\n rank = get_global_rank()\n # Do all filesystem reads from rank 0 only\n if rank == 0:\n ret = _latest_checkpoint_path(dirpath)\n\n # If not running in a distributed setting, return as is\n if not (dist.is_available() and dist.is_initialized()):\n return ret\n\n # Otherwise, broadcast result from rank 0 to all ranks\n pg = PGWrapper(dist.group.WORLD)\n path_container = [ret] if rank == 0 else [None]\n pg.broadcast_object_list(path_container, 0)\n val = path_container[0]\n return val", "def get_latest_checkpoint(cls, experiment_path):\n checkpoints_path = os.path.join(experiment_path, cls.CHECKPOINT_DIR_NAME)\n all_times = sorted(os.listdir(checkpoints_path), reverse=True)\n return os.path.join(checkpoints_path, all_times[0])", "def get_latest_checkpoint(ckpt_dir: str) -> Optional[str]:\n list_of_files = glob.glob(\"{}/*.ckpt\".format(ckpt_dir))\n latest_checkpoint = None\n if list_of_files:\n latest_checkpoint = max(list_of_files, key=os.path.getctime)\n return latest_checkpoint", "def poll_checkpoint_folder(\n checkpoint_folder: str, previous_ckpt_ind: int\n) -> Optional[str]:\n assert os.path.isdir(checkpoint_folder), (\n f\"invalid checkpoint folder \" f\"path {checkpoint_folder}\"\n )\n models_paths = list(\n filter(os.path.isfile, glob.glob(checkpoint_folder + \"/*\"))\n )\n models_paths.sort(key=os.path.getmtime)\n ind = previous_ckpt_ind + 1\n if ind < len(models_paths):\n return models_paths[ind]\n return None", "def _get_checkpoint(self):\n ckpt = tf.train.get_checkpoint_state(self.model)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = ckpt.model_checkpoint_path\n else:\n raise RuntimeError('No checkpoint file found')\n return ckpt_path", "def _checkpoint_dir(job_log_dir: str) -> str:\n return os.path.join(job_log_dir, 'checkpoints')", "def get_last_saved_model(cls, model_dir) -> Tuple[Optional[Path], int]:\n return cls._get_first_model(model_dir, sort='step', desc=True)", "def _find_model(model_chkp_dir, mode='last'):\n\n if mode == 'last':\n file_name = sorted(os.listdir(model_chkp_dir))[-1]\n model_path = os.path.join(model_chkp_dir, file_name)\n\n elif mode == 'best':\n raise NotImplementedError\n\n return model_path", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved 
model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def fetch_last_model_file(self):\n try:\n filename = self.model_files[-1]\n return self.make_path(filename)\n except IndexError:\n return None", "def get_oldest_checkpoint(cls, experiment_path):\n checkpoints_path = os.path.join(experiment_path, cls.CHECKPOINT_DIR_NAME)\n all_times = sorted(os.listdir(checkpoints_path))\n return os.path.join(checkpoints_path, all_times[0])", "def latest_savedmodel_path_from_base_path(base_path):\n\n protein_export_base_path = os.path.join(base_path, 'export/protein_exporter')\n\n suffixes = [\n x for x in tf.io.gfile.listdir(protein_export_base_path)\n if 'temp-' not in x\n ]\n\n if not suffixes:\n raise ValueError('No SavedModels found in %s' % protein_export_base_path)\n\n # Sort by suffix to take the model corresponding the most\n # recent training step.\n return os.path.join(protein_export_base_path, sorted(suffixes)[-1])", "def test_model_checkpoint_path(tmpdir, logger_version, expected):\n tutils.reset_seed()\n model = EvalModelTemplate()\n logger = TensorBoardLogger(str(tmpdir), version=logger_version)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n overfit_pct=0.2,\n max_epochs=5,\n logger=logger\n )\n trainer.fit(model)\n\n ckpt_version = Path(trainer.ckpt_path).parent.name\n assert ckpt_version == expected", "def get_checkpoint_path(self) -> str:\n return self._j_checkpoint_storage.getCheckpointPath().toString()", "def write_checkpoint(self, session):\n base_save_path = self.params.cp_save_dir+self.params.model_name+\"_v\"+self.params.version\n full_save_path = self.full_saver.save(session,\n save_path=base_save_path,\n global_step=self.global_step,\n latest_filename=self.params.cp_latest_filename)\n self.logger.log_info(\"Full model saved in file %s\"%full_save_path)\n return base_save_path", "def find_checkpoint(load_dir, seen_step):\n ckpt = tf.train.get_checkpoint_state(load_dir)\n if ckpt and ckpt.model_checkpoint_path:\n global_step = extract_step(ckpt.model_checkpoint_path)\n if int(global_step) != seen_step:\n return int(global_step), ckpt.model_checkpoint_path\n return -1, None", "def get_latest_path(self):\n files = [fname for fname in os.listdir(self.checkpoint_dir) if fname.endswith(\".pth\")]\n filepaths = [os.path.join(self.checkpoint_dir, 
filepath) for filepath in files]\n latest_file = max(filepaths, key=os.path.getctime)\n return latest_file", "def best_checkpoint(model_dir, model_name):\n filenames = glob.glob(os.path.join(model_dir,model_name+\".*\"))\n best = 0\n best_ckpt = \"\"\n for fname in filenames:\n tmp = float(fname.replace(os.path.join(model_dir,model_name+\".\"),\"\"))\n if best < tmp:\n best = tmp\n best_ckpt = fname\n return best_ckpt", "def get_checkpoint_path(self) -> Optional[str]:\n j_path = self._j_checkpoint_storage.getCheckpointPath()\n if j_path is None:\n return None\n else:\n return j_path.toString()", "def parse_checkpoint_dir(checkpoint_dir):\n paths = []\n subdirectories = tf.io.gfile.glob(os.path.join(checkpoint_dir, '*'))\n is_checkpoint = lambda f: ('checkpoint' in f and '.index' in f)\n for subdir in subdirectories:\n for path, _, files in tf.io.gfile.walk(subdir):\n if any(f for f in files if is_checkpoint(f)):\n latest_checkpoint_without_suffix = tf.train.latest_checkpoint(path)\n paths.append(os.path.join(path, latest_checkpoint_without_suffix))\n break\n return paths", "def get_saved_model_path(training_ckpt_base):\n ckpt_dir = os.path.dirname(training_ckpt_base)\n # If using a checkpoint from the best_exporter, return its saved_model.\n if os.path.basename(ckpt_dir) == 'variables':\n return os.path.join(\n os.path.dirname(ckpt_dir),\n tf.saved_model.constants.SAVED_MODEL_FILENAME_PB)\n # If using a training checkpoint, still return the eval saved_model.\n else:\n saved_models_dir = os.path.join(ckpt_dir, 'export', 'best_exporter')\n saved_model_paths = tf.gfile.Glob(os.path.join(saved_models_dir, '*'))\n if saved_model_paths:\n return os.path.join(saved_model_paths[0],\n tf.saved_model.constants.SAVED_MODEL_FILENAME_PB)\n # Otherwise, there is not eval saved_model.\n else:\n return None", "def get_checkpoint_path(self, checkpoint_id, name, path=''):\n\t\tself.log.debug(\"getting checkpoint path %s, %s\", name, path)\n\t\tbasename, _ = os.path.splitext(name)\n\t\tfilename = u\"{name}-{checkpoint_id}{ext}\".format(\n\t\t\tname=basename,\n\t\t\tcheckpoint_id=checkpoint_id,\n\t\t\text=self.filename_ext,\n\t\t)\n\n\t\t# Checkpoints are stored in relative directories\n\t\t# e.g. given:\n\t\t# folder1/notebook1.py\n\t\t# ... the checkpoint is at ...\n\t\t# folder1/<checkpoint directory>/notebook1.py\n\t\tbase_path = os.path.join(path, self.checkpoint_dir)\t\t \n\t\tfull_path = self._get_os_path( filename, path=base_path)\n\t\tself.log.debug(\"checkpoint path + filename %s\" % full_path)\n\t\treturn full_path", "def get_history_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, histories.DEFAULT_FILENAME_HISTORY)", "def output_dir(self):\n return os.path.join(self.checkpoint_dir, self.model_dir)", "def _get_checkpoint_name(self):\n dataset_basename = Path(self._train_dataset_path).resolve().stem\n filename = \"model_cpt_{}.pkl\".format(dataset_basename)\n return filename" ]
[ "0.77078223", "0.753826", "0.7491004", "0.7485388", "0.7307086", "0.7223708", "0.7215126", "0.7205785", "0.7110889", "0.70348877", "0.7023044", "0.7001256", "0.6954298", "0.69517803", "0.69449365", "0.6940163", "0.6933478", "0.6877685", "0.6873649", "0.6860138", "0.6805028", "0.6784692", "0.65825576", "0.65489674", "0.6529379", "0.6516671", "0.6481989", "0.64673406", "0.645477", "0.6416012" ]
0.77784103
0
Get stage folder from stage number
def stage_folder(stage_no): name = "stage.{}".format(stage_no) folder = path.join(output_path(), name) return ensure_path(folder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_scene_folder():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.get_scene_folder\")\n\n file_name = mc.file(q=True, sceneName=True)\n\n head, tail = os.path.split(file_name)\n\n flg.info(\"Scene fileName: {}\".format(tail))\n flg.info(\"Scene directory: {}\".format(head))\n\n return head", "def get_stage():\n try:\n filename = os.path.join(get_var('SITE'), \".stage\")\n f = open(filename, \"r\")\n stage = f.readline().strip()\n f.close()\n logger.debug(\"get stage: %s\" % (stage))\n return stage\n except:\n return reset_stage()", "def stage_image(filename, stage, index=None):\n name, ext = path.splitext(filename)\n if ext.lower() in VALID_DATA:\n filename = name\n # end if\n if index is not None:\n filename = \"{:02}.{}\".format(index, filename)\n # end if\n return path.join(stage_folder(stage), filename)", "def shpname(self):\n _, tail = os.path.split(self.url)\n return self.folder + ('/' + tail[:-4]) * 2", "def stage_url_for(stage):\n return '{base}/stages/{stage}'.format(\n base=job_url_for(stage.job),\n stage=stage.slug,\n )", "def stage_name(stages_dir):\r\n\r\n print(\"\\nLEVELS AVAILABLE:\"\r\n \"\\n\")\r\n stages_dir = os.path.expanduser(stages_dir)\r\n os.chdir(stages_dir)\r\n stage_lst = next(os.walk('.'))[1]\r\n os.chdir(config_writer.tool_path)\r\n\r\n for name in stage_lst:\r\n print(name)\r\n while True:\r\n stg_nm = input(\"\\nEnter stage name: \")\r\n if stg_nm not in stage_lst:\r\n print(\"\\nStage name not available! Try again.\")\r\n else:\r\n break\r\n\r\n return stg_nm", "def output_path():\n folder = path.join(path.curdir, \"stages\")\n folder = path.abspath(folder)\n return ensure_path(folder)", "def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")", "def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")", "def stage_data(filename, stage, index=None):\n name, ext = path.splitext(filename)\n if ext.lower() not in VALID_DATA:\n filename += VALID_DATA[0]\n # end if\n if index is not None:\n filename = \"{:02}.{}\".format(index, filename)\n # end if\n return path.join(stage_folder(stage), filename)", "def get_last_stage_directory(\n last_stage_version: Union[str, Path],\n last_stage_directory: Union[str, Path] = None,\n last_stage_root: Path = None,\n) -> Path:\n if last_stage_directory:\n warn(\n f'Usage of the \"last_stage_directory\" argument is deprecated. 
Please use \"last_stage_version instead.',\n Warning,\n )\n last_stage_directory = (\n last_stage_directory if last_stage_directory is not None else last_stage_version\n )\n # If last_stage_directory is an absolute path, the last_stage_root will be ignored here\n last_stage_directory = (\n last_stage_root / last_stage_directory\n if last_stage_root is not None\n else last_stage_directory\n )\n if not last_stage_directory.is_absolute():\n raise ValueError(f\"Invalid version path: {last_stage_directory}\")\n return last_stage_directory", "def stage_name(self) -> str:\n return self._values.get(\"stage_name\")", "def folder(self, step=None):\n if step is None:\n return self._obs_group_folder / self._obs_folder\n else:\n return Path(step) / self._obs_group_folder / self._obs_folder", "def folder(self, step=None):\n if step is None:\n return self._obs_group_folder / self._obs_folder\n else:\n return Path(step) / self._obs_group_folder / self._obs_folder", "def folder(self, step=None):\n if step is None:\n return self._obs_group_folder / self._obs_folder\n else:\n return Path(step) / self._obs_group_folder / self._obs_folder", "def get_filename_from_stage(stage: str, device: TorchDevice) ->str:\n if stage not in [PREPROCESSOR, PREDICTOR, POSTPROCESSOR]:\n raise ValueError(f'Invalid stage: {stage}.')\n if stage == PREDICTOR:\n return f'inference_{stage}-{device}.pt'\n else:\n return f'inference_{stage}.pt'", "def stage_name(self) -> str:\n return self._stage_name", "def get_live_stack(self, domain_name):\n response = self.api_client.get_base_path_mappings(\n domainName=domain_name,\n )\n\n live_stage = None\n for item in response[\"items\"]:\n if item[\"basePath\"] == self.live_base_path:\n live_stage = item[\"stage\"]\n break\n\n if live_stage not in self.aliases:\n print \"[FAIL] Stage name not supported must be one of %s\" % str(self.aliases)\n sys.exit(1)\n\n print \"[INFO] Current Live Colour %s\" % live_stage\n return live_stage", "def next_stage(current_stage, upgrade_stage):\n if upgrade_stage is not None:\n if upgrade_stage is \"current\":\n return current_stage\n if upgrade_stage is \"fork\":\n return Stage.fork\n if upgrade_stage is \"base\":\n return Stage.base\n if upgrade_stage is \"crio\":\n return Stage.crio\n\n if current_stage == Stage.bare:\n return Stage.base\n elif current_stage == Stage.base:\n return Stage.build\n elif current_stage == Stage.build:\n return Stage.install\n elif current_stage == Stage.install:\n echo('Warning: No next stage exists past the \"{}\" stage. 
Overwriting current stage instead.'.format(Stage.install))\n return Stage.install\n else:\n raise ClickException('The current stage of the VM, \"{}\", has no next stage specified.'.format(current_stage))", "def get_label_db_path(self, stage):\n db = None\n if stage == constants.TRAIN_DB:\n s = 'Training'\n elif stage == constants.VAL_DB:\n s = 'Validation'\n else:\n return None\n for task in self.tasks:\n if task.purpose == '%s Labels' % s:\n db = task\n return self.path(db.database) if db else None", "def get_sls_config_file(path, stage, region):\n for name in gen_sls_config_files(stage, region):\n if os.path.isfile(os.path.join(path, name)):\n return name\n return \"config-%s.json\" % stage # fallback to generic json name", "def get_stage(stage_string):\n stage_choices = {\n 'PUT_START' : MigrationRequest.PUT_START,\n 'PUT_BUILDING' : MigrationRequest.PUT_BUILDING,\n 'PUT_PENDING' : MigrationRequest.PUT_PENDING,\n 'PUT_PACKING' : MigrationRequest.PUT_PACKING,\n 'PUTTING' : MigrationRequest.PUTTING,\n 'VERIFY_PENDING' : MigrationRequest.VERIFY_PENDING,\n 'VERIFY_GETTING' : MigrationRequest.VERIFY_GETTING,\n 'VERIFYING' : MigrationRequest.VERIFYING,\n 'PUT_TIDY' : MigrationRequest.PUT_TIDY,\n 'PUT_COMPLETED' : MigrationRequest.PUT_COMPLETED,\n\n 'GET_START' : MigrationRequest.GET_START,\n 'GET_PENDING' : MigrationRequest.GET_PENDING,\n 'GETTING' : MigrationRequest.GETTING,\n 'GET_UNPACKING' : MigrationRequest.GET_UNPACKING,\n 'GET_RESTORE' : MigrationRequest.GET_RESTORE,\n 'GET_TIDY' : MigrationRequest.GET_TIDY,\n 'GET_COMPLETED' : MigrationRequest.GET_COMPLETED,\n\n 'DELETE_START' : MigrationRequest.DELETE_START,\n 'DELETE_PENDING' : MigrationRequest.DELETE_PENDING,\n 'DELETING' : MigrationRequest.DELETING,\n 'DELETE_TIDY' : MigrationRequest.DELETE_TIDY,\n 'DELETE_COMPLETED' : MigrationRequest.DELETE_COMPLETED,\n\n 'FAILED' : MigrationRequest.FAILED,\n 'FAILED_COMPLETED' : MigrationRequest.FAILED_COMPLETED\n }\n return(stage_choices[stage_string])", "def _get_subdir(self, dmc, year, trigger, pt):\n full_trigger = get_full_trigger(trigger)\n return '/'.join([dmc, str(year), full_trigger, str(pt)])", "def bin_root(self):\n return os.path.join(self.build_dir, self.build, \"stage0\")", "def get_project_path(window: 'Any') -> 'Optional[str]':\n if not window:\n return None\n num_folders = len(window.folders())\n if num_folders == 0:\n return get_directory_name(window.active_view())\n elif num_folders == 1:\n folder_paths = window.folders()\n return folder_paths[0]\n else: # num_folders > 1\n return find_path_among_multi_folders(\n window.folders(),\n window.active_view())", "def folder_runnum():\n now = datetime.datetime.now()\n runnum = 1\n while True:\n folder_name = f\"astroNN_{now.month:0{2}d}{now.day:0{2}d}_run{runnum:0{3}d}\"\n if not os.path.exists(folder_name):\n break\n else:\n runnum += 1\n\n return folder_name", "def _get_depth_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Depth/0.125/\"\n else:\n return \"Depth/0.25/\"\n else: \n return \"Depth/\"", "def video_directory_path(instance, filename):\n return 'gallery/video/{0}/{1}'.format(instance.video_name, filename)", "def folder(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"folder\")", "def dirname(self):\n _, tail = os.path.split(self.url)\n return self.folder + '/' + tail[:-4]" ]
[ "0.6684712", "0.654478", "0.64453286", "0.61957896", "0.6165433", "0.6139131", "0.6111561", "0.59132767", "0.59132767", "0.5840295", "0.5827037", "0.579603", "0.57570565", "0.57570565", "0.57570565", "0.5732925", "0.5711266", "0.5544683", "0.5538145", "0.5508859", "0.54853666", "0.5479354", "0.54166675", "0.5414204", "0.5405906", "0.5402739", "0.536238", "0.53572893", "0.5346172", "0.53394985" ]
0.80924505
0
Rounds to the nearest integer and clips out-of-boundary values. Intensity boundary is [0, 255].
def normalize(img): tol = 355 maxi = np.max(img) if maxi > tol: img = 255 * (img - (tol - 255)) / maxi # end if norm = np.round(img) norm[norm < 0] = 0 norm[norm > 255] = 255 return norm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clip_round(value):\n return max(0, min(np.round(value/64), 1023))", "def round_and_clip_image(image):\n \n for idx, pixel in enumerate(image[\"pixels\"]):\n if round(pixel) < 0 :\n image[\"pixels\"][idx] = 0\n elif round(pixel) > 255 :\n image[\"pixels\"][idx] = 255\n else:\n image[\"pixels\"][idx] = round(pixel)\n return image", "def iround(x):\n return int(round(x) - .5) + (x > 0)", "def iround(x):\n return ipart(x + 0.5)", "def round_and_clip_image(image):\n #initialize an image of all zeros\n new_image = {'height': image['height'], 'width': image['width'], 'pixels': image['pixels'].copy()}\n len_pixels = len(new_image['pixels'])\n #perform calculations to round and clip image\n for i in range(len_pixels):\n new_image['pixels'][i] = int(round(new_image['pixels'][i]))\n if new_image['pixels'][i] > 255:\n new_image['pixels'][i] = 255\n if new_image['pixels'][i] < 0:\n new_image['pixels'][i] = 0 \n return new_image", "def _custom_round(number: float, threshold=0.5) -> float:\n sign = np.copysign(1.0, number)\n number = abs(number)\n delta = number - np.trunc(number)\n if delta < threshold:\n return np.trunc(number) * sign\n else:\n return (np.trunc(number) + 1) * sign", "def normalize(x):\n # TODO: Implement Function\n \n return x/255", "def test_nearest_boundary_odd():\n assert _nearest_boundary(10, 19, 14, 0) == 0\n assert _nearest_boundary(10, 19, 14, 1) == 1", "def normalize_01(x):\n return x / 255.0", "def normalize(image):\r\n return image / 127.5 - 1.", "def RoundUp(value, boundary):\n return (value + boundary - 1) & ~(boundary - 1)", "def round_half_away_from_zero_inplace(a):", "def normalize(image):\n return image / 127.5 - 1.", "def scaleClipl(x):\n x = 0 if x < 0 else x\n x = 1 if x > 1 else x\n return int(round(x*255.))", "def round_half_away_from_zero(num):\n return np.sign(num) * np.floor(np.abs(num) + 0.5)", "def scaleClip(x):\n x = 0 if x < 0 else x\n x = 1 if x > 1 else x\n return int(round((x*.3+(x**3)*.7)*255))", "def normalize(img):\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img / 255", "def floor(x):\n return 0.0", "def normalize_image(image):\n return image / 255.", "def disc(x):\n return int(round(x))", "def clip(val):\n if val > 4.0:\n return 4.0\n elif val < -4.0:\n return -4.0\n else:\n return val", "def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if np.absolute(min-max) < 1.e-6:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def round(x):\n return int(x + copysign(0.5, x))", "def _normalize_image(self, img: np.ndarray) -> np.ndarray:\n i2 = img.astype(float) - self.bg\n i2 /= i2.max()\n return i2", "def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()", "def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()", "def test_nearest_boundary_even():\n assert _nearest_boundary(10, 20, 14, 0) == 0\n assert _nearest_boundary(10, 20, 14, 1) == 0\n assert _nearest_boundary(10, 20, 15, 0) == 1\n assert _nearest_boundary(10, 20, 15, 1) == 1", "def 
normalize_val(val, min_v, max_v):\n return (((val - min_v) / (max_v - min_v)) * 255).astype(np.uint8)", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume" ]
[ "0.659085", "0.6122267", "0.5983545", "0.5962671", "0.59548813", "0.5940595", "0.59342605", "0.5893791", "0.58673143", "0.5842229", "0.5834837", "0.5816023", "0.58125573", "0.5807283", "0.5797075", "0.5789674", "0.5788379", "0.5784362", "0.5770589", "0.57692945", "0.57363313", "0.5735548", "0.57272184", "0.57142663", "0.5705352", "0.5705352", "0.56956214", "0.56823015", "0.5672093", "0.5672093" ]
0.6147466
1
Deletes a stage folder
def delete_stage(stage): folder = stage_folder(stage) shutil.rmtree(folder) # delete old ensure_path(folder) # create new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_folder(path):\n command = ['rm', '-rf', TEST_DIR]\n file_operation(path, command)", "def _unstage_folder(dir_path):\n for dir_item in os.listdir(dir_path):\n full_path = os.path.join(dir_path, dir_item)\n if os.path.isfile(full_path) and dir_item != 'load.go':\n os.remove(full_path)", "def cleanup_staging_area(staging_path):\n if os.path.exists(staging_path):\n shutil.rmtree(staging_path)", "def delete(self):\n pdbox._args.get(\"dryrun\") or shutil.rmtree(self.path)\n pdbox.info(\"Deleted %s/\" % self.path)", "def _delete_root_dir(self):\n\n staf_request = ('DELETE ENTRY \"{0}\" RECURSE '\n 'CONFIRM '.format(unix_style_path(self._sut.bespoke_root)))\n\n result = self._staf_handle.submit(self._sut.network_address, 'fs', staf_request)\n\n if result.rc not in [result.Ok, result.DoesNotExist]:\n raise CoreError(result.result)", "def delete_folder(folder_path):\r\n if os.path.exists(folder_path):\r\n shutil.rmtree(folder_path)", "def delete_folder(folder_path):\n shutil.rmtree(folder_path)", "def delete_folder_from_s3(s3_folder, connection=None):\n if connection:\n run_out = connection.run(f\"aws s3 rm --recursive {s3_folder}\")\n else:\n run_out = run(f\"aws s3 rm --recursive {s3_folder}\")\n\n return run_out.return_code", "def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))", "def delete_folder(self, path):\n if not path_exists(path, self._store_folder):\n raise NotFoundException(\"\")\n rmdir(path)", "def handle_delete(uuid):\n location = os.path.join(app.config['UPLOAD_DIRECTORY'], uuid)\n print(uuid)\n print(location)\n shutil.rmtree(location)", "def handle_delete(uuid):\n location = os.path.join(app.config['UPLOAD_DIRECTORY'], uuid)\n print(uuid)\n print(location)\n shutil.rmtree(location)", "def _delete_sai_test_folder(ptfhost):\n logger.info(\"Delete SAI tests root folder: {0}.\".format(PTF_TEST_ROOT_DIR))\n ptfhost.file(path=PTF_TEST_ROOT_DIR, state=\"absent\")", "def delete_folder(path: str) -> None:\n\tuux.show_info(\"Deleting \" + path)\n\n\tif not os.path.exists(path):\n\t\t# Path does not exist\n\t\treturn\n\n\ttry:\n\t\tshutil.rmtree(path, True)\n\texcept OSError as ex:\n\t\tuux.show_warning(\"Failed to delete directory, \" + os.strerror(ex.errno))", "def rm_bundle_dir(output_path, uuid, db_targets):\n try:\n shutil.rmtree(output_path)\n\n # if people create s3 files, s3 file targets, inside of an s3 context,\n # then we will have to clean those up as well.\n\n for t in db_targets:\n t.rm()\n\n except IOError as why:\n _logger.error(\"Removal of hyperframe directory {} failed with error {}. 
Continuing removal...\".format(\n uuid, why))", "def delete_folder(self, instance, folder, where):\n\n instance = self.get_instance(instance)\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n subprocess.check_output([\"ssh\", key, username, 'rm', '-r', self.default_path_aws + where + folder])\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n # output = os.popen(\"ls\"+ \" | \" + \"ssh\"+ \" -i \"+ key +\" \"+ username).read()\n subprocess.check_output(\n [\"ssh\", \"-i\", key, username, 'rm', '-r', self.default_path_aws + where + folder])\n return \"Success to delete the folder \" + folder + \" from \" + self.default_path_aws + where\n except:\n return \"Fail to access the instance\"", "def cleanup(folder):\n os.system('rm -rf %s/*' % folder)", "def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)", "def unstage(self, path):\n self._git.index.remove(path)", "def remove_dir(path):\n pyCMD('hdfs', ['dfs', '-rm', '-r', '-f', '-skipTrash', path]).execute()", "def delete(self):\n pdbox._args.get(\"dryrun\") or os.remove(self.path)\n pdbox.info(\"Deleted %s\" % self.path)", "def delete_folder(path: str):\n try:\n if os.path.exists(path):\n shutil.rmtree(path)\n return True\n except:\n print(\"An error occured.\")", "def delete(self):\n if self.dir in self.filelist:\n self.remove(self.dir)", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/project-folders/%s\" % self.project_folder_id)", "def DeleteFolderContents(dir):\n create_dir(dir)\n shutil.rmtree(dir)\n create_dir(dir)", "def delete(seed):\n shutil.rmtree(os.path.join(DATA_DIR, seed))", "def _remove_workspace(self, destn_dir):\n _cmd = \"rm -rf {}\".format(destn_dir)\n return self._remote_cmd(_cmd)", "def cleanup(self):\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n\r\n session.home = self._config['dir']\r\n path = os.path.join(session.home, project.name)\r\n project.work_area(False, True, True, path=path)\r\n\r\n target_dir = os.path.normpath(os.path.join(self._config['dir'], project.name))\r\n _logger.info(\"Deleting snapshot under %s.\" % target_dir)\r\n if os.path.exists(target_dir):\r\n _logger.info(\"Deleting '%s'.\" % target_dir)\r\n fileutils.rmtree(target_dir)", "def delete_folder(self, name):\n return self.DeleteFolder(name, 0)", "def deleteImageFolder(pause=5):\n try:\n shutil.rmtree(imageFolder)\n except PermissionError:\n # Still busy creating the montage or something. Try once more\n time.sleep(pause)\n shutil.rmtree(imageFolder)\n except FileNotFoundError:\n # Folder already gone\n pass" ]
[ "0.6754427", "0.6659505", "0.6353486", "0.6346276", "0.63430285", "0.6243483", "0.6221596", "0.62102133", "0.62036866", "0.61407954", "0.6134758", "0.6134758", "0.6118617", "0.6117536", "0.6064411", "0.60489357", "0.6011915", "0.5986747", "0.5982103", "0.59773934", "0.59722", "0.59705275", "0.59657955", "0.59425247", "0.593383", "0.5927797", "0.59267884", "0.5925348", "0.59159285", "0.5900955" ]
0.8571731
0
Rescale molecule coords to a given factor
def rescale_molecule(path, factor): mol = Chem.MolFromMolFile(path, sanitize=True) matrix = numpy.zeros((4, 4), float) for i in range(3): matrix[i, i] = factor matrix[3, 3] = 1 AllChem.TransformMol(mol, matrix) Chem.MolToMolFile(mol, path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale(self,factor):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y*factor for y in self.coord[x]])\n return self", "def _force_rescale(self, setpoint_x, setpoint_y):", "def scale_coords(self, mol, target=1.5):\n\t#Compute Median Bond Length\n\tbondlengths = []\n\tconf = mol.GetConformer()\n\tfor bond in mol.GetBonds():\n\t\tstartatomidx = bond.GetBeginAtomIdx()\n\t\tendatomidx = bond.GetEndAtomIdx()\n\t\tlenght = AllChem.GetBondLength(conf, startatomidx, endatomidx)\n\t\tbondlengths.append(lenght)\n\tfactor = target/numpy.median(bondlengths)\n\t#Scale coords if too divergent bondlength from RDkit 1.5\n\tif (factor < 0.98) or (factor > 1.02):\n\t\tlog.info('Scaling original coords with factor %s'%(str(factor)))\n\t\tcenter = AllChem.ComputeCentroid(mol.GetConformer())\n\t\ttf = numpy.identity(4,numpy.float)\n\t\ttf[0][3] -= center[0]\n\t\ttf[1][3] -= center[1]\n\t\ttf[0][0] = tf[1][1] = tf[2][2] = factor\n\t\tAllChem.TransformMol(mol,tf)", "def scale(coord):\n scl = 0.07465 # scale (for 21zoom use 0.07465)\n for i in range(len(coord)):\n for j in range(len(coord[i])):\n coord[i][j][0], coord[i][j][1] = coord[i][j][0] * scl, coord[i][j][1] * scl\n return coord", "def rescale(self, factor):\n scaled_size = (int(self.width * factor), int(self.height * factor))\n return self.resize(scaled_size)", "def scale_positions_and_cell(self):\n\n taupscl = self.dt / self.taup\n stress = self.atoms.get_stress()\n old_pressure = self.atoms.get_isotropic_pressure(stress)\n scl_pressure = 1.0 - taupscl * self.compressibility / 3.0 * \\\n (self.pressure - old_pressure)\n\n #print \"old_pressure\", old_pressure\n #print \"volume scaling by:\", scl_pressure\n\n cell = self.atoms.get_cell()\n cell = scl_pressure * cell\n self.atoms.set_cell(cell, scale_atoms=True)", "def _rescale(x, xlim, ylim):\n m = (ylim[1] - ylim[0]) / (xlim[1] - xlim[0])\n c = ylim[1] - m * xlim[1]\n y = m * x + c\n return y", "def scaling(mat, factor):\n\treturn mat / (mat + factor)", "def scale(self, factor):\n self.x *= factor\n self.y *= factor\n for a in self.annotations:\n a.scale(factor)", "def normalize(self, factor):", "def scale(self):", "def scalepos(pos):\n return pos[0] * scalefactor, pos[1] * scalefactor", "def scaled_coordinates(x, rng=None):\n return x if rng is None else 2 * (x - rng[0]) / (rng[1] - rng[0]) - 1", "def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)", "def _scale_coordinate(self, x, y, scale_factor):\n\n return (x * scale_factor, y * scale_factor)", "def rescale(self, points, inplace=True):\n if inplace == False:\n points = points.copy()\n points *= self.scale_factor\n points += self.origin\n return points", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def rescale(pts):\n bbox = bounding_box(pts)\n # There'll be a bigger diff on one axis\n max_diff = np.max(bbox.lengths)\n num_pts = pts.shape[0]\n pts = pts - np.repeat([bbox.min], num_pts, axis=0)\n pts = pts / max_diff\n return pts", "def scale_pos(q_unsc):\n q_sc = (q_unsc - 0.125)/0.125\n return q_sc", "def scale(self, factor):\n for a in self.symbol_attributes:\n a.scale(factor)", "def scale(self, size=128):\n scale_factor = size / max(self.voxels.shape)\n self.voxels = ndimage.zoom(self.voxels, scale_factor)\n self.point_position = self.point_position * scale_factor\n self.voxel_size = False # To ignore this\n \n return(self)", "def normalize(self, factor):\n self.n_atoms /= factor", "def normalize(self, factor):\n self.n_atoms /= factor", "def zoom(self, factor):\n 
self._transform(\n [\n [factor, 0, 0],\n [0, factor, 0],\n [0, 0, factor],\n ])", "def scale_positions_and_cell(self):\n\n taupscl = self.dt * self.compressibility / self.taup / 3.0\n stress = - self.atoms.get_stress() * 1e-5 / units.Pascal\n if stress.shape == (6,):\n stress = stress[:3]\n elif stress.shape == (3,3):\n stress = [stress[i][i] for i in range(3)]\n else:\n raise ValueError(\"Cannot use a stress tensor of shape \" + str(stress.shape))\n pbc = self.atoms.get_pbc()\n scl_pressurex = 1.0 - taupscl * (self.pressure - stress[0]) * pbc[0]\n scl_pressurey = 1.0 - taupscl * (self.pressure - stress[1]) * pbc[1]\n scl_pressurez = 1.0 - taupscl * (self.pressure - stress[2]) * pbc[2]\n \n cell = self.atoms.get_cell()\n cell = np.array([scl_pressurex * cell[0],scl_pressurey * cell[1],scl_pressurez * cell[2]])\n self.atoms.set_cell(cell, scale_atoms=True)", "def scale(self, factor: float) -> Point:\n return Point(self.x * factor, self.y * factor)", "def scale(self, factor):\n self.b = factor * self.b", "def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5", "def _enlarge_ordinate(ordinate, unit=\"1km\"):\n try:\n factor = TILE_FACTORS[unit]\n except:\n raise ValueError(\"Tile unit not recognised!\")\n\n return factor * int(ordinate)", "def scale(self, x, y, z) -> None:\n ..." ]
[ "0.7354363", "0.6686502", "0.66113037", "0.6565813", "0.6292672", "0.62915665", "0.6285049", "0.62842643", "0.62798035", "0.6210972", "0.6203826", "0.6155714", "0.61438906", "0.6143718", "0.6099302", "0.6092187", "0.6081201", "0.6037964", "0.60108364", "0.59721416", "0.5957903", "0.5949993", "0.5949993", "0.5890719", "0.5889533", "0.588413", "0.58579224", "0.58493066", "0.5846064", "0.58430934" ]
0.6958495
1
Update 2D images of PDBeChem components which are available in the PubChem database from CCD files.
def update_ccd_file(self, ccd: str) -> None: components = ccd_reader.read_pdb_components_file(ccd) for i in components.values(): self.process_template(i.component)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)", "def append_component_images(pldm_fw_up_pkg, image_files):\n for image in image_files:\n with open(image, \"rb\") as file:\n for line in file:\n pldm_fw_up_pkg.write(line)", "def write_component_image_info_area(pldm_fw_up_pkg, metadata, image_files):\n components = metadata[\"ComponentImageInformationArea\"]\n # ComponentImageCount\n pldm_fw_up_pkg.write(struct.pack(\"<H\", len(components)))\n component_location_offsets = []\n # ComponentLocationOffset position in individual component image\n # information\n component_location_offset_pos = 12\n\n for component in components:\n # Record the location of the ComponentLocationOffset to be updated\n # after appending images to the firmware update package\n component_location_offsets.append(\n pldm_fw_up_pkg.tell() + component_location_offset_pos\n )\n\n # ComponentClassification\n component_classification = component[\"ComponentClassification\"]\n if component_classification < 0 or component_classification > 0xFFFF:\n sys.exit(\n \"ERROR: ComponentClassification should be [0x0000 - 0xFFFF]\"\n )\n\n # ComponentIdentifier\n component_identifier = component[\"ComponentIdentifier\"]\n if component_identifier < 0 or component_identifier > 0xFFFF:\n sys.exit(\"ERROR: ComponentIdentifier should be [0x0000 - 0xFFFF]\")\n\n # ComponentComparisonStamp\n component_comparison_stamp = get_component_comparison_stamp(component)\n\n # ComponentOptions\n component_options = bitarray(16, endian=\"little\")\n component_options.setall(0)\n supported_component_options = [0, 1, 2]\n for option in component[\"ComponentOptions\"]:\n if option not in supported_component_options:\n sys.exit(\n \"ERROR: unsupported ComponentOption in \"\n \" ComponentImageInformationArea section\"\n )\n component_options[option] = 1\n\n # RequestedComponentActivationMethod\n requested_component_activation_method = bitarray(16, endian=\"little\")\n requested_component_activation_method.setall(0)\n supported_requested_component_activation_method = [0, 1, 2, 3, 4, 5]\n for option in component[\"RequestedComponentActivationMethod\"]:\n if option not in supported_requested_component_activation_method:\n sys.exit(\n \"ERROR: unsupported RequestedComponent \"\n \" ActivationMethod entry\"\n )\n requested_component_activation_method[option] = 1\n\n # ComponentLocationOffset\n component_location_offset = 0\n # ComponentSize\n component_size = 0\n # ComponentVersionStringType\n component_version_string_type = string_types[\"ASCII\"]\n # ComponentVersionStringlength\n # ComponentVersionString\n component_version_string = component[\"ComponentVersionString\"]\n check_string_length(component_version_string)\n\n format_string = \"<HHIHHIIBB\" + str(len(component_version_string)) + \"s\"\n pldm_fw_up_pkg.write(\n struct.pack(\n format_string,\n component_classification,\n component_identifier,\n component_comparison_stamp,\n ba2int(component_options),\n ba2int(requested_component_activation_method),\n component_location_offset,\n component_size,\n component_version_string_type,\n len(component_version_string),\n component_version_string.encode(\"ascii\"),\n )\n )\n\n index = 0\n pkg_header_checksum_size = 4\n start_offset = pldm_fw_up_pkg.tell() + pkg_header_checksum_size\n 
# Update ComponentLocationOffset and ComponentSize for all the components\n for offset in component_location_offsets:\n file_size = os.stat(image_files[index]).st_size\n pldm_fw_up_pkg.seek(offset)\n pldm_fw_up_pkg.write(struct.pack(\"<II\", start_offset, file_size))\n start_offset += file_size\n index += 1\n pldm_fw_up_pkg.seek(0, os.SEEK_END)", "def update_ccd_dir(self, components: str):\n\n for f in os.listdir(components):\n c = ccd_reader.read_pdb_cif_file(os.path.join(components, f)).component\n self.process_template(c)", "def __updateEditedImages(self, recipe):\n\n recipeProperties = recipe.getProperties()\n mode = Mode(recipeProperties.get('show', None), recipeProperties.get('sequence', None))\n multiTrackPath = mode.get(Recipe.MULTITRACK_FILE, recipeProperties)\n\n try:\n multiTrackXML = self.fileService.loadXMLFile(multiTrackPath)\n except Exception, e:\n multiTrackXML = XMLUtils.loadAndCleanXML(multiTrackPath)\n\n setupName = recipeProperties[\"setup\"]\n\n updatedPoses = list()\n\n # check to see if we have any new poses\n tempPath = mode.get(Recipe.POSE_EDITING_FOLDER, recipeProperties)\n\n multiTrackModified = False\n\n if self.fileServiceLocal.exists(tempPath):\n tempFiles = self.fileServiceLocal.listFolder(tempPath)\n log('Temp Files %s' % tempFiles)\n for f in tempFiles:\n if f.endswith(\".psd\"):\n if f.startswith(\".\"):\n continue\n try:\n isPsdMultiLayerPath = f.replace(\".psd\", \".psd.multilayer\")\n keepPsd = False\n if isPsdMultiLayerPath in tempFiles:\n keepPsd = True\n\n outPsdPath = tempPath + \"/\" + f\n\n infoXMLPath = tempPath + \"/\" + f.replace(\".psd\", \".xml\")\n poseXML = self.fileService.loadXMLFile(infoXMLPath)\n\n poseId = poseXML.attrib.get(\"id\", -1)\n\n poseXMLItems = self.__getMultiTrackElementWithId(multiTrackXML, poseId)\n if poseXMLItems is None:\n continue\n if poseXMLItems.get(\"pose\", None) is None:\n continue\n\n pose = poseXMLItems[\"pose\"].attrib[\"file\"]\n\n userRigName = poseXMLItems[\"rig\"].attrib[\"userRigName\"]\n\n posePropertiesXML = poseXML.find(\"Properties\")\n\n if posePropertiesXML is None:\n posePropertiesXML = XMLUtils.getXMLElementForClass(\"Properties\")\n poseXML.append(posePropertiesXML)\n\n poseProperties = posePropertiesXML.attrib\n\n # determine the pose name for which to save the pose as\n basename = poseProperties.get(\"pose\", \"\")\n\n if pose != \"[poseFile]\":\n basename = os.path.basename(pose)\n basename = basename.replace(\".png\", \"\")\n if basename.rfind(\"_v\") > 0:\n pose = basename[0:basename.rfind(\"_v\")]\n\n if (\"[\" in basename) or (\"]\" in basename):\n pose = \"[defaultPose]\"\n\n # create a new pose if necessary\n if (pose == \"[clearPose]\") or (pose == mode.get(\"[clearPose]\", recipeProperties)) or (pose == \"[defaultPose]\") or (pose == mode.get(\"[defaultPose]\", recipeProperties)):\n basename = setupName + \"_\" + userRigName + \"_\" + poseXML.attrib.get(\"id\", \"\")\n\n # define the filename for the pose name\n poseProperties[\"pose\"] = basename\n poseProperties[\"version\"] = \">\"\n\n poseProperties[\"show\"] = recipeProperties[\"show\"]\n poseProperties[\"sequence\"] = recipeProperties[\"sequence\"]\n poseProperties[\"beat\"] = recipeProperties[\"beat\"]\n\n # gets the latest incremental number for this pose\n version = FlixVersioning().poseVersionUp(poseProperties[\"show\"],\n poseProperties[\"sequence\"],\n poseProperties[\"beat\"],\n poseProperties[\"pose\"])\n\n if posePropertiesXML.attrib.get('poseFileExtension'):\n del(posePropertiesXML.attrib[\"poseFileExtension\"])\n\n 
log('new pose version %s' % version)\n poseProperties[\"version\"] = str(version)\n\n poseXML.attrib[\"file\"] = \"[poseFile]\"\n posePropertiesXML.attrib[\"version\"] = poseProperties[\"version\"]\n posePropertiesXML.attrib[\"pose\"] = poseProperties[\"pose\"]\n\n posePropertiesXML.attrib[\"show\"] = recipeProperties[\"show\"]\n posePropertiesXML.attrib[\"sequence\"] = recipeProperties[\"sequence\"]\n posePropertiesXML.attrib[\"beat\"] = recipeProperties[\"beat\"]\n\n mode = Mode(posePropertiesXML.attrib.get(\"show\", None), posePropertiesXML.attrib.get(\"sequence\", None))\n newPosePath = mode.get(\"[poseFile]\", posePropertiesXML.attrib)\n\n poseFolder = os.path.dirname(newPosePath)\n self.fileService.createFolder(poseFolder)\n\n self.fileServiceLocal.refreshCache(outPsdPath)\n # copy the psd file if any\n for i in range(10):\n if keepPsd:\n posePropertiesXML.attrib[\"poseFileExtension\"] = '.psd'\n newPosePath = mode.get(\"[poseFile]\", posePropertiesXML.attrib)\n if self.fileServiceLocal.exists(outPsdPath):\n self.fileServiceLocal.copy(outPsdPath, newPosePath)\n break\n else:\n # copy the poses to the pose paths.\n posePropertiesXML.attrib[\"poseFileExtension\"] = '.png'\n newPosePath = mode.get(\"[poseFile]\", posePropertiesXML.attrib)\n outPngPath = self.__toPoseSpace(outPsdPath)\n if self.fileServiceLocal.exists(outPngPath):\n self.fileServiceLocal.copy(outPngPath, newPosePath)\n break\n time.sleep(0.5) # some issues with file locking on windows file servers\n\n self.fileServiceLocal.removeFile(outPsdPath)\n self.fileService.copyFromLocal(newPosePath, True)\n\n updatedPoses.append({\"poseXML\":poseXML, \"outPsdPath\":outPsdPath})\n\n # Update the xml to reflect the new pose\n self.fileServiceLocal.saveXMLFile(infoXMLPath, poseXML)\n\n multiTrackModified = True\n\n except:\n log(\"Error in updating poses\", isError=True, trace=True)\n return None\n\n\n # Update the path in the multiTrack\n if len(updatedPoses) > 0:\n for updatedPose in updatedPoses:\n poseXML = updatedPose[\"poseXML\"]\n poseId = poseXML.attrib.get(\"id\", -1)\n rigName = poseXML.attrib[\"rig\"]\n billboardName = poseXML.attrib[\"billboard\"]\n poseIndex = int(poseXML.attrib[\"index\"])\n trackName = poseXML.attrib[\"track\"]\n outPsdPath = updatedPose[\"outPsdPath\"]\n foundPose = None\n # find the pose in the xml\n for rigType in multiTrackXML.getchildren():\n for r in rigType.getchildren():\n for rig in r.getchildren():\n if rig.attrib[\"name\"] != rigName:\n continue\n for billboard in rig.getchildren():\n if billboard.attrib[\"billboard\"] != billboardName:\n continue\n for multiTrack in billboard.getchildren():\n for track in multiTrack.getchildren():\n if track.attrib[\"name\"] == trackName:\n for cdl in track.getchildren():\n for clip in cdl.getchildren():\n for clipType in clip.getchildren():\n for pose in clipType.getchildren():\n if poseId == pose.attrib.get(\"id\", None) or poseId == -1:\n foundPose = pose\n\n if foundPose is not None:\n if int(foundPose.attrib[\"index\"]) == int(poseIndex):\n foundPose.attrib[\"file\"] = poseXML.attrib[\"file\"]\n foundPoseXML = foundPose.find(\"Properties\")\n if foundPoseXML is None:\n foundPoseXML = XMLUtils.getXMLElementForClass(\"Properties\")\n foundPose.append(foundPoseXML)\n foundPoseXML.attrib = poseXML.find(\"Properties\").attrib.copy()\n # delete the source pose\n self.fileServiceLocal.removeFile(outPsdPath)\n isPsdMultiLayerPath = outPsdPath.replace(\".out.psd\", \".out.psd.multilayer\")\n if self.fileServiceLocal.exists(isPsdMultiLayerPath):\n 
self.fileServiceLocal.removeFile(isPsdMultiLayerPath)\n\n\n if multiTrackModified:\n if multiTrackXML is not None:\n log(\"refreshing multitrack\")\n multiTrackXML.attrib['locked'] = '0'\n recipeProperties['frame'] = recipeProperties.get('frame', '0001')\n cachedProperties = recipeProperties\n recipeProperties['pose'] = mode.get('[recipeName]', cachedProperties)\n cachedPropertiesPath = mode.get(\"[poseEditingFile]\", recipeProperties).replace(\".psd\", \".json\")\n # If this file exists, it means that it was previously being edited\n # Once edited in photoshop, this file gets created and contains\n # the most recent version of this setup created by flix\n if self.fileServiceLocal.exists(cachedPropertiesPath):\n cachedProperties = json.loads(self.fileServiceLocal.loadTextFile(cachedPropertiesPath))\n\n # Publish a new version\n recipe.updateRecipeFileData('multiTracks', multiTrackXML)\n recipe.publishNewVersion(copyRenders=False)\n newRecipeProperties = recipe.getProperties()\n# multiTrackPath = Mode.getPath(Recipe.MULTITRACK_FILE, newRecipeProperties)\n# self.fileService.saveXMLFile(multiTrackPath, multiTrackXML)\n\n # Create the xml to be sent to flix\n recipiesXML = ET.fromstring('<Recipies/>')\n setupXML = ET.fromstring('<OldSetup show=\"%s\" sequence=\"%s\" beat=\"%s\" setup=\"%s\" version=\"%s\" />'\\\n % (cachedProperties[\"show\"], cachedProperties[\"sequence\"], cachedProperties[\"beat\"], cachedProperties[\"setup\"], cachedProperties[\"version\"]))\n\n newSetupXML = recipe.getMasterXML()\n setupXML.append(newSetupXML)\n\n recipiesXML.append(setupXML)\n\n # Store the new version of this setup so that it can automatically be replaced in the next version\n self.fileServiceLocal.saveTextFile(cachedPropertiesPath, json.dumps(newRecipeProperties))\n log(['replacing setups %s' % ET.tostring(recipiesXML)])\n self.addFeedback(\"replaceSetupsMultiTracks\", ET.tostring(recipiesXML))\n\n return multiTrackXML", "def update_eligs(self, *args):\n self.splitGD.update_eligs()", "def update_eligs(self, *args):\n self.splitGD.update_eligs()", "def updateImages(self, msg, arg2=None):\n\t\tself.picPaths = msg\n\t\tself.totalPictures = len(self.picPaths)\n\t\tself.loadImage(self.picPaths[0])", "def _load_components(self):\n compsf = self._fetch_components_file()\n comps_img = niimg.load_img(compsf)\n return comps_img", "def update(i):\n print(\"{}/{}\".format(i, len(embs[query])))\n ims[0].set_data(unnorm(frames[query][i]))\n ims[1].set_data(unnorm(frames[candidate][nns[i]]))\n plt.tight_layout()", "def refreshImages(self):\n fileName1 = \"DECK/\" + str(self.card1) + \".gif\"\n fileName2 = \"DECK/\" + str(self.card2) + \".gif\"\n fileName3 = \"DECK/\" + str('b') + \".gif\"\n self.image1 = PhotoImage(file = fileName1)\n self.cardLabel1[\"image\"] = self.image1\n self.image2 = PhotoImage(file = fileName2)\n self.cardLabel2[\"image\"] = self.image2\n self.image3 = PhotoImage(file = fileName3)\n self.cardLabel3[\"image\"] = self.image3", "def update_processed_files(connection, cursor, image_pk, design_pk, file):\n try:\n if(connection):\n\n cursor.execute(UPDATE_IMAGE % (file, str(image_pk)))\n connection.commit()\n\n cursor.execute(UPDATE_DESIGN_STATUS % str(design_pk))\n connection.commit()\n \n print(\"{} \\t Actualización de diseño correcta\".format(datetime.now()))\n\n except (Exception, psycopg2.Error) as error:\n print(\"{} \\t Error en el método update_processed_files\".format(datetime.now()))\n print(\"Ocurrió actualizando el estado - \", error)", "def reverseDCT(self, components):\n for cp in 
components.values():\n for i in range(cp.nr_blocks_ver):\n for j in range(cp.nr_blocks_hor):\n cp.blocks[i][j] = IDCT_matrix(cp.blocks[i][j])", "def update_CPPN(genotype, E1, E2, subp_1, subp_2, norm_in, num_x, num_y, canvas):\n\n\t# get values from fields, should all be comma separated\n\tinnov_nums = E1.get().split(\",\")\n\tnew_weights = E2.get().split(\",\")\n\n\t# get all values from the above lists\n\tfor innov_num, new_weight in zip(innov_nums, new_weights):\n\t\t# search through connections to find the correct\n\t\t# weight to change\n\t\tfor con in genotype.connections:\n\t\t\tif(con.getInnovationNumber() == int(innov_num)):\n\t\t\t\tcon.setWeight(float(new_weight))\n\n\t# clear both subplots so new graphs can be placed into the GUI\n\tsubp_1.clear()\n\tsubp_2.clear()\n\n\t# create graphs again and put graphs onto them\n\toutputs = []\n\tfor ins in norm_in:\n\t\toutputs.append(genotype.getOutput(ins)[0])\n\toutputs_np = np.array(outputs, copy=True)\n\tsubp_1.imshow(np.reshape(outputs_np, (num_x, num_y)), cmap='Greys')\n\n\tgraph_genotype_GUI(genotype, subp_2)\n\n\tcanvas.show()\n\tcanvas.get_tk_widget().pack()", "def update(self):\n for component in self.components.values():\n try:\n component.update()\n except Exception as e:\n if self.ds.isFMSAttached():\n log.error(\"In subsystem %s: %s\" % (component, e))\n else:\n raise e", "def test_cspad2x2():\n basedir = '/reg/g/psdm/detector/alignment/cspad2x2/calib-cspad2x2-01-2013-02-13/'\n fname_geometry = basedir + 'calib/CsPad2x2::CalibV1/MecTargetChamber.0:Cspad2x2.1/geometry/0-end.data'\n fname_data = basedir + 'cspad2x2.1-ndarr-ave-meca6113-r0028.dat'\n\n geometry = GeometryAccess(fname_geometry, pbits=0o377, use_wide_pix_center=False)\n amp_range = (0,15000)\n\n # get pixel coordinate index arrays:\n #xyc = xc, yc = 1000, 1000\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc)\n\n rows, cols = geometry.get_pixel_coord_indexes(do_tilt=True)\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n arr.shape= (185,388,2)\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()", "def cropnscaleImageDB(imagedb,newimagedb,ox,oy,width,height,scale,folder=\"\",verbose=False):\n\n\n import procdb\n import os\n\n images,shapes,labels=procdb.processImageDB(imagedb)\n shapes=np.asarray(shapes)\n #print shapes.shape\n\n if verbose==True:\n print str(len(images))+\" images to process.\"\n \n \n suffix=\"_\"+str(int(width*scale))+\"x\"+str(int(height*scale))\n if folder==\"\":\n folder=str(int(width*scale))+\"x\"+str(int(height*scale))\n if not os.path.exists(folder): os.makedirs(folder)\n else:\n if not os.path.exists(folder):os.makedirs(folder)\n\n newimagedb=open(folder+\"/\"+newimagedb,'w')\n\n for i in range(len(images)):\n im=cv2.imread(images[i])\n im_cropped=crop(im,ox,oy,width,height)\n newheight=int(height*scale)\n newwidth=int(width*scale)\n im_resized=np.asarray(np.zeros((newheight,newwidth)))\n im_resized=cv2.resize(im_cropped,(newwidth,newheight),im_resized,scale,scale,cv2.INTER_AREA)\n fileName, fileExtension = os.path.splitext(images[i])\n \n retval=cv2.imwrite(folder+\"/\"+fileName+suffix+fileExtension,im_resized)\n if retval==False:\n print \"Problem to save modified image.\"\n return False\n shapes[i,:,0]=shapes[i,:,0]-ox\n 
shapes[i,:,1]=shapes[i,:,1]-oy\n shapes[i]=shapes[i]*scale\n\n newshapes=''\n for j in range(shapes.shape[1]):\n newshapes=newshapes+',('+str(shapes[i,j,0])+';'+str(shapes[i,j,1])+')'\n\n newlabels=''\n for k in range(len(labels[i])):\n newlabels=newlabels+','+str(labels[i][k])\n\n newimagedb.write(fileName+suffix+fileExtension+newlabels+newshapes+'\\n')\n\n if verbose==True:\n print \"Image \"+str(i+1)+\" successfully processed.\"\n \n newimagedb.close()\n\n return True", "def _update_(self,update_background=True):\n # -- Make sure the fundamental update (if any) are made\n super(Image,self)._update_()\n # - Data\n self._update_data_(update_background=update_background)", "def update_image(self, cv_img):\n\t\tqt_img = self.convert_cv_qt(cv_img)\n\t\tself.label.setPixmap(qt_img)\n\t\tself.display_info()", "def test():\n\n fname='./MedData/Lung-PET-CT-Dx/Lung_Dx-A0164/04-12-2010-PET01PTheadlung Adult-08984/8.000000-Thorax 1.0 B31f-52757/1-001.dcm' \n \n ds=pydicom.dcmread(fname)\n # print(ds.pixel_array.shape)\n print(ds.pixel_array[1])\n plt.figure(figsize=(10,10))\n plt.imshow(ds.pixel_array, cmap=plt.cm.bone)\n plt.show()", "def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n if(self.iscapture):\n print(\"update\")\n direct = self.label1.text()\n if direct == \"~default\":\n direct = \"face_dataframes\"\n else:\n direct = direct + \"/face_dataframes\"\n \n if (not os.path.exists(direct)):\n os.mkdir(direct)\n cv2.imwrite(\"{1}/{2}{0}.jpeg\".format(self.count, direct,self.textbox.text()), cv_img)\n self.iscapture = False\n self.label2.setText(\"Image # 0{0} Saved\".format(self.count))\n self.pushButton0.setEnabled(False)\n self.count += 1\n \n \n if(self.count == 6):\n #print(\"greater\")\n self.pushButton.setEnabled(False)\n self.pushButton2.setDisabled(False)\n\n\n self.image_label.setPixmap(qt_img)", "def genebmp(dirName, sou,slnt,dx,dy):\r\n\r\n if sou=='source':\r\n tabres=np.zeros((slnt,dx,dy),np.int16)\r\n else:\r\n tabres=np.zeros((slnt,dx,dy),np.uint8)\r\n\r\n\r\n dirFileP = os.path.join(dirName, sou)\r\n\r\n (top,tail)=os.path.split(dirName)\r\n print ('generate image in :',tail, 'directory :',sou)\r\n fileList =[name for name in os.listdir(dirFileP) if \".dcm\" in name.lower()]\r\n\r\n for filename in fileList:\r\n FilesDCM =(os.path.join(dirFileP,filename))\r\n RefDs = dicom.read_file(FilesDCM)\r\n dsr= RefDs.pixel_array\r\n dsr=dsr.astype('int16')\r\n fxs=float(RefDs.PixelSpacing[0])/avgPixelSpacing\r\n scanNumber=int(RefDs.InstanceNumber)\r\n if dsr.max()>dsr.min():\r\n if sou !='source' :\r\n dsr=normi(dsr)\r\n dsr=cv2.resize(dsr,None,fx=fxs,fy=fxs,interpolation=cv2.INTER_LINEAR)\r\n if sou == 'lung':\r\n np.putmask(dsr,dsr>0,100)\r\n\r\n elif sou !='source':\r\n np.putmask(dsr,dsr==1,0)\r\n np.putmask(dsr,dsr>0,100)\r\n else :\r\n dsr[dsr == -2000] = 0\r\n intercept = RefDs.RescaleIntercept\r\n slope = RefDs.RescaleSlope\r\n if slope != 1:\r\n dsr = slope * dsr.astype(np.float64)\r\n dsr = dsr.astype(np.int16)\r\n\r\n dsr += np.int16(intercept)\r\n dsr = dsr.astype('int16')\r\n# print dsr.min(),dsr.max(),dsr.shape\r\n dsr=cv2.resize(dsr,None,fx=fxs,fy=fxs,interpolation=cv2.INTER_LINEAR)\r\n\r\n tabres[scanNumber]= dsr\r\n\r\n return tabres", "def update_parts():\n syt.log_info(\"$$$ Get Rebrickable Part info\")\n part_list = [x[0] for x in reapi.pull_all_pieces()] # ['piece_id', 'descr', 'category')\n part_list.pop(0) # Remove the header\n secondary_parts.add_parts_to_database(part_list, type=\"re\")\n # Todo: need to create a scraper for rebrickable 
piece num information\n syt.log_info(\"%%% Rebrickable Part info added to parts table\")", "def slider_update_CPPN(self):\n\n\t\t# get needed CPPN inputs\n\t\tnorm_in = getNormalizedInputs(num_x, num_y)\n\n\t\tkeys = list(self.scale_dict.keys())\n\t\t# for each connection with a slider, update weight to its current value\n\t\tfor innov_num in keys:\n\t\t\tinnov_num_int = int(innov_num)\n\t\t\tfor con in genotype.connections:\n\t\t\t\tif(con.getInnovationNumber() == innov_num_int):\n\t\t\t\t\tcon.setWeight(self.scale_dict[innov_num].get())\n\n\t\t# replot the CPPN with new weights\n\t\t# both subplots must be cleared to replace them with new ones\n\t\tself.subp_1.clear()\n\t\tself.subp_2.clear()\n\n\t\toutputs = []\n\t\tfor ins in norm_in:\n\t\t\toutputs.append(genotype.getOutput(ins)[0])\n\t\toutputs_np = np.array(outputs, copy=True)\n\t\tself.subp_1.imshow(np.reshape(outputs_np, (num_x, num_y)), cmap='Greys')\n\n\t\tgraph_genotype_GUI(genotype, self.subp_2)\n\n\t\tself.canvas.show()\n\t\tself.canvas.get_tk_widget().pack()", "def update_compdatabase():\n for comp_group in comp_entry:\n#\n#--- read the last set of the input data and find the last entry \n#\n past = house_keeping + comp_group + '_past'\n past = mcf.read_data_file(past)\n\n last = past[-1]\n#\n#--- find today's data entry\n#\n cmd = 'ls /data/mta_www/mp_reports/*/' + comp_group + '/data/mta*fits* >' + zspace\n os.system(cmd)\n current = mcf.read_data_file(zspace)\n\n cmd = 'mv '+ zspace + ' ' + house_keeping + comp_group + '_past'\n os.system(cmd)\n#\n#--- find the data which are not read\n#\n new_fits = []\n chk = 0\n for ent in current:\n if chk == 0:\n if ent == last:\n chk = 1\n continue\n new_fits.append(ent)\n#\n#--- uppend the data to the local fits data files\n#\n for fits in new_fits:\n [cols, tbdata] = ecf.read_fits_file(fits)\n\n time = tbdata['time']\n\n for col in cols:\n#\n#--- ignore columns with \"ST_\" (standard dev) and time\n#\n if col.lower() == 'time':\n continue\n\n mc = re.search('st_', col.lower())\n if mc is not None:\n continue\n\n mdata = tbdata[col]\n cdata = [time, mdata]\n ocols = ['time', col.lower()]\n\n ofits = out_dir + col.lower()+ '_full_data.fits'\n if os.path.isfile(ofits):\n update_fits_file(ofits, ocols, cdata)\n else:\n create_fits_file(ofits, ocols, cdata)", "def updateEditedRefImages(self, recipe):\n oldProperties = recipe.getProperties()\n\n show = oldProperties['show']\n sequence = oldProperties['sequence']\n oldMode = Mode(show, sequence)\n updatedPoses = self.__getUpdatedPoses(recipe)\n\n newRecipeXML = CopySetup.localizeSetup(oldProperties, show, sequence,\n renderCallback=self.__renderCallback,\n setupCallback=self.feedReloadSetupsMultiTracks,\n multiTrackCallback=self.feedReloadSetupsMultiTracks)\n\n newRecipe = Recipe.fromXMLElement(newRecipeXML)\n\n self.__storeUpdatedPoses(newRecipe, updatedPoses)\n self.__updatePosesInSetup(newRecipe, updatedPoses)\n\n oldProperties = recipe.getProperties()\n existingRecipeEditingPath = oldMode.get(\"[poseEditingFile]\", oldProperties).replace(\".psd\", \".xml\")\n\n # if a setup has been send to flix already we will now use the new recipe version\n if self.fileServiceLocal.exists(existingRecipeEditingPath):\n oldProperties = self.fileServiceLocal.loadXMLFile(existingRecipeEditingPath).find(\"Properties\").attrib\n\n\n recipiesXML = ET.fromstring('<Recipies/>')\n newSetupXML = newRecipe.getMasterXML()\n\n setupXML = ET.fromstring('<OldSetup show=\"%s\" sequence=\"%s\" beat=\"%s\" setup=\"%s\" version=\"%s\" />'\\\n % 
(oldProperties[\"show\"],\n oldProperties[\"sequence\"],\n oldProperties[\"beat\"],\n oldProperties[\"setup\"],\n oldProperties[\"version\"]))\n\n\n setupXML.append(newSetupXML)\n recipiesXML.append(setupXML)\n self.addFeedback(\"replaceSetupsMultiTracks\", recipiesXML)\n\n FlixNuke().fromRecipe(newRecipe)\n FlixNuke().compRecipe(newRecipe, renderCallback=self.__renderCallback)\n\n newProperties = newRecipe.getProperties()\n mode = Mode(newProperties.get('show', None), newProperties.get('sequence', None))\n newMultitrackFile = mode.get('[recipeMultiTrackFile]', newProperties)\n newMultitrack = self.fileServiceLocal.loadTextFile(newMultitrackFile)\n\n data = []\n data.append('<Recipies>')\n data.append(\n \"\"\"<Setup\n show=\"%(show)s\"\n sequence=\"%(sequence)s\"\n beat=\"%(beat)s\"\n setup=\"%(setup)s\"\n version=\"%(version)s\">'\"\"\" % newProperties)\n data.append(newMultitrack + \"</Setup>\" + \"</Recipies>\")\n dataString = \"\".join(data)\n\n self.feedReloadSetupsMultiTracks(dataString)\n\n# FlixNuke().compRecipe(newRecipe, fileOutNodes='fileOut_master_png')\n\n return newRecipe", "def update(update_db=True):\n try:\n # time update was triggered\n updated_at = datetime.now().strftime(\"%d %b %Y, %H:%M\")\n\n vis = Visualise(with_tracked_time=False, update_db=update_db)\n\n # Generate preference table\n print(\"Generate preference table...\")\n preferences_table = pref.get_all_preferences_table(\n wim=vis.wim, first_date=vis.START_DATE, last_date=vis.END_DATE\n )\n\n # Save preference table to file\n check_dir(app.config.get(\"DATA_DIR\") + \"/figs/preferences\")\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/preferences/preferences.html\", \"w\"\n ) as f:\n f.write(preferences_table)\n\n # Generate whiteboards\n print(\"Generate whiteboards...\")\n whiteboards = vis.all_whiteboards(update_timestamp=updated_at)\n\n # Save whiteboards to file\n check_dir(app.config.get(\"DATA_DIR\") + \"/figs/projects\")\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/projects.html\", \"w\"\n ) as f:\n f.write(whiteboards[\"project_print\"])\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\", \"w\"\n ) as f:\n f.write(whiteboards[\"project_screen\"])\n\n check_dir(app.config.get(\"DATA_DIR\") + \"/figs/people\")\n\n with open(app.config.get(\"DATA_DIR\") + \"/figs/people/people.html\", \"w\") as f:\n f.write(whiteboards[\"person_print\"])\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/people/person_screen.html\", \"w\"\n ) as f:\n f.write(whiteboards[\"person_screen\"])\n\n print(\"Convert whiteboards to pdf...\")\n # convert print version html to pdf\n cmd = \"bash {home_dir}/scripts/whiteboard_to_pdf.sh\".format(\n home_dir=app.config.get(\"HOME_DIR\")\n )\n result = subprocess.run(cmd, shell=True, check=True, capture_output=True)\n\n if result.returncode != 0:\n raise ValueError(\n \"whiteboard_to_pdf.sh returned with code \" + str(result.returncode)\n )\n\n # Generate & save demand vs capacity plot\n print(\"Demand vs capacity...\")\n capacity_fig = vis.plot_demand_vs_capacity(\n start_date=datetime.now() - timedelta(365),\n end_date=datetime.now() + timedelta(548),\n freq=\"W-MON\",\n )\n capacity_fig.tight_layout()\n capacity_fig.savefig(\n app.config.get(\"DATA_DIR\") + \"/figs/demand_vs_capacity.png\", dpi=300\n )\n plt.close(\"all\")\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/demand_vs_capacity.html\", \"w\"\n ) as f:\n f.write(\n \"\"\"<!DOCTYPE html>\n <html>\n <head>\n <title>Index</title>\n </head>\n 
<body>\n <img src=\"demand_vs_capacity.png\" alt=\"demand_vs_capacity\">\n </body>\n </html>\"\"\"\n )\n\n print(\"Make zip file...\")\n # create zip of print version whiteboard files\n with zipfile.ZipFile(\n app.config.get(\"DATA_DIR\") + \"/whiteboard.zip\", \"w\"\n ) as zipf:\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\",\n \"projects.html\",\n )\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/people/person_screen.html\",\n \"people.html\",\n )\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/projects.pdf\",\n \"projects.pdf\",\n )\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/people/people.pdf\", \"people.pdf\"\n )\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/demand_vs_capacity.png\",\n \"demand_vs_capacity.png\",\n )\n\n # save update time to file if everything was successful\n with open(app.config.get(\"DATA_DIR\") + \"/.last_update\", \"w\") as f:\n f.write(updated_at)\n\n return render_template(\"update.html\", updated_at=updated_at)\n\n except:\n return traceback.format_exc()", "def process_IOP_graphics(identifiers, force, dryrun=False):\n # Regular expression for parsing full text files\n doi_pat = re.compile(\n '''<article-id\\s+pub-id-type=\"doi\">(?P<doi>.*?)</article-id>''')\n # Create the mapping from bibcode to full text location\n bibcode2fulltext = {}\n map_file = current_app.config.get('GRAPHICS_FULLTEXT_MAPS').get('IOP')\n with open(map_file) as fh_map:\n for line in fh_map:\n try:\n bibcode, ft_file, source = line.strip().split('\\t')\n if ft_file[-3:].lower() == 'xml':\n bibcode2fulltext[bibcode] = ft_file\n except:\n continue\n # If there is back data for image data, load this\n back_file = current_app.config.get('GRAPHICS_BACK_DATA_FILE').get('IOP')\n id2thumb = {}\n if back_file and os.path.exists(back_file):\n with open(back_file) as back_data:\n for line in back_data:\n doi, id, thumb = line.strip().split(',')\n id2thumb[doi] = thumb\n # Get source name\n src = current_app.config.get('GRAPHICS_SOURCE_NAMES').get('IOP')\n # Now process the records submitted\n nfigs = None\n updates = []\n new = []\n bibcodes = [b['bibcode'] for b in identifiers]\n for bibcode in bibcodes:\n resp = db.session.query(GraphicsModel).filter(\n GraphicsModel.bibcode == bibcode).first()\n if force and resp:\n updates.append(bibcode)\n elif not resp:\n new.append(bibcode)\n else:\n continue\n # First process the updates\n for paper in updates:\n # Get the full text for this article\n ft_file = bibcode2fulltext.get(paper, None)\n if ft_file and os.path.exists(ft_file):\n buffer = open(ft_file).read()\n else:\n # No full text file, skip\n continue\n dmat = doi_pat.search(buffer)\n try:\n DOI = dmat.group('doi')\n except:\n sys.stderr.write('Cannot find DOI: %s\\n' % ft_file)\n continue\n nfigs = manage_IOP_graphics(buffer, paper, DOI, src, id2thumb,\n update=True, dryrun=dryrun)\n\n # Next, process the new records\n for paper in new:\n # Get the full text for this article\n ft_file = bibcode2fulltext.get(paper, None)\n if ft_file and os.path.exists(ft_file):\n buffer = open(ft_file).read()\n else:\n # No full text file, skip\n if ft_file:\n sys.stderr.write('Incorrect full text mapping for %s: %s\\n'%(paper, ft_file))\n else:\n sys.stderr.write('No full text found for %s\\n' % paper)\n continue\n dmat = doi_pat.search(buffer)\n try:\n DOI = dmat.group('doi')\n except:\n sys.stderr.write('Cannot find DOI: %s\\n' % ft_file)\n continue\n try:\n nfigs = manage_IOP_graphics(buffer, paper, DOI, src, id2thumb, 
dryrun=dryrun)\n except Exception, e:\n sys.stderr.write('Error processing %s (%s)\\n'%(paper, e))\n continue\n return nfigs", "def _state_main(self, gui):\n gui.entry.wait_variable(gui.entry_sv)\n\n '''Clean string'''\n files = literal_eval(gui.entry_sv.get())\n\n '''Remove previous images'''\n if hasattr(gui, \"panel\"):\n gui.panel.destroy()\n\n '''Load each image'''\n for file_name in files:\n file_name = file_name.replace(\"{\", \"\").replace(\"}\", \"\")\n # image = tk.PhotoImage(file=file_name)\n if \".CR2\" in file_name:\n '''Rawpy implementation'''\n file_image = rawpy.imread(file_name)\n file_image = file_image.postprocess()\n '''Rawkit implementation'''\n '''file_image = Raw(file_name)\n file_image = np.array(file_image.to_buffer())'''\n '''OpenCV implementation'''\n '''file_image = cv2.imread(file_name)'''\n else:\n file_image = Image.open(file_name)\n '''image = file_image.resize((500, 500), Image.ANTIALIAS)\n image = ImageTk.PhotoImage(image)\n gui.panel = tk.Label(gui.root, image=image)\n gui.panel.image = image\n gui.panel.pack()'''\n # panel.grid(row=2)\n\n image_data = np.array(file_image)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n '''print(image_data.shape)\n print(image_data)\n print(len(image_data))\n print(len(image_data[0]))'''\n returned_image = Image.fromarray(image_data)\n '''cv2.imshow(\"Gray\", image_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Gray\")'''\n\n '''enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(file_image))\n enhanced_image = enhanced_contrast.enhance(255)\n enhanced_data = np.array(enhanced_image)\n plot_functions.imshow(enhanced_image)\n plot_functions.show()'''\n\n # color_space = cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV)\n # print(color_space)\n \n '''Create mask for white-ish pixels'''\n '''lower_background = np.array([150, 150, 150])\n upper_background = np.array([255, 255, 255])\n print(image_data)\n white_mask = cv2.inRange(image_data, lower_background, upper_background)\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n white_mask = white_mask / 255'''\n\n '''Create mask for black-ish pixels'''\n '''lower_background = np.array([0, 0, 0])\n upper_background = np.array([25, 25, 25])\n black_mask = cv2.inRange(image_data, lower_background, upper_background)\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n black_mask = black_mask / 255'''\n\n '''Add masks together'''\n '''background_mask = white_mask\n # Ensure no value is above 1\n background_mask = np.clip(background_mask, 0, 1)'''\n \n copied_image_data = np.asarray(returned_image).copy()\n # background_mask = np.logical_not(background_mask)\n '''for row_index, [mask_row, image_row] in enumerate(zip(background_mask, copied_image_data)):\n # place black pixel on corresponding masked pixels\n # copied_image_data[row_index] = np.array([image_row[pixel] * int(mask_row[pixel]) for pixel in range(len(mask_row))])\n # make pixel fully white on corresponding masked pixels\n copied_image_data[row_index] = np.array([np.array([255, 255, 255]) if int(mask_row[pixel]) else image_row[pixel] for pixel in range(len(mask_row))])'''\n\n '''Turn removed pixels red'''\n '''mask_image = Image.fromarray(copied_image_data)\n plot_functions.imshow(mask_image)\n plot_functions.show()'''\n trapezoid_data = copied_image_data.copy()\n\n 
enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(trapezoid_data))\n enhanced_image = enhanced_contrast.enhance(255)\n trapezoid_data = np.array(enhanced_image)\n\n '''Detect lines'''\n edges = cv2.Canny(trapezoid_data, 75, 150)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, maxLineGap=1000)\n # print(lines)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if y1 == y2:\n cv2.line(copied_image_data, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n '''Trapezoid attempt'''\n\n # filters image bilaterally and displays it\n bilatImg = cv2.bilateralFilter(trapezoid_data, 5, 175, 175)\n\n # finds edges of bilaterally filtered image and displays it\n edgeImg = cv2.Canny(bilatImg, 75, 200)\n\n # gets contours (outlines) for shapes and sorts from largest area to smallest area\n contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n # drawing red contours on the image\n for con in contours:\n cv2.drawContours(trapezoid_data, con, -1, (255, 255, 255), 3)\n\n '''Detect corners'''\n dst = cv2.cornerHarris(edges, 30, 31, 0.001)\n dst = cv2.dilate(dst, None)\n ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,\n 0.001)\n corners = cv2.cornerSubPix(edges, np.float32(centroids), (5, 5),\n (-1, -1), criteria)\n\n good_corners = []\n for corner in corners:\n if (corner[1] < 1000) & (corner[1] > 650) & (corner[0] > 250) & (corner[0] < 2250):\n good_corners.append(corner)\n cv2.circle(edges, (corner[0], corner[1]), 10, (255, 255, 255))\n\n print(good_corners)\n if len(good_corners) >= 3:\n corner_combos = itertools.combinations(good_corners, 3)\n elif len(good_corners) > 1:\n corner_combos = itertools.combinations(good_corners, 2)\n\n best_corner_combo = None\n best_coef = np.inf\n for corner_combo in corner_combos:\n regression = LinearRegression().fit(np.array([corner[0] for corner in corner_combo]).reshape(-1, 1),\n np.array([corner[1] for corner in corner_combo]))\n if np.abs(regression.coef_) < best_coef:\n best_coef = np.abs(regression.coef_)\n best_corner_combo = np.array([corner[1] for corner in corner_combo])\n\n y_edge = int(round(np.mean(best_corner_combo)))\n edges = edges[y_edge:3000, 200:2200]\n copied_image_data = copied_image_data[y_edge:2500, 200:2200]\n trapezoid_data = trapezoid_data[y_edge:2500, 200:2200]\n\n # and double-checking the outcome\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.imshow(\"Contours check\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Contours check\")\n\n # find the perimeter of the first closed contour\n perim = cv2.arcLength(contours[0], True)\n # setting the precision\n epsilon = 0.02 * perim\n # approximating the contour with a polygon\n approxCorners = cv2.approxPolyDP(contours[0], epsilon, True)\n # check how many vertices has the approximate polygon\n approxCornersNumber = len(approxCorners)\n\n for corners in approxCorners:\n cv2.circle(trapezoid_data, (corners[0], corners[1]), radius=10, color=(255, 255, 255), thickness=-1)\n cv2.imshow(\"Vertex position\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Vertex position\")\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.waitKey(0)\n 
cv2.destroyAllWindows()", "def builddataframe(brick, path = \"..\", cutstring = \"1\", major = 0, minor = 0, newzprojection = None, charmsim = False):\n nplate =0\n\n print(\"Reading ScanSet at path \",path)\n\n #reading scanset\n sproc = r.EdbScanProc()\n sproc.eProcDirClient=path\n id = r.EdbID(brick,nplate,major,minor)\n ss = sproc.ReadScanSet(id)\n ss.Brick().SetID(brick)\n \n #preparing patterns\n npl = ss.eIDS.GetEntries()\n\n cut = r.TCut(cutstring)\n\n #intial empty arrays\n IDall = np.zeros(0,dtype=int)\n PIDall = np.zeros(0,dtype=int)\n\n xall = np.zeros(0,dtype=np.float32)\n yall = np.zeros(0,dtype=np.float32)\n zall = np.zeros(0,dtype=np.float32)\n TXall = np.zeros(0,dtype=np.float32)\n TYall = np.zeros(0,dtype=np.float32)\n\n MCEvtall = np.zeros(0,dtype=int)\n MCTrackall = np.zeros(0,dtype=int)\n Pall = np.zeros(0,dtype=np.float32)\n Flagall = np.zeros(0,dtype=int)\n\n print (\"Cut on couples \")\n cut.Print()\n\n print(\"Try to open folders at path \",path+\"/b00000\"+str(brick))\n for i in range(npl):\n idplate = ss.GetID(i)\n \n nplate = idplate.ePlate\n plate = ss.GetPlate(idplate.ePlate)\n #read pattern information\n p = r.EdbPattern()\n\n ect = r.EdbCouplesTree()\n if (nplate) <10:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p00{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n else:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p0{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n\n #addingcut\n ect.eCut = cut \n cutlist = ect.InitCutList()\n \n nsegcut = cutlist.GetN()\n nseg = ect.eTree.GetEntries()\n\n IDarray_plate = np.zeros(nsegcut,dtype=int)\n PIDarray_plate = np.zeros(nsegcut,dtype=int)\n\n xarray_plate = np.zeros(nsegcut,dtype=np.float32)\n yarray_plate = np.zeros(nsegcut,dtype=np.float32)\n zarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TXarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TYarray_plate = np.zeros(nsegcut,dtype=np.float32)\n \n MCEvtarray_plate = np.zeros(nsegcut,dtype=int)\n MCTrackarray_plate = np.zeros(nsegcut,dtype=int)\n Parray_plate = np.zeros(nsegcut,dtype=np.float32)\n Flagarray_plate = np.zeros(nsegcut,dtype=int)\n\n print (\"loop on {} segments over {} for plate {}\".format(nsegcut, nseg,nplate))\n for ientry in range(nsegcut):\n iseg = cutlist.GetEntry(ientry)\n ect.GetEntry(iseg)\n \n seg=ect.eS\n #//setting z and affine transformation\n seg.SetZ(plate.Z())\n seg.SetPID(i)\n seg.Transform(plate.GetAffineXY())\n\n if(newzprojection is not None):\n seg.PropagateTo(newzprojection[i])\n\n IDarray_plate[ientry] = seg.ID()\n PIDarray_plate[ientry] = seg.PID()\n \n xarray_plate[ientry] = seg.X()\n yarray_plate[ientry] = seg.Y()\n zarray_plate[ientry] = seg.Z()\n TXarray_plate[ientry] = seg.TX()\n TYarray_plate[ientry] = seg.TY()\n\n MCEvtarray_plate[ientry] = seg.MCEvt()\n MCTrackarray_plate[ientry] = seg.MCTrack()\n Parray_plate[ientry] = seg.P() \n if charmsim: #different place where pdgcode is stored\n Flagarray_plate[ientry] = seg.Vid(0)\n else:\n Flagarray_plate[ientry] = seg.Flag() \n\n #end of loop, storing them in global arrays\n IDall = np.concatenate((IDall,IDarray_plate),axis=0)\n PIDall = np.concatenate((PIDall,PIDarray_plate),axis=0)\n\n xall = np.concatenate((xall,xarray_plate),axis=0)\n yall = np.concatenate((yall,yarray_plate),axis=0)\n zall = np.concatenate((zall,zarray_plate),axis=0)\n TXall = np.concatenate((TXall,TXarray_plate),axis=0)\n TYall = np.concatenate((TYall,TYarray_plate),axis=0)\n MCEvtall = 
np.concatenate((MCEvtall,MCEvtarray_plate),axis=0)\n MCTrackall = np.concatenate((MCTrackall,MCTrackarray_plate),axis=0)\n Pall = np.concatenate((Pall,Parray_plate),axis=0)\n Flagall = np.concatenate((Flagall,Flagarray_plate),axis=0)\n\n data = {'ID':IDall,'PID':PIDall,'x':xall,'y':yall,'z':zall,'TX':TXall,'TY':TYall,'MCEvent':MCEvtall,'MCTrack':MCTrackall,'P':Pall,'Flag':Flagall}\n df = pd.DataFrame(data, columns = ['ID','PID','x','y','z','TX','TY','MCEvent','MCTrack','P','Flag'] )\n\n return df" ]
[ "0.59459436", "0.5931459", "0.5890593", "0.56384003", "0.542464", "0.5374642", "0.5374642", "0.53658164", "0.5333321", "0.5295699", "0.52952296", "0.5214089", "0.5175713", "0.5169607", "0.5164791", "0.5131144", "0.51290625", "0.5110845", "0.50653243", "0.50643325", "0.5064164", "0.5019687", "0.50088376", "0.5006107", "0.5003297", "0.5002704", "0.49940413", "0.49714115", "0.49598303", "0.4959639" ]
0.62722003
0
Process template for a given component. First the component is attempted to be downloaded and rescaled, since the RDKit default depiction has a 1.5A single bond size whereas templates from pubchem are 1.0A.
def process_template(self, component):
    destination = os.path.join(self.pubchem_templates, f"{component.id}.sdf")
    downloaded = download_template(destination, component.id, component.inchikey)

    if downloaded:
        rescale_molecule(destination, 1.5)

    return downloaded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processTemplates(self, tk, templateFile = '', id = '', shotNum = '', inprogressBar = ''):\r\n ## Now fetch all the template paths from shotgun\r\n getTemplatePaths = tk.paths_from_template(templateFile, {'Step' : 'Light', 'id' : id, 'Shot' : shotNum})\r\n debug(app = self, method = 'processTemplates', message = 'getTemplatePaths: %s' % getTemplatePaths, verbose = False)\r\n \r\n ## Now look for each assets template path: \r\n xmlFile = max(getTemplatePaths) \r\n debug(app = self, method = 'processTemplates', message = 'Max Version xmlFile.... %s' % xmlFile, verbose = False)\r\n \r\n ## Now if versions has stuff in it..\r\n if not xmlFile:\r\n debug(app = self, method = 'processTemplates', message = 'Can not find any xml files for %s' % shotNum, verbose = False)\r\n pass\r\n else:\r\n \r\n debug(app = self, method = 'processTemplates', message = 'PathTo: %s' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n if os.path.isfile(xmlFile.replace(os.path.sep, \"/\")):## is this a valid xml file!?\r\n inprogressBar.updateProgress(percent = 10, doingWhat = 'createAll shaders...')\r\n self._createAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n \r\n inprogressBar.updateProgress(percent = 30, doingWhat = 'connectAll shaders...')\r\n self._connectAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n else:\r\n debug(app = self, method = 'processTemplates', message = 'FAILED Can not find a valid published xml file for %s ...' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n pass", "def psfTemplateModel(n, params):\n psf_template = params[\"psf_template\"]\n self.m_psf = psf_template\n print(\"PSF template shape\", np.shape(psf_template))\n dim = int(n)\n m = np.shape(psf_template)[0]\n #if m != dim:\n # raise ValueError(\"PSF template dimension not equal patch size\")\n \n if np.sum(psf_template) != 1:\n print(\"Normalizing PSF template to sum = 1\")\n psf_template = psf_template/np.sum(psf_template) \n return psf_template", "def parse_component(component):\n # Volume of the component in m^3\n volume = (component.X * component.Y * component.Z).values[0]\n\n # How many slots the component takes up\n internal_slots = component['Internal Slots'].values[0]\n\n if not component['External Slots'].values == 0:\n external = True\n external_slots = component['External Slots'].values[0]\n else:\n external = False\n external_slots = 0\n\n min_temp = component['Min Temp'].values[0]\n max_temp = component['Max Temp'].values[0]\n\n mass = component['Mass'].values[0]\n max_voltage = component['Voltage'].values[0]\n nom_power = component['Nom Power'].values[0]\n max_power = component['Power (W)'].values[0] - nom_power # This returns the difference when activated\n discharge_time = component['Discharge Time (Wh)'].values[0]\n pixel_resolution = component['Resolution (m)'].values[0]\n wavelength_resolution = component['Resolution(nm)'].values[0]\n min_wavelength = component['Min Wavelength (nm)'].values[0]\n max_wavelength = component['Max Wavelength (nm)'].values[0]\n field_of_view = component['Field of View (deg)'].values[0]\n rx_min = component['Receiver Min (MHz)'].values[0]\n rx_max = component['Receiver Max'].values[0]\n tx_min = component['Transmitter Min'].values[0]\n tx_max = component['Transmitter Max'].values[0]\n duplex = component['Duplex'].values[0] + 1\n br_down = component['Bit Rate Down'].values[0]\n br_up = component['Bit Rate Up'].values[0]\n 
data = component['Data Storage (MB)'].values[0]\n code = component['Code Storage (MB)'].values[0]\n ram = component['RAM'].values[0]\n att_know = component['Attitude Know (deg)'].values[0]\n att_view = component['Attitude View'].values[0]\n att_mom = component['Attitude Control moment'].values[0]\n max_prop = component['Max Propulsion (mN)'].values[0]\n att_type = component['Attitude Type'].values[0]\n axis = component['Axis control'].values[0]\n ctrl_area = component['Control Area (m^2)'].values[0]\n disposal = component['Disposal time(km/day)'].values[0]\n int_comms = component['Internal Comms'].values[0]\n comm_conn = component['IntCommConn'].values[0]\n price = component['Price ($US)'].values[0]\n\n metric_sums = np.array([[mass, duplex, br_down, br_up, data, code, ram, att_view, att_mom, max_prop, axis,\n ctrl_area, disposal, price, pixel_resolution, wavelength_resolution, min_wavelength,\n max_wavelength]]).T.astype(np.float)\n metric_mins = np.array([[att_know]]).T.astype(np.float)\n metric_maxs = np.array([[]]).T.astype(np.float)\n\n summation_values = np.array([[volume, mass, internal_slots, external_slots, nom_power, discharge_time, duplex,\n br_down, br_up, data, code, ram, att_know, att_view, att_mom, max_prop, att_type,\n axis, ctrl_area, disposal, price]]).T\n min_max_values = np.array([[max_voltage, max_power, pixel_resolution, wavelength_resolution, min_temp, max_temp,\n min_wavelength, max_wavelength, field_of_view, rx_min, rx_max, tx_min, tx_max]]).T\n\n #Todo, figure out a way to deal with the comms issue. possibly a later problem\n\n # print(summation_values)\n\n # Todo create matrix from arrays then sum each feature on the correct axis\n # Todo This will create the correct feature set\n # Other features will be made from summation of available slots/connects vs used\n return metric_sums, metric_mins, metric_maxs, summation_values, min_max_values", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = 
truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def doMakeLimbTemplate2(self):\n \"\"\"\n returnList = []\n templObjNameList = []\n templHandleList = []\n \"\"\"\n log.debug(\">>> doMakeLimbTemplate\")\n assert self.cls == 'TemplateFactory.go',\"Not a TemlateFactory.go instance!\"\n\n try:#Gather limb specific data and check\n #==============\n self.curveDegree = self._mi_templateNull.curveDegree\n self.rollOverride = self._mi_templateNull.rollOverride\n\n doCurveDegree = getGoodCurveDegree(self)\n if not doCurveDegree:raise ValueError,\"Curve degree didn't query\"\n\n #>>>Scale stuff\n size = returnModuleBaseSize(self._mi_module)\n\n lastCountSizeMatch = len(self.corePosList) -1\n except Exception,error:raise Exception,\"Gather limb data | {0}\".format(error)\n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # Making the template objects\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[1]), progress=1) \t\t\t\t\t \n try:\n templHandleList = []\n self.ml_controlObjects = []\n self._mi_locs = []\n for i,pos in enumerate(self.corePosList):# Don't like this sizing method but it is what it is for now\n #>> Make each of our base handles\n #============================= \n if i == 0:\n sizeMultiplier = 1\n elif i == lastCountSizeMatch:\n sizeMultiplier = .8\n else:\n sizeMultiplier = .75\n\n #>>> Create and set attributes on the object\n i_obj = cgmMeta.validateObjArg( curves.createControlCurve('sphere',(size * sizeMultiplier)),'cgmObject',setClass = True )\n\n curves.setCurveColorByName(i_obj.mNode,self.moduleColors[0])\n\n i_obj.doStore('cgmName','%s.%s'%(self._mi_module.coreNames.mNode,self.d_coreNamesAttrs[i])) \n #i_obj.addAttr('cgmName',value = str(self.l_coreNames[i]), attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n if self.direction != None:\n i_obj.addAttr('cgmDirection',value = self.direction,attrType = 'string',lock=True) \n i_obj.addAttr('cgmType',value = 'templateObject', attrType = 'string',lock=True) \n i_obj.doName()#Name it\n\n mc.move (pos[0], pos[1], pos[2], [i_obj.mNode], a=True)\n i_obj.parent = self._mi_templateNull\n\n #>>> Loc it and store the loc\n #i_loc = cgmMeta.cgmObject( i_obj.doLoc() )\n i_loc = i_obj.doLoc()\n i_loc.addAttr('cgmName',value = self._mi_module.getShortName(), attrType = 'string', lock=True) #Add name tag\n i_loc.addAttr('cgmType',value = 'templateCurveLoc', attrType = 'string', lock=True) #Add Type\n i_loc.v = False # Turn off visibility\n i_loc.doName()\n\n self._mi_locs.append(i_loc)\n 
i_obj.connectChildNode(i_loc.mNode,'curveLoc','owner')\n i_loc.parent = self._mi_templateNull#parent to the templateNull\n\n mc.pointConstraint(i_obj.mNode,i_loc.mNode,maintainOffset = False)#Point contraint loc to the object\n\n templHandleList.append (i_obj.mNode)\n self.ml_controlObjects.append(i_obj)\n except Exception,error:raise Exception,\"Template object creation | {0}\".format(error)\n\n try:#>> Make the curve\n #============================= \n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[2]), progress=2) \t\t\t\t\t \n i_crv = cgmMeta.validateObjArg( mc.curve (d=doCurveDegree, p = self.corePosList , os=True),'cgmObject',setClass = True )\n\n i_crv.addAttr('cgmName',value = str(self._mi_module.getShortName()), attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n if self.direction != None:\n i_crv.addAttr('cgmDirection',value = self.direction, attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n\n i_crv.addAttr('cgmType',value = 'templateCurve', attrType = 'string', lock=True)\n curves.setCurveColorByName(i_crv.mNode,self.moduleColors[0])\n i_crv.parent = self._mi_templateNull \n i_crv.doName()\n i_crv.setDrawingOverrideSettings({'overrideEnabled':1,'overrideDisplayType':2},True)\n\n for i,i_obj in enumerate(self.ml_controlObjects):#Connect each of our handles ot the cv's of the curve we just made\n mc.connectAttr ( (i_obj.curveLoc.mNode+'.translate') , ('%s%s%i%s' % (i_crv.mNode, '.controlPoints[', i, ']')), f=True )\n\n\n self.foundDirections = returnGeneralDirections(self,templHandleList)\n log.debug(\"directions: %s\"%self.foundDirections )\n except Exception,error:raise Exception,\"template curve | {0}\".format(error)\n\n try:#>> Create root control\n #============================= \n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[3]), progress=3) \t\t\t\t\t \n\n rootSize = (distance.returnBoundingBoxSizeToAverage(templHandleList[0],True)*1.25) \n i_rootControl = cgmMeta.validateObjArg( curves.createControlCurve('cube',rootSize),'cgmObject',setClass = True )\n\n curves.setCurveColorByName(i_rootControl.mNode,self.moduleColors[0])\n i_rootControl.addAttr('cgmName',value = str(self._mi_module.getShortName()), attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug \n i_rootControl.addAttr('cgmType',value = 'templateRoot', attrType = 'string', lock=True)\n if self.direction != None:\n i_rootControl.addAttr('cgmDirection',value = self.direction, attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n i_rootControl.doName()\n\n #>>> Position it\n if self._mi_module.moduleType in ['clavicle']:\n position.movePointSnap(i_rootControl.mNode,templHandleList[0])\n else:\n position.movePointSnap(i_rootControl.mNode,templHandleList[0])\n\n #See if there's a better way to do this\n log.debug(\"templHandleList: %s\"%templHandleList)\n if self._mi_module.moduleType not in ['foot']:\n if len(templHandleList)>1:\n log.debug(\"setting up constraints...\") \n constBuffer = mc.aimConstraint(templHandleList[-1],i_rootControl.mNode,maintainOffset = False, weight = 1, aimVector = [0,0,1], upVector = [0,1,0], worldUpVector = self.worldUpVector, worldUpType = 'vector' )\n mc.delete (constBuffer[0]) \n elif self._mi_module.getMessage('moduleParent'):\n #l_parentTemplateObjects = 
self._mi_module.moduleParent.templateNull.getMessage('controlObjects')\n helper = self._mi_module.moduleParent.templateNull.msgList_get('controlObjects',asMeta = True)[-1].helper.mNode\n if helper:\n log.info(\"helper: %s\"%helper)\n constBuffer = mc.orientConstraint( helper,i_rootControl.mNode,maintainOffset = False)\n mc.delete (constBuffer[0]) \n\n i_rootControl.parent = self._mi_templateNull\n i_rootControl.doGroup(maintain=True)\n except Exception,error:raise Exception,\"Root creation | {0}\".format(error)\n\n\n try:#>> Store objects\n #============================= \n self._mi_templateNull.curve = i_crv.mNode\n self._mi_templateNull.root = i_rootControl.mNode\n self._mi_templateNull.msgList_connect('controlObjects',templHandleList)\n\n self._mi_rootControl = i_rootControl#link to carry\n except Exception,error:raise Exception,\"store | {0}\".format(error)\n\n try:#>> Orientation helpers\n #============================= \n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[3]), progress=3) \t\t\t\t\t \n \"\"\" Make our Orientation Helpers \"\"\"\n doCreateOrientationHelpers(self)\n doParentControlObjects(self)\n\n #if self._mi_module.getMessage('moduleParent'):#If we have a moduleParent, constrain it\n #constrainToParentModule(self.m)\n\n #doOrientTemplateObjectsToMaster(self._mi_module)\n except Exception,error:raise Exception,\"Orientation helpers | {0}\".format(error)\n\n return True", "def update_ccd_file(self, ccd: str) -> None:\n components = ccd_reader.read_pdb_components_file(ccd)\n\n for i in components.values():\n self.process_template(i.component)", "def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result", "def main(template_initial_path, template_grown_path, step, total_steps, hydrogen_to_replace, core_atom_linker,\n tmpl_out_path, null_charges=False, growing_mode=\"SoftcoreLike\"):\n lambda_to_reduce = float(step/(total_steps+1))\n templ_ini = TemplateImpact(template_initial_path)\n \n for bond in templ_ini.list_of_bonds:\n key, bond_cont = bond\n templ_grw = TemplateImpact(template_grown_path)\n fragment_atoms, core_atoms_in, core_atoms_grown = detect_atoms(template_initial=templ_ini, \n template_grown=templ_grw,\n hydrogen_to_replace=hydrogen_to_replace)\n set_fragment_atoms(list_of_fragment_atoms=fragment_atoms)\n set_connecting_atom(template_grown=templ_grw, pdb_atom_name=core_atom_linker)\n fragment_bonds = detect_fragment_bonds(list_of_fragment_atoms=fragment_atoms, template_grown=templ_grw)\n set_fragment_bonds(list_of_fragment_bonds=fragment_bonds)\n set_linker_bond(templ_grw)\n if growing_mode == \"SoftcoreLike\":\n modify_core_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, exp_charges=True,\n null_charges=null_charges)\n reduce_fragment_parameters_linearly(templ_grw, lambda_to_reduce, exp_charges=True, \n null_charges=null_charges)\n \n modify_linkers_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, hydrogen_to_replace)\n elif growing_mode == \"AllLinear\":\n modify_core_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, exp_charges=False)\n reduce_fragment_parameters_linearly(templ_grw, lambda_to_reduce, exp_charges=False,\n null_charges=False)\n modify_linkers_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, hydrogen_to_replace)\n elif growing_mode == \"SpreadHcharge\":\n if step > 1:\n 
reduce_fragment_parameters_originaly(templ_grw, templ_ini, lambda_to_reduce, \n hydrogen=hydrogen_to_replace, n_GS=total_steps)\n modify_linkers_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, hydrogen_to_replace)\n else:\n reduce_fragment_parameters_spreading_H(templ_grw, templ_ini, lambda_to_reduce, \n hydrogen=hydrogen_to_replace, n_GS=total_steps)\n else:\n raise ValueError(\"Growing mode Not valid. Choose between: 'SoftcoreLike', 'SpreadHcharge', 'AllLinear'.\")\n templ_grw.write_template_to_file(template_new_name=tmpl_out_path)\n return [atom.pdb_atom_name for atom in fragment_atoms], \\\n [atom.pdb_atom_name for atom in core_atoms_grown]", "def cheetah_template(self, pre=False):\n if self.is_req_output:\n cht_tmpl = self.req_out_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_output:\n xml_out = self.xml_out\n xml_out['out_sel_name'] = self.out_sel_name\n cht_tmpl = self.file_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_input and not pre:\n if self.pname in self.gen_in_fmt:\n if self.gen_in_fmt[self.pname] == 'vcf,vcf_bgzip':\n cht_tmpl = self.vcf_choose\n else:\n cht_tmpl = PercentTemplate(self.reg_arg)\n elif self.pname in self.tool_data[self.tool_name]['input_fmt']:\n cht_tmpl = self.req_out_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_input and pre:\n cht_tmpl = self.vcf_tabix\n return cht_tmpl.substitute(self.xml_out)\n else:\n if self.xml_out['section'] not in ['required']:\n template_string = self.ext_arg\n else:\n template_string = self.reg_arg\n if self.xml_out['type'] == 'boolean':\n cht_tmpl = PercentTemplate(template_string.replace('%argument ', ''))\n else:\n cht_tmpl = PercentTemplate(template_string)\n return cht_tmpl.substitute(self.xml_out)", "def updateTemplate2(mModule = None, saveTemplatePose = False, **kws):\n try:\n mModule = kws.get['mModule'] or mModule\n saveTemplatePose = kws.get['saveTemplatePose'] or saveTemplatePose\n\n #if not mModule.isSized():\n #log.warning(\"'%s' not sized. Can't update\"%mModule.getShortName())\n #return False\n if not mModule.isTemplated():\n log.warning(\"'%s' not templated. 
Can't update\"%mModule.getShortName())\n return False\n\n if saveTemplatePose:\n mModule.templateSettings_call('store')#Save our pose before destroying anything\n\n mi_templateNull = mModule.templateNull\n\n corePosList = mi_templateNull.templateStarterData\n i_root = mi_templateNull.root\n ml_controlObjects = mi_templateNull.msgList_get('controlObjects',asMeta = True)\n\n #if not cgmMath.isVectorEquivalent(i_templateNull.controlObjects[0].translate,[0,0,0]):\n #raise StandardError,\"updateTemplate: doesn't currently support having a moved first template object\"\n #return False\n\n mc.xform(i_root.parent, translation = corePosList[0],worldSpace = True)\n mc.xform(ml_controlObjects[0].parent, translation = corePosList[0],worldSpace = True)\n\n for i,i_obj in enumerate(ml_controlObjects[1:]):\n log.info(i_obj.getShortName())\n #objConstraints = constraints.returnObjectConstraints(i_obj.parent)\n #if objConstraints:mc.delete(objConstraints) \n #buffer = search.returnParentsFromObjectToParent(i_obj.mNode,i_root.mNode)\n #i_obj.parent = False\n #if buffer:mc.delete(buffer)\n mc.xform(i_obj.mNode, translation = corePosList[1:][i],worldSpace = True) \n\n buffer = search.returnParentsFromObjectToParent(ml_controlObjects[0].mNode,i_root.mNode)\n ml_controlObjects[0].parent = False\n if buffer:mc.delete(buffer)\n\n doParentControlObjects(mModule)\n doCastPivots(mModule)\n\n mModule.templateSettings_call('load')#Restore the pose\n return True\n except Exception,error:raise Exception,\"updateTemplate | {0}\".format(error)", "def fillBackgroundTemplates(opt):\n\n totalBkg={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n for f in [os.path.join(opt.input,x) for x in os.listdir(opt.input) if 'Data13TeV' in x]:\n if 'MuonEG' in f : continue\n data.AddFile(f)\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel \n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n\n print '\\t',catName,categCut\n\n #background modelling histos\n histos=[]\n data_obs=None\n for name,pfix in [('bkg_'+catName,'mix'),('bkg_%s_bkgShape'%catName,'mixem')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),templCuts,'goff')\n h=data.GetHistogram()\n histos.append(h.Clone(name))\n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalBkg[icat]=h.Integral()\n if not opt.unblind :\n data_obs=h.Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n\n #observed data in this category if unblinding\n if opt.unblind:\n data.Draw('mmiss >> h({1},{2},{3})'.format(opt.nbins,opt.mMin,opt.mMax),categCut,'goff')\n data_obs=data.GetHistogram().Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n templates.append(data_obs)\n\n print '\\t total background:',totalBkg\n return totalBkg,templates", "def saved_template(self, template_id):\n\n # From user params get the wanted type and size\n category, size = template_id.split('_')\n\n # Parse the xml file\n 
template_tree = Etree.parse(\"patron.xml\")\n root = template_tree.getroot()\n\n # Find The selected template\n for template in root.findall(\"./type[@name='%s']/template[@size='%s']\" % (category, size)):\n # Find useful data\n info = 'T-shirt_template_%s_%s' % (category, size)\n transform = template.find('transform')\n\n # Creation of a main group for the Template\n template_attribs = {\n inkex.addNS('label', 'inkscape'): info,\n 'transform': transform.text if transform is not None else ''\n }\n template_group = inkex.etree.SubElement(self.current_layer, 'g', template_attribs)\n\n # For each pieces of the template\n for piece in template.findall('piece'):\n # Find useful data\n pieceinfo = info + \"_\" + piece.find('name').text\n transform = piece.find('transform')\n\n # Create a group for the piece\n piece_attribs = {\n inkex.addNS('label', 'inkscape'): pieceinfo,\n 'transform': transform.text if transform is not None else ''\n }\n piece_group = inkex.etree.SubElement(template_group, 'g', piece_attribs)\n\n # Add a text to display the piece info\n add_text(piece_group, pieceinfo.replace('_', ' '), piece.find('info').text, 15)\n\n # For each paths of the piece\n for part in piece.findall('part'):\n # Find useful data\n label = part.find('name').text\n partinfo = pieceinfo + \"_\" + label\n transform = part.find('transform')\n\n # Create a group for the shape\n part_attribs = {\n inkex.addNS('label', 'inkscape'): partinfo,\n 'transform': transform.text if transform is not None else ''\n }\n part_group = inkex.etree.SubElement(piece_group, 'g', part_attribs)\n\n # Add the path to the group\n style = self.normal_line if self.options.style == 'print' or label != 'offset' else self.cut_line\n path_attribs = {\n inkex.addNS('label', 'inkscape'): partinfo,\n 'style': simplestyle.formatStyle(style),\n 'd': part.find('path').text\n }\n inkex.etree.SubElement(part_group, inkex.addNS('path', 'svg'), path_attribs)", "def generateComponent( self, bGenerateRandom = 1, \\\n Size = 0, N = 0, M = 0, S = 0, C = 0, MaxWallTime = 0 ):\n if bGenerateRandom > 0:\n if self.HaveInput > 0:\n InputSize = AIRandomUtils.getRandomListElement( SSERComponent.SSER_ParKSuperSizes )\n else:\n InputSize = 0\n if self.HaveOutput > 0:\n OutputSize = AIRandomUtils.getRandomListElement( SSERComponent.SSER_ParKSuperSizes )\n else:\n OutputSize = 0\n N = AIRandomUtils.getRandomListElement( SSERComponent.SSER_ParSupersteps )\n M = AIRandomUtils.getRandomListElement( SSERComponent.SSER_ParMemoryKItems )\n S = AIRandomUtils.getRandomListElement( SSERComponent.SSER_ParMemoryElementsPerItem )\n C = AIRandomUtils.getRandomListElement( SSERComponent.SSER_ParComputationPerMemoryItem )\n\n# N = SSERComponent.SSER_ParSupersteps[2]\n# M = AIRandomUtils.getRandomListElement( SSERComponent.SSER_ParMemoryKItems )\n# S = AIRandomUtils.getRandomListElement( SSERComponent.SSER_ParMemoryElementsPerItem )\n# C = SSERComponent.SSER_ParComputationPerMemoryItem[3]\n\n MaxWallTime = AIRandomUtils.getRandomListElement( SSERComponent.SSER_RunTimeInMinutes )\n \n if self.UnitDir[0] != '/':\n if sys.platform.find(\"linux\") >= 0:\n self.UnitDir = os.path.join( os.environ['PWD'], self.UnitDir )\n else:\n self.UnitDir = os.path.join( os.getcwd(), self.UnitDir )\n \n ## too long component dir name\n #ComponentDirName = \"%s_sser_%dx_%d_i%d_o%d\" % \\\n # (self.ComponentData['id'], self.ComponentData['count'], int(N), int(InputSize), int(OutputSize))\n ComponentDirName = \"%s_sser\" % self.ComponentData['id']\n FullComponentDirName = os.path.join( 
self.UnitDir, ComponentDirName ) \n #--- Create output directory, if it does not exist\n if os.path.exists( FullComponentDirName ):\n if not os.path.isdir( FullComponentDirName ):\n print \"Output for job\", self.ComponentData['id'], \"(\"+FullComponentDirName+\")\", \"exists, but is not a directory\", \"...skipping job\"\n return -1\n else:\n try:\n os.makedirs( FullComponentDirName )\n except OSError, e:\n print \"Cannot create output directory for job\", self.ComponentData['id'] , \"...skipping job\"\n print '\\tOS returned:', e\n return -1\n \n # support the directory stagein\n EmptyFileName = os.path.join( FullComponentDirName, \"__empty_file__\" )\n try:\n EmptyFile = open( EmptyFileName, \"w\" )\n EmptyFile.close()\n except:\n pass \n \n OutFileName = \"%s.jdf\" % ComponentDirName\n FullOutFileName = os.path.join( self.UnitDir, OutFileName ) \n \n self.ComponentData['executable'] = SSERComponent.SSER_Exe\n self.ComponentData['stdout'] = \"sser-\"+self.ComponentData['id']+\".out\"#os.path.join( FullComponentDirName, \"sser-%s.out\" % self.ComponentData['id'] )\n self.ComponentData['stderr'] = \"sser-\"+self.ComponentData['id']+\".err\"#os.path.join( FullComponentDirName, \"sser-%s.err\" % self.ComponentData['id'] )\n self.ComponentData['logfile'] = os.path.join( FullComponentDirName, \"sser-%s.log\" % self.ComponentData['id'] )\n self.ComponentData['name'] = \"%s_sser\" % self.ComponentData['id']\n self.ComponentData['description'] = \\\n \"SSER, Count=%d, N=%d, M=%d, S=%d, C=%d, I1=I2=%d, O1=O2=O3=%d\" % \\\n (int(self.ComponentData['count']), int(N), int(M), int(S), int(C), int(InputSize), int(OutputSize))\n self.ComponentData['directory'] = FullComponentDirName\n self.ComponentData['maxWallTime'] = MaxWallTime\n \n # I1 = InputSize, I2 = InputSize, O1 = OutputSize, O2 = OutputSize, O3 = OutputSize\n InputSize=0\n self.ComponentData['arguments'] = \\\n self.generateArgsList( InputSize, None, OutputSize, OutputSize, OutputSize, N, M, S, C )\n self.ComponentData['env'] = self.generateEnvList( self.ComponentIndex )\n \n #StageInData = { 'EmptyFileName': os.path.join( FullComponentDirName, os.path.basename(EmptyFileName) ) }\n StageInData = {'EmptyFileName': \"\" }\n self.ComponentData['stagein'] = self.generateStageInList( StageInData )\n \n StageOutData = { }\n self.ComponentData['stageout'] = self.generateStageOutList( StageOutData )\n \n return 0", "def extract_template(temp_dir, fea_type):\n kps = []\n descriptors = np.array([])\n in_path = temp_dir + 'imgs/' # images\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n print(img.shape)\n kp, des = get_des(fea_type, img)\n if descriptors.size == 0:\n kps = kp\n descriptors = des\n else:\n kps.extend(kp)\n descriptors = np.vstack((descriptors, des))\n\n print(\"template descriptors shape: \" + str(descriptors.shape))\n with open(temp_dir + fea_type + '_template_0.pickle', 'wb') as ff:\n pickle.dump(descriptors, ff)\n\n # with open(temp_dir + fea_type + '_template_0.pickle', 'rb') as f:\n # template = pickle.load(f)\n\n return", "def prepare_templates(params, outfile, redo=False):\n if os.path.exists(outfile) and not redo:\n return\n emiles = EMILES()\n wmin = params[\"wmin\"] * u.micrometer\n wmax = params[\"wmax\"] * u.micrometer\n # Modify wmin to compensate for the recession velocity of the system\n zmax = (params[\"vsyst\"] + 3000) / const.c.to(\"km/s\").value\n 
wrest = wmin / (1 + zmax)\n grid = np.array(np.meshgrid(params[\"ages\"], params[\"metals\"],\n params[\"bis\"])).T.reshape(-1, 3)\n ssppars = Table(grid, names=[\"T\", \"Z\", \"imf\"])\n filenames = []\n for args in grid:\n filenames.append(os.path.join(emiles.data_dir,\n emiles.filename(*args)))\n wave, spec = misc.read_spec(filenames[0])\n wave = wave * u.angstrom\n idx = np.where((wave > wrest) & (wave <= wmax))\n wave = wave[idx]\n spec = spec[idx]\n wrange = [wave[0].to(\"angstrom\").value, wave[-1].to(\"angstrom\").value]\n newflux, logLam, velscale = util.log_rebin(wrange, spec,\n velscale=params[\"velscale\"])\n ssps = np.zeros((len(filenames), len(newflux)))\n print(\"Processing SSP files\")\n for i, fname in tqdm(enumerate(filenames)):\n spec = fits.getdata(fname)[idx]\n newflux, logLam, velscale = util.log_rebin(wrange, spec,\n velscale=params[\"velscale\"])\n ssps[i] = newflux\n norm = np.median(ssps)\n ssps /= norm\n hdu1 = fits.PrimaryHDU(ssps)\n hdu1.header[\"EXTNAME\"] = \"SSPS\"\n hdu1.header[\"BSCALE\"] = (norm, \"Scale to convert from ADU to flux.\")\n hdu2 = fits.BinTableHDU(ssppars)\n hdu2.header[\"EXTNAME\"] = \"PARAMS\"\n hdu1.header[\"CRVAL1\"] = logLam[0]\n hdu1.header[\"CD1_1\"] = logLam[1] - logLam[0]\n hdu1.header[\"CRPIX1\"] = 1.\n # Making wavelength array\n hdu3 = fits.BinTableHDU(Table([logLam], names=[\"loglam\"]))\n hdu3.header[\"EXTNAME\"] = \"LOGLAM\"\n hdulist = fits.HDUList([hdu1, hdu2, hdu3])\n hdulist.writeto(outfile, overwrite=True)\n return", "def run(self):\n\n print \"\\n\\n\\tPlease Note: Templates are generated based off\"\n print \"\\t of the OS environment variables that are set.\"\n print \"\\t* Running ReHeat.\"\n\n self.set_creds()\n self.gen_ip() # used in template description\n self.gen_tenant_id()\n if self.reheat_error:\n return self.reheat_errmsg\n\n print \"\\t* You have opted to generate %s file[s]\" % self.template_type\n if 'all' in self.template_type:\n self.gen_heat_data()\n self.gen_heat_template()\n self.gen_compute_data()\n return self.gen_compute_template()\n elif 'heat' in self.template_type:\n self.gen_heat_data()\n return self.gen_heat_template()\n elif 'compute' in self.template_type:\n self.gen_compute_data()\n return self.gen_compute_template()\n else:\n raise Exception(\"User provided an improper template type.\")", "def process(workbook: Any, contents: list) -> None:\n worksheet_name = 'Storage Inventory'\n worksheet = workbook.get_sheet_by_name(worksheet_name)\n\n headers = list(concat([\n ['Hostname', 'Model', 'OS', 'Nodes'],\n get_parser_header(DEDUPE_TMPL)\n ]))\n RowTuple = namedtuple('RowTuple', headers)\n build_header(worksheet, headers)\n\n rows = []\n for content in contents:\n doc = xmltodict.parse(content)\n component_details = search_tag_value(doc, 'component_details')\n command_details = search_tag_value(doc, 'command_details')\n\n dedupe, nodes = [], 0 # type: (list, int)\n for entry in command_details:\n nodes_content = collected_data(\n entry, 'cmd', 'isi storagepool nodepools list')\n nodes = max(map(compose(int, itemgetter(0)),\n run_parser_over(\n nodes_content,\n NODES_TMPL))) if nodes_content else nodes\n\n dedupe_content = collected_data(entry, 'cmd', 'isi dedupe stats')\n dedupe = run_parser_over(\n dedupe_content, DEDUPE_TMPL) if dedupe_content else dedupe\n\n dedupe = dedupe if len(dedupe) > 1 else [['', '', '', '', '', '']]\n rows.append([\n component_details['hostname'],\n component_details['model'],\n component_details['os'], str(nodes), *dedupe[0]\n ])\n\n final_col, final_row = 0, 0\n 
for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):\n for col_n, col_value in \\\n enumerate(row_tuple._asdict().values(), ord('A')):\n cell = worksheet['{}{}'.format(chr(col_n), row_n)]\n cell.value = str.strip(col_value)\n style_value_cell(cell)\n set_cell_to_number(cell)\n final_col = col_n\n final_row = row_n\n\n sheet_process_output(\n worksheet,\n 'StorageInventoryTable',\n 'Storage Inventory',\n final_col,\n final_row)", "def update_template():\n\n # Open, and read, the template file\n with open(\"template.html\", \"r\") as f:\n soup = BeautifulSoup(f.read(), features=\"html5lib\")\n\n # Add the plots in the correct places\n for div in soup.find_all(\"div\", class_=\"plot\"):\n with open(div[\"src\"], \"r\") as f:\n plot = BeautifulSoup(f.read(), features=\"html5lib\")\n div.replace_with(plot.html.body.div)\n\n # Write the finished report to document.html\n with open(\"document.html\", \"w\") as f:\n f.write(soup.prettify())", "def _get_component_templates(course):\r\n def create_template_dict(name, cat, boilerplate_name=None, is_common=False):\r\n \"\"\"\r\n Creates a component template dict.\r\n\r\n Parameters\r\n display_name: the user-visible name of the component\r\n category: the type of component (problem, html, etc.)\r\n boilerplate_name: name of boilerplate for filling in default values. May be None.\r\n is_common: True if \"common\" problem, False if \"advanced\". May be None, as it is only used for problems.\r\n\r\n \"\"\"\r\n return {\r\n \"display_name\": name,\r\n \"category\": cat,\r\n \"boilerplate_name\": boilerplate_name,\r\n \"is_common\": is_common\r\n }\r\n\r\n component_display_names = {\r\n 'discussion': _(\"Discussion\"),\r\n 'html': _(\"HTML\"),\r\n 'problem': _(\"Problem\"),\r\n 'video': _(\"Video\")\r\n }\r\n advanced_component_display_names = {}\r\n\r\n component_templates = []\r\n # The component_templates array is in the order of \"advanced\" (if present), followed\r\n # by the components in the order listed in COMPONENT_TYPES.\r\n for category in COMPONENT_TYPES:\r\n templates_for_category = []\r\n component_class = _load_mixed_class(category)\r\n # add the default template with localized display name\r\n # TODO: Once mixins are defined per-application, rather than per-runtime,\r\n # this should use a cms mixed-in class. 
(cpennington)\r\n if hasattr(component_class, 'display_name'):\r\n display_name = _(component_class.display_name.default) if component_class.display_name.default else _('Blank')\r\n else:\r\n display_name = _('Blank')\r\n templates_for_category.append(create_template_dict(display_name, category))\r\n\r\n # add boilerplates\r\n if hasattr(component_class, 'templates'):\r\n for template in component_class.templates():\r\n filter_templates = getattr(component_class, 'filter_templates', None)\r\n if not filter_templates or filter_templates(template, course):\r\n templates_for_category.append(\r\n create_template_dict(\r\n _(template['metadata'].get('display_name')),\r\n category,\r\n template.get('template_id'),\r\n template['metadata'].get('markdown') is not None\r\n )\r\n )\r\n component_templates.append({\r\n \"type\": category,\r\n \"templates\": templates_for_category,\r\n \"display_name\": component_display_names[category]\r\n })\r\n\r\n # Check if there are any advanced modules specified in the course policy.\r\n # These modules should be specified as a list of strings, where the strings\r\n # are the names of the modules in ADVANCED_COMPONENT_TYPES that should be\r\n # enabled for the course.\r\n course_advanced_keys = course.advanced_modules\r\n advanced_component_templates = {\"type\": \"advanced\", \"templates\": [], \"display_name\": _(\"Advanced\")}\r\n # Set component types according to course policy file\r\n if isinstance(course_advanced_keys, list):\r\n for category in course_advanced_keys:\r\n if category in ADVANCED_COMPONENT_TYPES:\r\n # boilerplates not supported for advanced components\r\n try:\r\n component_class = _load_mixed_class(category)\r\n\r\n if component_class.display_name.default:\r\n template_display_name = _(component_class.display_name.default)\r\n else:\r\n template_display_name = advanced_component_display_names.get(category, category)\r\n advanced_component_templates['templates'].append(\r\n create_template_dict(\r\n template_display_name,\r\n category\r\n )\r\n )\r\n except PluginMissingError:\r\n # dhm: I got this once but it can happen any time the\r\n # course author configures an advanced component which does\r\n # not exist on the server. This code here merely\r\n # prevents any authors from trying to instantiate the\r\n # non-existent component type by not showing it in the menu\r\n log.warning(\r\n \"Advanced component %s does not exist. It will not be added to the Studio new component menu.\",\r\n category\r\n )\r\n pass\r\n else:\r\n log.error(\r\n \"Improper format for course advanced keys! 
%s\",\r\n course_advanced_keys\r\n )\r\n if len(advanced_component_templates['templates']) > 0:\r\n component_templates.insert(0, advanced_component_templates)\r\n\r\n return component_templates", "def update_ccd_dir(self, components: str):\n\n for f in os.listdir(components):\n c = ccd_reader.read_pdb_cif_file(os.path.join(components, f)).component\n self.process_template(c)", "def run_template_matching(self):\n\n # Get the number of qubits/clbits for both circuit and template.\n n_qubits_c = len(self.circuit_dag_dep.qubits)\n n_clbits_c = len(self.circuit_dag_dep.clbits)\n\n n_qubits_t = len(self.template_dag_dep.qubits)\n n_clbits_t = len(self.template_dag_dep.clbits)\n\n # Loop over the indices of both template and circuit.\n for template_index in range(0, self.template_dag_dep.size()):\n for circuit_index in range(0, self.circuit_dag_dep.size()):\n # Operations match up to ParameterExpressions.\n if self.circuit_dag_dep.get_node(circuit_index).op.soft_compare(\n self.template_dag_dep.get_node(template_index).op\n ):\n\n qarg_c = self.circuit_dag_dep.get_node(circuit_index).qindices\n carg_c = self.circuit_dag_dep.get_node(circuit_index).cindices\n\n qarg_t = self.template_dag_dep.get_node(template_index).qindices\n carg_t = self.template_dag_dep.get_node(template_index).cindices\n\n node_id_c = circuit_index\n node_id_t = template_index\n\n # Fix the qubits and clbits configuration given the first match.\n\n all_list_first_match_q, list_first_match_c = self._list_first_match_new(\n self.circuit_dag_dep.get_node(circuit_index),\n self.template_dag_dep.get_node(template_index),\n n_qubits_t,\n n_clbits_t,\n )\n\n list_circuit_q = list(range(0, n_qubits_c))\n list_circuit_c = list(range(0, n_clbits_c))\n\n # If the parameter for qubits heuristics is given then extracts\n # the list of qubits for the successors (length(int)) in the circuit.\n\n if self.heuristics_qubits_param:\n heuristics_qubits = self._explore_circuit(\n node_id_c, node_id_t, n_qubits_t, self.heuristics_qubits_param[0]\n )\n else:\n heuristics_qubits = []\n\n for sub_q in self._sublist(list_circuit_q, qarg_c, n_qubits_t - len(qarg_t)):\n # If the heuristics qubits are a subset of the given qubits configuration,\n # then this configuration is accepted.\n if set(heuristics_qubits).issubset(set(sub_q) | set(qarg_c)):\n # Permute the qubit configuration.\n for perm_q in itertools.permutations(sub_q):\n perm_q = list(perm_q)\n for list_first_match_q in all_list_first_match_q:\n list_qubit_circuit = self._list_qubit_clbit_circuit(\n list_first_match_q, perm_q\n )\n\n # Check for clbits configurations if there are clbits.\n if list_circuit_c:\n for sub_c in self._sublist(\n list_circuit_c, carg_c, n_clbits_t - len(carg_t)\n ):\n for perm_c in itertools.permutations(sub_c):\n perm_c = list(perm_c)\n\n list_clbit_circuit = self._list_qubit_clbit_circuit(\n list_first_match_c, perm_c\n )\n\n # Apply the forward match part of the algorithm.\n forward = ForwardMatch(\n self.circuit_dag_dep,\n self.template_dag_dep,\n node_id_c,\n node_id_t,\n list_qubit_circuit,\n list_clbit_circuit,\n )\n forward.run_forward_match()\n\n # Apply the backward match part of the algorithm.\n backward = BackwardMatch(\n forward.circuit_dag_dep,\n forward.template_dag_dep,\n forward.match,\n node_id_c,\n node_id_t,\n list_qubit_circuit,\n list_clbit_circuit,\n self.heuristics_backward_param,\n )\n\n backward.run_backward_match()\n\n # Add the matches to the list.\n self._add_match(backward.match_final)\n else:\n # Apply the forward match part of the 
algorithm.\n forward = ForwardMatch(\n self.circuit_dag_dep,\n self.template_dag_dep,\n node_id_c,\n node_id_t,\n list_qubit_circuit,\n )\n forward.run_forward_match()\n\n # Apply the backward match part of the algorithm.\n backward = BackwardMatch(\n forward.circuit_dag_dep,\n forward.template_dag_dep,\n forward.match,\n node_id_c,\n node_id_t,\n list_qubit_circuit,\n [],\n self.heuristics_backward_param,\n )\n backward.run_backward_match()\n\n # Add the matches to the list.\n self._add_match(backward.match_final)\n\n # Sort the list of matches according to the length of the matches (decreasing order).\n self.match_list.sort(key=lambda x: len(x.match), reverse=True)", "def configureScenario(out, templatepath, geometrypath, nx, ny, nz, sx, sy, sz,\n vtkpath, enable_timing, timingpath, enable_vtk, initial,\n bakspath):\n tree = ET.parse(templatepath)\n root = tree.getroot()\n\n for child in root.findall(\"parallel\"):\n child.attrib[\"numProcessorsX\"] = str(nx)\n child.attrib[\"numProcessorsY\"] = str(ny)\n child.attrib[\"numProcessorsZ\"] = str(nz)\n\n for child in root.findall(\"vtk\"):\n child.attrib[\"enabled\"] = str(enable_vtk).lower()\n child.text = os.path.join(vtkpath, \"vtk\")\n\n for child in root.findall(\"timing\"):\n child.attrib[\"enabled\"] = str(enable_timing).lower()\n child.text = timingpath\n else:\n node = ET.Element(\"timing\", {\"enabled\": str(enable_timing).lower()})\n node.text = timingpath\n root.append(node)\n\n if sx != None and sy != None:\n for child in root.findall(\"geometry\"):\n child.attrib[\"sizeX\"] = str(sx)\n child.attrib[\"sizeY\"] = str(sy)\n child.attrib[\"sizeZ\"] = str(sz)\n\n for child in root.findall(\"geometry\"):\n attrs = child.attrib\n\n if \"obstacle\" in attrs and os.path.isfile(attrs[\"obstacle\"]):\n shutil.copyfile(attrs[\"obstacle\"], geometrypath)\n child.attrib[\"obstacle\"] = geometrypath\n\n for child in root.findall(\"restart\"):\n attrs = child.attrib\n\n if \"in\" in attrs and os.path.isfile(attrs[\"in\"] + \".bak\"):\n shutil.copyfile(attrs[\"in\"] + \".bak\", initial + \".bak\")\n child.attrib[\"in\"] = initial\n\n if \"out\" in attrs:\n child.attrib[\"out\"] = bakspath + \"//\" + child.attrib[\"out\"]\n\n tree.write(out)", "def _make_cloudformation_template(\n project_dir,\n user_data,\n s3_bucket_name,\n sam_template_name,\n elb_name,\n ami_id,\n instance_type,\n autoscaling_min_size,\n autoscaling_desired_capacity,\n autoscaling_max_size,\n):\n\n template_file_path = os.path.join(project_dir, sam_template_name)\n with open(template_file_path, \"a\") as f:\n f.write(\n \"\"\"\\\nAWSTemplateFormatVersion: 2010-09-09\nTransform: AWS::Serverless-2016-10-31\nDescription: BentoML load balanced template\nParameters:\n AmazonLinux2LatestAmiId:\n Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>\n Default: {ami_id}\nResources:\n SecurityGroupResource:\n Type: AWS::EC2::SecurityGroup\n Properties:\n GroupDescription: \"security group for bentoservice\"\n SecurityGroupIngress:\n -\n IpProtocol: tcp\n CidrIp: 0.0.0.0/0\n FromPort: 5000\n ToPort: 5000\n -\n IpProtocol: tcp\n CidrIp: 0.0.0.0/0\n FromPort: 22\n ToPort: 22\n VpcId: !Ref Vpc1\n\n Ec2InstanceECRProfile:\n Type: AWS::IAM::InstanceProfile\n Properties:\n Path: /\n Roles: [!Ref EC2Role]\n\n EC2Role:\n Type: AWS::IAM::Role\n Properties:\n AssumeRolePolicyDocument:\n Statement:\n - Effect: Allow\n Principal:\n Service: [ec2.amazonaws.com]\n Action: ['sts:AssumeRole']\n Path: /\n Policies:\n - PolicyName: ecs-service\n PolicyDocument:\n Statement:\n - Effect: Allow\n 
Action:\n - 'ecr:GetAuthorizationToken'\n - 'ecr:BatchGetImage'\n - 'ecr:GetDownloadUrlForLayer'\n Resource: '*'\n\n LaunchTemplateResource:\n Type: AWS::EC2::LaunchTemplate\n Properties:\n LaunchTemplateName: {template_name}\n LaunchTemplateData:\n IamInstanceProfile:\n Arn: !GetAtt Ec2InstanceECRProfile.Arn\n ImageId: !Ref AmazonLinux2LatestAmiId\n InstanceType: {instance_type}\n UserData: \"{user_data}\"\n SecurityGroupIds:\n - !GetAtt SecurityGroupResource.GroupId\n\n TargetGroup:\n Type: AWS::ElasticLoadBalancingV2::TargetGroup\n Properties:\n VpcId: !Ref Vpc1\n Protocol: HTTP\n Port: 5000\n TargetType: instance\n HealthCheckEnabled: true\n HealthCheckIntervalSeconds: {target_health_check_interval_seconds}\n HealthCheckPath: {target_health_check_path}\n HealthCheckPort: {target_health_check_port}\n HealthCheckProtocol: HTTP\n HealthCheckTimeoutSeconds: {target_health_check_timeout_seconds}\n HealthyThresholdCount: {target_health_check_threshold_count}\n\n LoadBalancerSecurityGroup:\n Type: AWS::EC2::SecurityGroup\n Properties:\n GroupDescription: \"security group for loadbalancing\"\n VpcId: !Ref Vpc1\n SecurityGroupIngress:\n -\n IpProtocol: tcp\n CidrIp: 0.0.0.0/0\n FromPort: 80\n ToPort: 80\n\n InternetGateway:\n Type: AWS::EC2::InternetGateway\n\n Gateway:\n Type: AWS::EC2::VPCGatewayAttachment\n Properties:\n InternetGatewayId: !Ref InternetGateway\n VpcId: !Ref Vpc1\n\n PublicRouteTable:\n Type: AWS::EC2::RouteTable\n Properties:\n VpcId: !Ref Vpc1\n\n PublicRoute:\n Type: AWS::EC2::Route\n DependsOn: Gateway\n Properties:\n DestinationCidrBlock: 0.0.0.0/0\n GatewayId: !Ref InternetGateway\n RouteTableId: !Ref PublicRouteTable\n\n RouteTableSubnetTwoAssociationOne:\n Type: AWS::EC2::SubnetRouteTableAssociation\n Properties:\n RouteTableId: !Ref PublicRouteTable\n SubnetId: !Ref Subnet1\n RouteTableSubnetTwoAssociationTwo:\n Type: AWS::EC2::SubnetRouteTableAssociation\n Properties:\n RouteTableId: !Ref PublicRouteTable\n SubnetId: !Ref Subnet2\n\n Vpc1:\n Type: AWS::EC2::VPC\n Properties:\n CidrBlock: 172.31.0.0/16\n EnableDnsHostnames: true\n EnableDnsSupport: true\n InstanceTenancy: default\n\n Subnet1:\n Type: AWS::EC2::Subnet\n Properties:\n VpcId: !Ref Vpc1\n AvailabilityZone:\n Fn::Select:\n - 0\n - Fn::GetAZs: \"\"\n CidrBlock: 172.31.16.0/20\n MapPublicIpOnLaunch: true\n\n Subnet2:\n Type: AWS::EC2::Subnet\n Properties:\n VpcId: !Ref Vpc1\n AvailabilityZone:\n Fn::Select:\n - 1\n - Fn::GetAZs: \"\"\n CidrBlock: 172.31.0.0/20\n MapPublicIpOnLaunch: true\n\n LoadBalancer:\n Type: AWS::ElasticLoadBalancingV2::LoadBalancer\n Properties:\n IpAddressType: ipv4\n Name: {elb_name}\n Scheme: internet-facing\n SecurityGroups:\n - !Ref LoadBalancerSecurityGroup\n Subnets:\n - !Ref Subnet1\n - !Ref Subnet2\n Type: application\n\n Listener:\n Type: AWS::ElasticLoadBalancingV2::Listener\n Properties:\n DefaultActions:\n - Type: forward\n TargetGroupArn: !Ref TargetGroup\n LoadBalancerArn: !Ref LoadBalancer\n Port: 80\n Protocol: HTTP\n\n AutoScalingGroup:\n Type: AWS::AutoScaling::AutoScalingGroup\n DependsOn: Gateway\n Properties:\n MinSize: {autoscaling_min_size}\n MaxSize: {autoscaling_max_size}\n DesiredCapacity: {autoscaling_desired_capacity}\n AvailabilityZones:\n - Fn::Select:\n - 0\n - Fn::GetAZs: \"\"\n - Fn::Select:\n - 1\n - Fn::GetAZs: \"\"\n LaunchTemplate:\n LaunchTemplateId: !Ref LaunchTemplateResource\n Version: !GetAtt LaunchTemplateResource.LatestVersionNumber\n TargetGroupARNs:\n - !Ref TargetGroup\n VPCZoneIdentifier:\n - !Ref Subnet1\n - !Ref Subnet2\n 
UpdatePolicy:\n AutoScalingReplacingUpdate:\n WillReplace: true\n\nOutputs:\n S3Bucket:\n Value: {s3_bucket_name}\n Description: Bucket to store sam artifacts\n AutoScalingGroup:\n Value: !Ref AutoScalingGroup\n Description: Autoscaling group name\n TargetGroup:\n Value: !Ref TargetGroup\n Description: Target group for load balancer\n Url:\n Value: !Join ['', ['http://', !GetAtt [LoadBalancer, DNSName]]]\n Description: URL of the bento service\n\n\"\"\".format(\n ami_id=ami_id,\n template_name=sam_template_name,\n instance_type=instance_type,\n user_data=user_data,\n elb_name=elb_name,\n autoscaling_min_size=autoscaling_min_size,\n autoscaling_desired_capacity=autoscaling_desired_capacity,\n autoscaling_max_size=autoscaling_max_size,\n s3_bucket_name=s3_bucket_name,\n target_health_check_interval_seconds=TARGET_HEALTH_CHECK_INTERVAL,\n target_health_check_path=TARGET_HEALTH_CHECK_PATH,\n target_health_check_port=TARGET_HEALTH_CHECK_PORT,\n target_health_check_timeout_seconds=TARGET_HEALTH_CHECK_TIMEOUT_SECONDS,\n target_health_check_threshold_count=TARGET_HEALTH_CHECK_THRESHOLD_COUNT,\n )\n )\n return template_file_path", "def build(self):\n self.logger.debug(\"run\")\n\n self.onInit()\n self.work()\n \n self.afterWork()\n\n template = Templateengine(self.currenttemplate)\n template.readTemplateFile()\n contenttype = self.settings.contenttype \n self.defaultTemplateParameter()\n \n try:\n self.content = template.get(self.tplparam)\n except Exception as ex:\n Emergency.stop(ex)\n\n self.onDone()\n \n self.logger.debug(\"done\")", "def pull_template(config: Config) -> int:\n if not config.purpose:\n log.warning('When pushing a template, the `purpose` argument is required.')\n return 6\n templates = pull_document(config, TEMPLATES_DOCUMENT) or create_initial_templates_document()\n if config.category == 'gateway':\n data = extract_gateway_template(templates, config.namespace, config.purpose)\n elif config.category == 'service':\n data = extract_service_template(\n templates, config.namespace, config.service, config.color, config.purpose,\n )\n else:\n log.warning('Unknown category {cat}', cat=config.category)\n return 7\n if data is None:\n log.warning('No such template inside templates document.')\n return 8\n if config.filename == '-':\n print(data)\n else:\n with open(config.filename, 'w') as f:\n f.write(data)\n return 0", "def fillSignalTemplates(opt):\n\n totalSig={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n data.AddFile(os.path.join(opt.input,opt.sig))\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel\n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n print '\\t',catName,categCut\n\n #signal modelling histograms\n histos=[]\n for name,pfix in [('sig_'+catName,''),('sig_%s_sigShape'%catName,'mix')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n wgtExpr='wgt*%f'%(SIGNALXSECS[opt.xangle]*opt.lumi)\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),\n '{0}*({1})'.format(wgtExpr,templCuts),\n 'goff')\n 
h=data.GetHistogram()\n histos.append( h.Clone(name) ) \n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalSig[icat]=h.Integral()\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n \n print '\\t total signal:',totalSig\n return totalSig,templates", "def test_ws_getItemInfosWithReusedPODTemplates(self):\n # in the PM test profile, some templates are only defined for the plonemeeting-assembly\n self.usedMeetingConfigId = \"plonegov-assembly\"\n self.changeUser('pmCreator1')\n item = self.create('MeetingItem')\n # first check that the only returned template is a template rusing another\n viewlet = self._get_viewlet(\n context=item,\n manager_name='plone.belowcontenttitle',\n viewlet_name='document-generation-link')\n templates = viewlet.get_generable_templates()\n self.assertEqual(len(templates), 1)\n self.assertTrue(templates[0].pod_template_to_use)\n self.assertIsNone(templates[0].odt_file)\n # get the reponse\n resp = self._getItemInfos(item.UID(), showTemplates=True, toBeDeserialized=False)\n # we have 1 template\n self.assertEqual(len(resp._itemInfo[0]._templates), 1)\n # templateFilename was taken from template to use\n self.assertEqual(resp._itemInfo[0]._templates[0]._templateFilename, u'Item.odt')\n self.assertEqual(resp._itemInfo[0]._templates[0]._templateFormat, 'odt')", "def read_template(pool, sim_tag, source_id, variable_id, fgt, output_file_path):\n\n connection = pool.connection()\n try:\n\n with connection.cursor() as cursor:\n sql_statement = \"SELECT `template` FROM `run_info` WHERE `sim_tag`=%s and `source`=%s and \" \\\n \"`variable`=%s and `fgt`=%s\"\n row_count = cursor.execute(sql_statement, (sim_tag, source_id, variable_id, fgt))\n if row_count > 0:\n template_data = cursor.fetchone()['template']\n write_file(data=template_data, filename=output_file_path)\n else:\n return None\n\n return True\n except Exception as exception:\n error_message = \"Retrieving template failed for run info entry with source={}, variable={}, sim_tag={}, fgt={}\" \\\n .format(source_id, variable_id, sim_tag, fgt)\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()", "def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")", "def scaled_component(self, key):\n\n if key in self.components:\n dat = self.components[key] \n # Aliases\n elif key in component_from_alias:\n comp = component_from_alias[key]\n if comp in self.components:\n dat = self.components[comp] \n else:\n # Component not present, make zeros\n return np.zeros(self.shape)\n else:\n raise ValueError(f'Component not available: {key}')\n \n # Multiply by scale factor\n factor = self.factor \n \n if factor != 1:\n return factor*dat\n else:\n return dat" ]
[ "0.5333179", "0.5140254", "0.51039773", "0.509984", "0.5071793", "0.5044713", "0.4932854", "0.4887987", "0.4865483", "0.47699484", "0.4758888", "0.47446755", "0.47439492", "0.47232375", "0.46697277", "0.46674794", "0.46576747", "0.46525013", "0.46251747", "0.46167573", "0.45920938", "0.45782018", "0.45761847", "0.45501897", "0.4537932", "0.4536397", "0.45097527", "0.45032844", "0.450163", "0.44910938" ]
0.83371866
0
Given the water level time history, this function computes a least-squares fit polynomial of degree p to the water level data.
def polyfit(dates, levels, p):\n    assert isinstance(p, int) and p > 0, f"{p} is not a positive integer"\n    x = matplotlib.dates.date2num(dates)\n    y = levels\n    try:\n        # Find coefficients of best-fit polynomial f(x) of degree p\n        p_coeff = np.polyfit(x - x[0], y, p)\n        # Convert coefficient into a polynomial that can be evaluated\n        poly = np.poly1d(p_coeff)\n        # Returns a 1D polynomial and time-axis shift\n        return poly, x[0]\n    except TypeError:\n        # workaround to handle unexpected numpy polyfit errors\n        return None, x[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def polyfit(dates, levels, p):\n x = matplotlib.dates.date2num(dates)\n\n y = levels\n\n # Using shifted x values, find coefficient of best-fit\n # polynomial f(x) of degree p\n shifted_times = [t-x[0] for t in x]\n p_coeff = np.polyfit(shifted_times, y, p)\n\n # Convert coefficient into a polynomial that can be evaluated\n # e.g. poly(0.3)\n poly = np.poly1d(p_coeff)\n\n return (poly, x[0])", "def fitfunc(x_unshifted, p=default()):\n x = x_unshifted+p[4]\n xtr, ytr, gradtr = logcontinuity(p)\n if x < xtr:\n return logpeak(x, p)\n else:\n return logpowerlaw(x, p)", "def polyfit(x, y, yerror, pinit=[0,0,0,0]):\n x = np.array(x)\n y = np.array(y)\n pinit[2] = np.mean(y)\n pinit[3] = x[len(x)/2]\n if (type(yerror) != list and type(yerror) != np.ndarray):\n yerror = np.ones(len(x)) * yerror\n fitfunc = lambda p, x: p[2] + p[1]*(x-p[3]) + p[0]*(x-p[3])**2\n errfunc = lambda p,x,y,err: (y-fitfunc(p,x))/(err**2)\n out = scipy.optimize.leastsq(errfunc, pinit, args=(x,y,yerror/y), full_output=1)\n p = out[0]\n covar = out[1]\n return(p)", "def fit_poly(x, y, n=5, log=False):\n \n x_g = x\n x = np.ma.array(x, mask=y.mask).compressed()\n y = y.compressed()\n if log:\n yl = np.log10(y)\n else:\n yl = y\n fit = np.polyfit(x, yl, n)\n p = np.poly1d(fit)\n \n if log:\n return 10**(p(x_g))\n else:\n return p(x_g)", "def fit(x, a, p, b):\n return a * (p ** x) + b", "def exp_fit(timeList, voltageList, ySS):\n\n bList = [log(max(y-ySS,1e-6)) for y in voltageList]\n b = np.matrix(bList).T\n rows = [ [1,t] for t in timeList]\n A = np.matrix(rows)\n #w = (pinv(A)*b)\n (w,residuals,rank,sing_vals) = np.linalg.lstsq(A,b)\n tau = -1.0/w[1,0]\n amplitude = np.exp(w[0,0])\n return (amplitude,tau)", "def construct_poly(data, power):\n return np.power(data, power)", "def fit_poly(data, error_func, degree=4): \n\n # generate initial guss for polynomial model (all coeffs = 1)\n guess = np.poly1d(np.ones(degree + 1, dtype=np.float32))\n\n # plot intial guess\n x = np.linspace(-5, 5, 21)\n plt.plot(x, np.polyval(guess, x), 'm--', linewidth=2.0, label=\"Initial guess\")\n\n # call optimizer to minimize error function\n result = spo.minimize(error_poly, guess, args=(data,), method='SLSQP', options={'disp':True})\n \n # convert optimal result into a poly1d object\n return np.poly1d(result.x)", "def risefit(self, p, x, y, risepower, mode=0):\n assert mode in [-1, 0, 1]\n ix = np.argmin(np.fabs(x-p[2]))\n tm = np.zeros_like(x)\n expf = (x[ix:]-p[2])/p[1]\n pclip = 1.e3\n nclip = 0.\n expf[expf>pclip]= pclip\n expf[expf<-nclip] = -nclip\n tm[ix:] = p[0] * (1.0 - np.exp(-expf))**risepower\n if mode == 0:\n return tm - y\n elif mode == 1:\n return np.linalg.norm(tm-y)\n elif mode == -1:\n return tm\n else:\n raise ValueError('doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)')", "def fit_polynomial_regression(self, x_train, y_train):\n x_poly = self.poly_reg.fit_transform(x_train)\n self.lin_reg.fit(x_poly, y_train)", "def polyLS(pd, x, y, f, X, Y \\\n, coeff = [], xmc = [], ymc = [], ell = [], w = [], ELL = [], W = []) :\n xmc, ymc, ell, w, ELL, W = assignDefaults(x, y, xmc, ymc, ell, w, ELL, W)\n \n numP = int((pd + 1) * (pd + 2) / 2)\n \n if (len(xmc) == 1) and (len(ymc) == 1) :\n \n\n if coeff == [] :\n p = poly(x, y, pd)\n coeff = np.linalg.lstsq(p, f, rcond=None)[0]\n\n B = poly(X, Y, pd)\n approx = B.dot(coeff).flatten()\n coeff_copy = coeff\n \n else :\n \n approx = np.zeros(len(X), float)\n \n if coeff == [] :\n for i in range(len(xmc)) :\n IND = inSquare(x, y, xmc[i], ymc[i], ELL, W)\n if len(IND) 
< int(1.5 * numP) :\n raise ValueError(\"Not enough data for this polynomial \" \\\n + \"degree.\\nEither lower the polynomial degree or \" \\\n + \"decrease the number of subdivisions.\")\n p = poly(x[IND], y[IND], pd)\n lam = np.linalg.lstsq(p, f[IND], rcond=None)[0]\n coeff.append(lam)\n\n coeff_copy = coeff.copy()\n\n for i in range(len(xmc) - 1, -1, -1) :\n IND = inSquare(X, Y, xmc[i], ymc[i], ell, w)\n B = poly(X[IND], Y[IND], pd)\n lam = coeff.pop()\n approx[IND] = B.dot(lam).flatten()\n \n return approx, coeff_copy", "def _fit_poly(y_data, deg=5):\n x = np.arange(1, len(y_data) + 1)\n coeffs = np.polynomial.polynomial.polyfit(\n x, y_data, deg=deg)\n y_pred = poly(x, coeffs)\n return coeffs, np.mean((y_data - y_pred) ** 2)", "def fit(self, pressure, loading, param_guess, optimization_params=None, verbose=False):\n if verbose:\n logger.info(f\"Attempting to model using {self.name}\")\n\n # parameter names (cannot rely on order in Dict)\n param_names = [param for param in self.params]\n guess = numpy.array([param_guess[param] for param in param_names])\n bounds = [[self.param_bounds[param][0] for param in param_names],\n [self.param_bounds[param][1] for param in param_names]]\n\n # remove invalid values in function\n zero_values = ~numpy.logical_and(pressure > 0, loading > 0)\n if any(zero_values):\n logger.warning('Removed points which are equal to 0.')\n pressure = pressure[~zero_values]\n loading = loading[~zero_values]\n\n # define fitting function as polynomial transformed input\n ln_p_over_n = numpy.log(numpy.divide(pressure, loading))\n\n # add point\n add_point = False\n added_point = False\n if optimization_params:\n add_point = optimization_params.pop('add_point', None)\n fractional_loading = loading / max(loading)\n if len(fractional_loading[fractional_loading < 0.5]) < 3:\n if not add_point:\n raise CalculationError(\n \"\"\"\n The isotherm recorded has very few points below 0.5\n fractional loading. 
If a virial model fit is attempted\n the resulting polynomial will likely be unstable in the\n low loading region.\n\n You can pass ``add_point=True`` in ``optimization_params``\n to attempt to add a point in the low pressure region or\n record better isotherms.\n \"\"\"\n )\n added_point = True\n ln_p_over_n = numpy.hstack([ln_p_over_n[0], ln_p_over_n])\n loading = numpy.hstack([1e-1, loading])\n\n def fit_func(x, L, ln_p_over_n):\n for i, _ in enumerate(param_names):\n self.params[param_names[i]] = x[i]\n return self.params['C'] * L**3 + self.params['B'] * L**2 \\\n + self.params['A'] * L - numpy.log(self.params['K']) - ln_p_over_n\n\n kwargs = dict(\n fun=fit_func, # fitting function\n x0=guess, # initial guess\n bounds=bounds, # bounds of the parameters\n args=(loading, ln_p_over_n), # extra arguments to the fit function\n # loss='huber', # use a loss function against outliers\n # f_scale=0.1, # scale of outliers\n )\n if optimization_params:\n kwargs.update(optimization_params)\n\n opt_res = self.fit_leastsq(kwargs)\n\n # assign params\n for index, _ in enumerate(param_names):\n self.params[param_names[index]] = opt_res.x[index]\n\n self.rmse = numpy.sqrt(numpy.sum((opt_res.fun)**2) / len(loading))\n\n if verbose:\n logger.info(f\"Model {self.name} success, RMSE is {self.rmse:.4g}\")\n n_load = numpy.linspace(1e-2, numpy.amax(loading), 100)\n virial_plot(\n loading, ln_p_over_n, n_load,\n numpy.log(numpy.divide(self.pressure(n_load), n_load)), added_point\n )", "def test_ww_power_law_fit_directly(self):\n\n\t\t\tnp.random.seed(123)\n\t\t\tdata = np.random.pareto(2.5, 100)\n\t\t\t\n\t\t\n\t\t\tresult = WW_powerlaw.pl_fit(data, xmax=np.max(data), pl_package=POWERLAW_PACKAGE)\n\t\t\texpected_alpha = result.alpha\n\t\t\tself.assertAlmostEqual(expected_alpha, 2.5, delta=0.1)\n\t\n\t\t\tresult = WW_powerlaw.pl_fit(data, xmax=np.max(data), pl_package=WW_POWERLAW)\n\t\t\tactual_alpha = result.alpha\t\n\t\t\tself.assertAlmostEqual(expected_alpha, actual_alpha, delta=0.1)", "def linearfit(x,y):\n fit = np.polyfit(x,y,1)\n fit_fn = np.poly1d(fit)\n yy = fit_fn(x) \n \n return yy", "def residuals_PL(self, p, data, x):\n err = data - self.PowerLaw(x,p)\n return err", "def logpeak(x, p=default()):\n model = p[0] - p[1]*(x**2)\n return model", "def lagrange_poly(x, xp, fp):\n\n f = 0.0\n \n # sum over points\n m = 0\n while (m < len(xp)):\n\n # create the Lagrange basis polynomial for point m \n l = None\n\n n = 0\n while (n < len(xp)):\n if n == m:\n n += 1\n continue\n\n if l == None:\n l = (x - xp[n])/(xp[m] - xp[n])\n else:\n l *= (x - xp[n])/(xp[m] - xp[n])\n\n n += 1\n\n \n f += fp[m]*l\n\n m += 1\n\n return f", "def fitfunc(self, x, p):\n S = np.zeros(self.npt)\n for i in range(0, len(p), self.maxparm):\n ptyp = self.peakTyp[int(i/self.maxparm)]\n xm = p[i]\n amp = p[i+1]\n w = p[i+2]\n a = p[i+3]\n m = p[i+4]\n if ptyp == 'G':\n # peak type = Gaussian\n S = S + self.gauss(x, xm, amp, w)\n elif ptyp == 'L':\n # peak type = Lorentzian\n S = S + self.lorentz(x, xm, amp, w)\n elif ptyp == 'P':\n # peak type = Pseudo-Voigt\n S = S + self.psVoigt(x, xm, amp, w, m)\n elif ptyp == 'AG':\n # peak type = Asymmetric Gaussian\n S = S + self.agauss(x, xm, amp, w, a)\n elif ptyp == 'AL':\n # peak type = Asymmetric Lorentzian\n S = S + self.alorentz(x, xm, amp, w, a)\n elif ptyp == 'AP':\n # peak type = Asymmetric Pseudo-Voigt\n S = S + self.aPsVoigt(x, xm, amp, w, a, m)\n return S", "def _model(x, p):\n y_hat = 0\n for i, pi in enumerate(reversed(p)):\n y_hat += x**i * pi\n return y_hat", "def Yp(t, p, 
q):\n \n return (t**p - 1) / p + (1-t**(-q)) / q", "def get_polyfit(self, stack_of_points, uq=False):\n N = len(self.coefficients)\n if uq:\n return np.dot(self.get_poly(stack_of_points).T , self.coefficients.reshape(N, 1)), self._get_polystd(stack_of_points)\n else:\n return np.dot(self.get_poly(stack_of_points).T , self.coefficients.reshape(N, 1))", "def Y(t, p, q):\n \n if t <= 0:\n return float('inf')\n \n if q == 1:\n return (t**(p+1) - 1) / (p * (p+1)) - np.log(t) / q + (p - 1) / p * (t-1)\n else:\n return (t**(p+1) - 1) / (p * (p+1)) + (t**(1-q) - 1) / (q*(q-1)) + (p - q) / (p * q) * (t-1)", "def polyval(p, x):\r\n val = 0\r\n ii = len(p) - 1\r\n for i in range(len(p) - 1):\r\n val += p[i] * (x ** ii)\r\n ii -= 1\r\n return val + p[-1]", "def get_polyfit_function(self):\n N = len(self.coefficients)\n return lambda x: np.dot( self.get_poly(x).T , self.coefficients.reshape(N, 1) )", "def y_from_p(p):\n\ty = -np.log(p)\n\treturn y", "def least_sqr_fit(self,x, y):\n A = np.array([ x, np.ones(len(x))])\n # linearly generated sequence\n a,f,g,h = np.linalg.lstsq(A.T,y) # obtaining the parameters\n print 'de gevonden rechte = %.10f x + %.10f' %(a[0], a[1])\n lined = map(lambda g: a[0]*g +a[1],x) # regression line\n return lined , a", "def brute_leastsquare_fit(fun, x_data, y_data,weight_data=None,p_names=None,p_min_max_steps_dict=None,\r\n const_params=[], visualize=False):\r\n \r\n if p_names == None or p_min_max_steps_dict==None:\r\n raise Exception ('p_names and p_min_max_steps must be given!'+ \r\n 'structure of p_min_max_steps_dict: {\"pname0\":[min0,max0,brute_steps0]}')\r\n \r\n params = Parameters() ### initialize LMfit parameters\r\n for p_name in p_names:\r\n min_val=p_min_max_steps_dict[p_name][0]\r\n max_val=p_min_max_steps_dict[p_name][1]\r\n steps=p_min_max_steps_dict[p_name][2]\r\n params.add(p_name,value=min_val,\r\n min=min_val,\r\n max=max_val,\r\n brute_step=(max_val-min_val)/(steps-1))\r\n \r\n ### define function to be minimized for fit \r\n \r\n def cost_function_fit(p=params):\r\n def minimize_fun(pars):\r\n \r\n v=pars.valuesdict()\r\n arglist=[]\r\n for p_name in p_names:\r\n arglist.append(v[p_name])\r\n \r\n for const_param in const_params:\r\n arglist.append(const_param)\r\n \r\n ret=np.array((fun(x_data,*arglist)-y_data),dtype=float)\r\n if weight_data is not None:\r\n ret=ret*np.sqrt(weight_data)\r\n return(ret)\r\n brute_result=lmfit.minimize(minimize_fun,params,method='brute',nan_policy='omit')\r\n best_result=copy.deepcopy(brute_result)\r\n for candidate in brute_result.candidates[0:5]:\r\n trial = lmfit.minimize(minimize_fun, params=candidate.params,method='leastsq',nan_policy='omit')\r\n if trial.chisqr < best_result.chisqr:\r\n best_result = trial\r\n \r\n return((best_result,brute_result))\r\n \r\n best_result,brute_result = cost_function_fit()\r\n arg_list=[]\r\n for p_name in p_names:\r\n arg_list.append(best_result.params.valuesdict()[p_name])\r\n for const_param in const_params:\r\n arg_list.append(const_param)\r\n \r\n \r\n if visualize == True:\r\n plot_brute_leastsquares_results(brute_result,leastsq_fit_result=best_result)\r\n plt.figure()\r\n plt.plot(x_data,y_data,label='data',color='blue')\r\n plt.plot(x_data,fun(x_data,*arg_list),label='Fit',color='red')\r\n plt.title(best_result.params.valuesdict())\r\n plt.show()\r\n return (arg_list[0:len(p_names)])", "def eval_poly(self, p):\n A = self\n m, n = A.shape\n\n if m != n:\n raise DMNonSquareMatrixError(\"Matrix must be square\")\n\n if not p:\n return self.zeros(self.shape, self.domain)\n elif 
len(p) == 1:\n return p[0] * self.eye(self.shape, self.domain)\n\n # Evaluate p(A) using Horner's method:\n # XXX: Use Paterson-Stockmeyer method?\n I = A.eye(A.shape, A.domain)\n p_A = p[0] * I\n for pi in p[1:]:\n p_A = A*p_A + pi*I\n\n return p_A", "def fit_polynomial(self,x,t,m,lambda_reg=0):\n\n phi = self.designMatrix(x,m)\n phi_trans = np.transpose(phi)\n\n a = phi_trans.dot(phi) + lambda_reg*np.identity(phi.shape[1])\n b = np.linalg.inv(a)\n c = b.dot(phi_trans)\n\n w_ml = c.dot(t)\n\n return w_ml, phi" ]
[ "0.68798745", "0.6365099", "0.61925536", "0.6161876", "0.61507106", "0.6070283", "0.5868757", "0.58644587", "0.5821649", "0.5798074", "0.5762538", "0.5758994", "0.5749708", "0.5739675", "0.5738362", "0.57277375", "0.5726155", "0.57221943", "0.5698397", "0.5694881", "0.5680145", "0.5658517", "0.56348556", "0.5618798", "0.56093514", "0.5570519", "0.5568915", "0.5560053", "0.55344117", "0.55337983" ]
0.70911086
0
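A minimal usage sketch of the `polyfit` document above (illustrative only, not part of the dataset row). The point it clarifies is the return convention: the function returns both the fitted `np.poly1d` and the time-axis offset `x[0]`, so the polynomial must be evaluated at `date2num(dates) - offset`. The sample dates and levels below are assumptions.

```python
# Illustrative only -- sample dates/levels are assumptions, not dataset content.
from datetime import datetime, timedelta

import matplotlib.dates
import numpy as np

dates = [datetime(2024, 1, 1) + timedelta(hours=h) for h in range(6)]
levels = [0.50, 0.52, 0.55, 0.53, 0.58, 0.60]

poly, d0 = polyfit(dates, levels, p=3)   # polyfit as defined in the document above
if poly is not None:
    # Evaluate the fit on the same shifted time axis that was used when fitting.
    x = matplotlib.dates.date2num(dates)
    print(np.round(poly(x - d0), 3))
```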
Convert a trace codes text to a dictionary.
def from_trace_codes_text(codes_text: str) -> Mapping[int, str]:\n    return {int(s[0], 16): s[1] for s in map(lambda l: l.split(), codes_text.splitlines())}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_trace_codes_file(path: str) -> Mapping[int, str]:\n with open(path, 'r') as fd:\n return from_trace_codes_text(fd.read())", "def default_trace_codes() -> Mapping[int, str]:\n with open(Path(__file__).resolve().parent.joinpath('trace.codes'), 'r') as fd:\n return from_trace_codes_text(fd.read())", "def _convert_to_dict(r):\n if not r:\n return r\n else:\n return dict(token=r[0], code=r[2], value=r[1], address='-')", "def parseCodeLine(self, text):\n\t\tcodeRegex = re.compile(r\"^\\[0x([0-9a-f]{8})\\]\\t0x([0-9a-f]{8}) (.+)$\")\n\t\t# attempt to match the code\n\t\tm = codeRegex.match(text)\n\t\t# If no match found, return None.\n\t\tif (m is None): return None\n\t\tresult = {}\n\t\tresult['encoded_instruction'] = int(m.group(2),16)\n\t\tresult['address'] = int(m.group(1),16)\n\t\t# Do we have a comment?\n\t\tinstr = m.group(3)\n\t\tif (\";\" in instr):\n\t\t\tpos = instr.index(\";\")\n\t\t\tcomment = instr[pos+1:].strip()\n\t\t\tinstr = instr[:pos].strip()\n\t\telse:\n\t\t\tinstr = instr.strip()\n\t\t\tcomment = \"\"\n\t\tresult['comment'], result['instruction'] = comment, instr\n\t\treturn result", "def inf2dict(text):\n lines = text.strip().split('\\n')\n pairs, extra_lines = split_lines(lines)\n return parse_pairs(pairs, extra_lines)", "def convert_to_dict(text):\n content_dict = dict()\n content_dict['clean_text'] = text\n return content_dict", "def _preprocess(program: str) -> Dict[int, int]:\n i_map = {}\n stack = []\n for p_ptr in range(len(program)):\n if program[p_ptr] == \"[\":\n stack.append(p_ptr)\n elif program[p_ptr] == \"]\":\n if len(stack) == 0:\n raise RuntimeError\n i = stack.pop()\n i_map[i] = p_ptr\n i_map[p_ptr] = i\n if len(stack) != 0:\n raise RuntimeError\n return i_map", "def preprocess_text(text: str) -> Tuple[List[str], Dict]:\n raise NotImplementedError", "def read_code(string):\n\n code = defaultdict(int)\n for i, x in enumerate(string.split(',')):\n code[i] = int(x)\n return code", "def read_dict(txt_file_path):\n txt_file = open(txt_file_path,'r')\n txt_raw = txt_file.read()\n txt_as_dict = ast.literal_eval(txt_raw)\n txt_file.close()\n return txt_as_dict", "def get_codes(path):\n hospital_codes = {}\n with open(path, encoding='utf8') as f:\n for line in f:\n val, key = line.split(\",\")\n hospital_codes[int(key)] = val\n return hospital_codes", "def pgn2dict(txt):\n result = {}\n for line in txt:\n if not line:\n continue\n match = re.search(r'(\\w+) \"(.*)\"', line).groups()\n result[match[0]] = match[1].replace(\"'\", \"''\")\n\n return result", "def _convert_tags_to_dict(text_list_tags):\n return OrderedDict([re.findall(r\"\"\"\\s*_(\\w+)\\s+(.+?)\\s*$\"\"\", row)[0] for row in text_list_tags])", "def get_trans_dict(self):\n translated = dict([(k,v) for (k,v) in self._trans_dict.items() if k is not v])\n frm = \" \".join([ c + ' |' for c in translated.keys()])\n to = \" \".join([ c + ' |' for c in translated.values()])\n\n return \"code: \\t{}\\nactual:\\t{}\".format(frm, to)", "def hl7_str_to_dict(raw_hl7, use_long_name=True):\n message = parse_hl7(raw_hl7)\n return hl7_message_to_dict(message, use_long_name)", "def get_string_stech_dict(stech_string):\n stech_dict = {}\n try:\n stech_lst = stech_string.split(\",\") # Generates a stech list: [\"A:3\", \"B:2\", ...]\n for stech in stech_lst:\n chain, number = stech.split(\":\")\n stech_dict[chain] = int(number) # Chain id as key and number as value: { \"A\": 3, \"B\": 2, ...}\n return stech_dict\n except:\n sys.stderr.write(\"Stechometry string format is wrong, please follow this format: 
A:2,B:11,C:4, ...\")\n sys.exit(1)", "def parse_lines_to_dict(lines):\n res = {k: v.strip() for k, v in (m.split(':', 1) for m in lines)}\n return res", "def get_codes(tree: HuffmanTree) -> Dict[int, str]:\n # Edge Case\n if tree is None or (tree.symbol is None and tree.is_leaf()):\n return {}\n else:\n return _get_codes_helper(tree, \"\")", "def code_mapper(file, idx):\n with open('./I94_SAS_Labels_Descriptions.SAS') as f:\n f_content = f.read()\n f_content = f_content.replace('\\t', '')\n f_content2 = f_content[f_content.index(idx):]\n f_content2 = f_content2[:f_content2.index(';')].split('\\n')\n f_content2 = [i.replace(\"'\", \"\") for i in f_content2]\n dic = [i.split('=') for i in f_content2[1:]]\n dic = dict([i[0].strip(), i[1].strip()] for i in dic if len(i) == 2)\n return dic", "def _txt_to_basis_dict(basis_txt):\n\n symbol = basis_txt[0].split()[0]\n\n def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n basis_pure = basis_txt[1:]\n\n section_marks = []\n for i, line in enumerate(basis_pure):\n if not is_number(line.split()[0]):\n section_marks.append(i)\n\n shells = []\n for i in section_marks[:-1]:\n type, n_func, _ = basis_pure[i].split()\n n_func = int(n_func)\n\n if type.upper() in ['SP']:\n p_exponent, con_coefficients, p_con_coefficients = np.array([line.split()\n for line in basis_pure[i + 1:i + n_func + 1]],\n dtype=float).T\n else:\n p_exponent, con_coefficients = np.array([line.split()\n for line in basis_pure[i + 1:i + n_func + 1]],\n dtype=float).T\n p_con_coefficients = np.zeros_like(p_exponent)\n\n\n shells.append({'shell_type': type,\n 'p_exponents': list(p_exponent),\n 'con_coefficients': list(con_coefficients),\n 'p_con_coefficients': list(p_con_coefficients)})\n\n return {'symbol': symbol,\n 'shells': shells}", "def from_text(cls, text):\n raw = decode_b64(json.loads(text))\n raw[0] = Code(raw[0]) # make it an object of type Code\n return cls(*raw)", "def form_dict(path):\n data={}\n try:\n f=codecs.open(path, \"r\", \"utf-8\")\n text=f.read()\n f.close()\n except Exception:text=None\n if text!=None:\n #print text\n lines=text.split(\"\\n\")\n for sline in lines:\n if sline!=\"\" or sline==None:line_data=sline.partition(\":\")\n if len(line_data)==3:\n try:\n kin=line_data[0].strip().decode(\"utf-8\")\n data[kin.lower()]=line_data[2].strip()\n except:pass\n return data", "def convert(text: str) -> Iterator[Tuple[str, str]]:\n with gzip.open(resource_filename(\"nautc\", \"txt.gz\"), \"rt\") as txt:\n return (\n lambda idxs: map(\n lambda kv: (\n kv[0],\n \"\".join(\n map(\n lambda char: char in idxs\n and kv[1][idxs[char]]\n or char,\n text,\n )\n ),\n ),\n (\n lambda lines: map(\n lambda t, c: (t, c.split(\"||\")),\n lines[::2],\n lines[1::2],\n )\n )(txt.read().splitlines()),\n )\n )(\n dict(\n map(\n lambda kv: (kv[1], kv[0]),\n enumerate(ascii_letters + digits),\n )\n )\n )", "def forward_code_map(self):\n\n return { c.key:c.value for c in self.codes}", "def cigar_to_map(cigar_text):\n assert 'I' not in cigar_text\n spans, posn = [], 0\n for n, c in pattern.findall(cigar_text):\n if n:\n n = int(n)\n else:\n n = 1\n \n if c == 'M':\n spans.append(Span(posn, posn+n))\n posn += n\n else:\n spans.append(LostSpan(n))\n map = Map(spans = spans, parent_length = posn)\n return map", "def parse_event(self, raw_data: str):\n event_attributes = raw_data.replace(\"\\n\", \"\").split(\",\")\n for j in range(len(event_attributes)):\n event_attributes[j] = str_to_number(event_attributes[j])\n return dict(zip(METASTOCK_7_COLUMN_KEYS, 
event_attributes))", "def parse_text(self, text: str) -> SectionDict:", "def _get_codes_helper(tree: HuffmanTree, code: str,\n symbol_dict: Any = None) -> Dict[int, str]:\n\n if tree.is_leaf():\n symbol_dict[tree.symbol] = code\n return symbol_dict\n\n else:\n if symbol_dict is None:\n symbol_dict = {}\n\n symbol_dict = _get_codes_helper(tree.left, code + \"0\", symbol_dict)\n symbol_dict = _get_codes_helper(tree.right, code + \"1\", symbol_dict)\n\n return symbol_dict", "def parse(line):\n return dict([pair.split(':') for pair in line.split()])", "def json_scalyr_config_decode(text):\n return json_lib.parse(text)" ]
[ "0.6811558", "0.61674523", "0.6107895", "0.59936696", "0.592808", "0.54712546", "0.5466992", "0.54269063", "0.5381498", "0.53706264", "0.53520334", "0.5219542", "0.520817", "0.51722044", "0.5172045", "0.51429605", "0.51282406", "0.51202375", "0.5092845", "0.5079664", "0.50580245", "0.50468576", "0.50452554", "0.5036278", "0.5031142", "0.5003228", "0.4997807", "0.49753207", "0.49450254", "0.49384713" ]
0.8251909
0
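A small sketch (not part of the dataset) of the line format the `from_trace_codes_text` document expects: each line is a hexadecimal code followed by a name, and the result maps the integer code to that name. The example codes below are made up.

```python
# Illustrative only; these codes are invented to show the "<hex> <name>" line format.
codes_text = """0x1 TRACE_START
0x2 TRACE_STOP
0x10c0410 BSC_open"""

mapping = from_trace_codes_text(codes_text)
print(mapping[0x1])        # 'TRACE_START'
print(mapping[0x10c0410])  # 'BSC_open'
```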
Read trace codes from a file.
def from_trace_codes_file(path: str) -> Mapping[int, str]:\n    with open(path, 'r') as fd:\n        return from_trace_codes_text(fd.read())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_trace(path):\n suffix = path[-3:]\n if suffix == 'txt':\n return read_csv(path)\n elif suffix == 'trc':\n return read_trc(path)\n else:\n raise ValueError(\"Invalid file extension: %s\" % suffix)", "def read_codes(self, filename=\"static/codes.txt\"):\n with open(filename, \"r\") as f:\n contents = f.read().splitlines()\n code = contents[0]\n \n return code", "def read_code(filename):\n f = open('files/%s.code' % filename)\n string = f.read()\n tokens = scan(string)\n ret = parse_code(tokens)\n return ret", "def default_trace_codes() -> Mapping[int, str]:\n with open(Path(__file__).resolve().parent.joinpath('trace.codes'), 'r') as fd:\n return from_trace_codes_text(fd.read())", "def _readSecStrucCodes(fileName):\n\n ssCodes = []\n file = open(fileName)\n \n line = file.readline()\n while line:\n data = line.split()\n \n if data and (data[0][0] != '#'):\n ssCodes.append(data[2])\n \n line = file.readline()\n \n file.close()\n \n return ssCodes", "def read_file(filename):\n f = open(filename)\n code = f.read()\n f.close()\n return code", "def read_asm_file_to_code(file_path):\r\n with open(file_path, 'r') as file:\r\n asm_code = []\r\n for line in file:\r\n asm_code.append(line)\r\n\r\n return asm_code", "def parse_trace_file(file_path):\n\n if not os.path.isfile(file_path):\n LOGGER.error(\"Trace file '%s' doesn't exist.\", file_path)\n return None\n\n try:\n with open(file_path, \"r\", newline=\"\\n\") as f:\n data_points = f.readlines()\n except IOError:\n LOGGER.error(\"Exception while reading trace file '%s'. Terminating.\", file_path)\n sys.exit(0)\n\n data_point_tuple_list = []\n for value in range(0, len(data_points)):\n split_string = data_points[value].split(\" \")\n\n memory_address = split_string[0].strip()\n read_or_write = split_string[1].strip()\n\n current_tuple = (memory_address, read_or_write)\n data_point_tuple_list.append(current_tuple)\n\n return data_point_tuple_list", "def read_file(self,filename):\n\n f = open(filename,'r')\n lines = f.readlines()\n f.close()\n\n sequences = [l.strip() for l in lines if l.strip() != \"\"]\n\n self.load_sequences(sequences)", "def read_file(path_to_file):\n 8", "def read_from_file(self, filename: str) -> None:", "def get_tracefile_offered_load_from_ycsb(ifile):\n\n tracefile = get_args_from_ycsb_file(ifile, [(\"tracefile\", \"=\")])[0]\n\n ## Trace file format is: synthetic_trace_3000_600_poisson.data_scle_1_ycsb.trace\n return os.path.basename(tracefile).split(\"_\")[2]", "def read_traces(coadd_file):\n trc_file = coadd_file.replace('.fits', '_traces.json')\n tdict = ltu.loadjson(trc_file)\n # Return\n return tdict['obj'], tdict['arc']", "def read_file(self, filename):\n with open(filename, 'r') as file:\n for line in file:\n l = line.strip()\n\n if l == ST_POS0:\n self._state = ST_POS0\n elif l == ST_TRNS:\n self._state = ST_TRNS\n elif l == ST_POS1:\n self._state = ST_POS1\n else:\n self._parse_line(l)\n self._state = None", "def parse_trace_file(filename):\n f = open(filename, 'r')\n trace_data = f.read()\n\n messages = parse_atm_messages(trace_data) + parse_host_messages(trace_data)\n f.close()\n messages.sort()\n\n return messages", "def read_file(file_path):\n with open(file_path) as file_h:\n return file_h.readlines()", "def read_file(file_path):\n try:\n with open(file_path, \"r\") as file_obj:\n data = file_obj.read()\n code_type = classify_response(data)\n return data, code_type\n\n except FileNotFoundError:\n writer(f\"\\nerror: Unable to read file {file_path}\\n\", FORMAT[\"ERROR\"])\n sys.exit(1)", "def 
load_traces(self, filename):\n\n self.traces = self.load(filename)", "def read_flows_from_file(filename: str):\n with open(os.path.join(DATA_DIR, filename)) as f:\n return [int(x.strip()) for x in f.readlines() if x.strip()]", "def read_from_file(filename):\n with open(filename, \"r\") as f:\n f.readlines()", "def read_file(self, file_path): \n logging.info('Lendo arquivo de {0}'.format(file_path))\n file_with_tags = open(file_path, \"r\", encoding='utf-8')\n return file_with_tags.readlines()", "def readFromFile(filename):\n raise NotImplementedError", "def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs", "def read_gcode(filename):\n\t##TODO: parse/read file line by line for memory considerations\n\twith open(filename, 'r') as fh_in:\n\t\tgcode_raw = fh_in.readlines()\n\t\tgcode_raw = [gcode.rstrip(';\\n') for gcode in gcode_raw] # stripping off trailing semicolon and newlines\n\treturn gcode_raw", "def ReadReachedSymbols(filename):\n with open(filename, 'r') as f:\n return [line.strip() for line in f.readlines()]", "def read_file(file_path):\n scan = nib.load(filename=file_path)\n scan = scan.get_fdata()\n return scan", "def readFastaFile(filename):", "def get_codes(path):\n hospital_codes = {}\n with open(path, encoding='utf8') as f:\n for line in f:\n val, key = line.split(\",\")\n hospital_codes[int(key)] = val\n return hospital_codes", "def read_file(filename, tokenizer, is_cased):\n sents = []\n with open(filename) as f:\n for line in f:\n sents.append(tokenizer(line, is_cased))\n return sents", "def read_lines_from_file(fname):\n return []" ]
[ "0.69202065", "0.65100384", "0.6413361", "0.6296157", "0.61902547", "0.6171397", "0.6058499", "0.59744567", "0.594655", "0.5926533", "0.591121", "0.58754104", "0.58621883", "0.57683814", "0.5767075", "0.5757081", "0.5746866", "0.57361424", "0.5710775", "0.56944233", "0.5655237", "0.5653463", "0.56525993", "0.56021506", "0.5533444", "0.5513585", "0.54325515", "0.5416666", "0.54011697", "0.539719" ]
0.7683125
0
Get the default trace codes mapping.
def default_trace_codes() -> Mapping[int, str]:\n    with open(Path(__file__).resolve().parent.joinpath('trace.codes'), 'r') as fd:\n        return from_trace_codes_text(fd.read())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_trace_codes_file(path: str) -> Mapping[int, str]:\n with open(path, 'r') as fd:\n return from_trace_codes_text(fd.read())", "def forward_code_map(self):\n\n return { c.key:c.value for c in self.codes}", "def codes(self, name):\n return self._get_valuemap(name, non_mapped='codes')", "def get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal", "def build_default_catch_all_map(self):\n self.default_catch_all_map = self.data['catchall']", "def reverse_code_map(self):\n\n return { c.value:(c.ikey if c.ikey else c.key) for c in self.codes}", "def from_trace_codes_text(codes_text: str) -> Mapping[int, str]:\n return {int(s[0], 16): s[1] for s in map(lambda l: l.split(), codes_text.splitlines())}", "def get_registry_codes( ):\n return _theRegistry.get_codes( )", "def theme_basecodes(self, theme_index):\n return self._theme_basecodes[theme_index]\n #return self._themes[theme_index]", "def currency_code_mappings():\n return [(a, CURRENCIES[a].name) for a in settings.CURRENCIES]", "def _initalize_mapping():\n linter = lint.PyLinter()\n linter.load_defaults()\n linter.load_default_plugins()\n\n mapping = {\n message.msgid: message.symbol\n for message in linter.msgs_store.messages\n }\n\n return mapping", "def get_codes(tree: HuffmanTree) -> Dict[int, str]:\n # Edge Case\n if tree is None or (tree.symbol is None and tree.is_leaf()):\n return {}\n else:\n return _get_codes_helper(tree, \"\")", "def get_letter_to_code_mappings():\n return {\n \"a\": \"Alfa\", \"b\": \"Bravo\", \"c\": \"Charlie\", \"d\": \"Delta\", \"e\": \"Echo\",\n \"f\": \"Foxtrot\", \"g\": \"Golf\", \"h\": \"Hotel\", \"i\": \"India\", \"j\":\n \"Juliett\", \"k\": \"Kilo\", \"l\": \"Lima\", \"m\": \"Mike\", \"n\": \"November\", \"o\":\n \"Oscar\", \"p\": \"Papa\", \"q\": \"Quebec\", \"r\": \"Romeo\", \"s\": \"Sierra\", \"t\":\n \"Tango\", \"u\": \"Uniform\", \"v\": \"Victor\", \"w\": \"Whiskey\", \"x\": \"Xray\",\n \"y\": \"Yankee\", \"z\": \"Zulu\", \"0\": \"Zero\", \"1\": \"One\", \"2\": \"Two\", \"3\":\n \"Three\", \"4\": \"Four\", \"5\": \"Five\", \"6\": \"Six\", \"7\": \"Seven\", \"8\":\n \"Eight\", \"9\": \"Niner\", \"=\": \"Equals\", \"?\": \"Query\", \"/\": \"Slash\", \",\":\n \"Comma\", \".\": \"Stop\", \":\": \"Colon\", \"'\": \"Apostrophe\", \"-\": \"Dash\",\n \"(\": \"Open\", \")\": \"Close\", \"@\": \"At\",\n }", "def default_code():\n return uuid.uuid4().hex", "def fallback_status_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"fallback_status_codes\")", "def sigmap():\n return _SIGMAP.copy()", "def _default_observation_map(self) -> Dict[str, ObservationMapValue]:\n pass", "def default_regs(self):\n\n return {}", "def get_colour_map(self):\n try:\n return {'C# minor' : 'Grey', 'A major' : 'Red', 'D minor' : 'Green',\n 'Eb Purple': 'greenyellow', 'D major' : 'Pink', 'G major' : 'Orange',\n 'G minor': 'goldenrod', 'A minor' : 'indianred', 'C minor' : 'peachpuff',\n 'B minor' : 'deepskyblue', 'Ab Major' : 'firebrick', 'Eb / D# minor' : 'orchid',\n 'Ab major' : 'moccasin', 'G# minor' : 'slateblue', 'Eb major' : 'turquoise',\n 'C major' : 'tomato', 'B major' : 'darkmagenta', 'F major' : 'olivedrab',\n 'F minor' : 'olive', 'Bb major' : 'lightsteelblue', 'Db major' : 'plum',\n 'Bb minor' : 'mediumspringgreen', 'E minor' : 'lightsalmon',\n 'F# / Gb major' : 
'gold', 'F# minor' : 'burlywood'}\n\n # If colour not found to match, return grey as a last resort\n except KeyError as e:\n print('Unmatched colour: {0}'.format(e))\n return 'Grey'", "def code(self):\n\n return self._code or self._default_code", "def get_training_labels():\n\n\tmapping = dict()\n\tmapping[constants.ASCause.apsp] = 0\n\tmapping[constants.ASCause.bl] = 1\n\tmapping[constants.ASCause.ce] = 2\n\tmapping[constants.ASCause.dfl] = 3\n\tmapping[constants.ASCause.lrssi] = 4\n\tmapping[constants.ASCause.pwr_state] = 5\n\treturn mapping", "def _build_return_code_enum():\n prefix = 'XTT_RETURN_'\n codes = {k[len(prefix):]:v for (k, v) in vars(_lib).items() if k.startswith(prefix)}\n return IntEnum('ReturnCode', codes)", "def _get_codes_helper(tree: HuffmanTree, code: str,\n symbol_dict: Any = None) -> Dict[int, str]:\n\n if tree.is_leaf():\n symbol_dict[tree.symbol] = code\n return symbol_dict\n\n else:\n if symbol_dict is None:\n symbol_dict = {}\n\n symbol_dict = _get_codes_helper(tree.left, code + \"0\", symbol_dict)\n symbol_dict = _get_codes_helper(tree.right, code + \"1\", symbol_dict)\n\n return symbol_dict", "def get_default():\n return build_key_map({\n \"LEFT\" : \"move-left\",\n \"RIGHT\" : \"move-right\",\n \"UP\" : \"move-up\",\n \"DOWN\" : \"move-down\",\n \"S-LEFT\" : \"scroll-left\",\n \"S-RIGHT\" : \"scroll-right\",\n\n \"C-b\" : \"move-left\", # back\n \"C-f\" : \"move-right\", # forward\n \"C-k\" : \"delete-row\",\n \"C-p\" : \"move-up\", # previous\n \"C-n\" : \"move-down\", # next\n \"M-v\" : \"move-up-page\",\n \"C-v\" : \"move-down-page\",\n \"C-x\" : PREFIX,\n (\"C-x\", \"C-s\") : \"save\",\n (\"C-x\", \"C-w\") : \"save-as\",\n \"C-z\" : \"undo\",\n\n \";\" : \"decrease-column-width\",\n \"'\" : \"increase-column-width\",\n \":\" : \"decrease-column-precision\",\n \"\\\"\" : \"increase-column-precision\",\n\n \"M-#\" : \"toggle-show-row-num\",\n \"M-$\" : \"hide-column\",\n \"M-x\" : \"command\",\n\n \"q\" : \"quit\",\n })", "def map_event_code(event_code):\n event_code = int(event_code)\n\n # Honestly, these are just guessing based on the below event list.\n # It could be wrong, I have no idea.\n if 1100 <= event_code <= 1199:\n return ALARM_GROUP\n\n if 3100 <= event_code <= 3199:\n return ALARM_END_GROUP\n\n if 1300 <= event_code <= 1399:\n return PANEL_FAULT_GROUP\n\n if 3300 <= event_code <= 3399:\n return PANEL_RESTORE_GROUP\n\n if 1400 <= event_code <= 1499:\n return DISARM_GROUP\n\n if 3400 <= event_code <= 3799:\n return ARM_GROUP\n\n if 1600 <= event_code <= 1699:\n return TEST_GROUP\n\n if 5000 <= event_code <= 5099:\n return CAPTURE_GROUP\n\n if 5100 <= event_code <= 5199:\n return DEVICE_GROUP\n\n if 5200 <= event_code <= 5299:\n return AUTOMATION_GROUP\n\n if 6000 <= event_code <= 6100:\n return ARM_FAULT_GROUP\n\n return None", "def generate_code_point_map(cpp, public_name, enum_name, table):\n\n private_name = public_name + \"_\"\n\n cpp.write(\n f\"static constexpr MapEntry<CodePoint, {enum_name}> \" f\"{private_name}[] = {{\\n\"\n )\n\n # Generate one array entry at the start of the mapping.\n # Only emit a new entry if the current value changes.\n last_value = None\n max_cp = max(table)\n for cp in range(0, max_cp + 2):\n # Improvement: Could make the invalid value more generic in the future\n value = table.get(cp, \"Invalid\")\n if value != last_value:\n cpp.write(f\" {{{hex(cp)}, {enum_name}::{value}}},\\n\")\n last_value = value\n\n cpp.write(\"};\\n\\n\")\n cpp.write(\n f\"constexpr Span<const MapEntry<CodePoint, {enum_name}>> \"\n 
f\"{public_name}({private_name});\\n\\n\"\n )", "def get_pb_visit_lookup(self) -> Dict[str, int]:\n return self.pb_visit_lookup", "def _code_indices(self) -> Tuple[int, ...]:\n return tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code)", "def mapping_for_enum(mapping):\n return dict(mapping.keys())", "def _get_return_codes(self):\n return self.__return_codes" ]
[ "0.6177796", "0.6068879", "0.60158306", "0.58416814", "0.5706858", "0.5705263", "0.563382", "0.5626298", "0.5607481", "0.558467", "0.549548", "0.5402228", "0.5259976", "0.52537525", "0.52353066", "0.5230671", "0.51013124", "0.5097843", "0.5085019", "0.50778747", "0.5073233", "0.5033127", "0.49600232", "0.49517378", "0.4948364", "0.49372378", "0.4932627", "0.49225166", "0.49141625", "0.49137893" ]
0.86958295
0
Install the cloc package for autograding source code comment counting.
def up(config): os.system("sudo apt-get install cloc --yes")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def InstallPrereqs():\n #Collect the source for Cython and put in _deps/cython-master\n import urllib,zipfile\n print('getting cython sources')\n urllib.urlretrieve('https://github.com/cython/cython/archive/master.zip', filename = 'master.zip')\n with zipfile.ZipFile('master.zip', 'r') as myzip:\n myzip.extractall(path='_deps')\n os.remove('master.zip')\n for python_install in PYTHONVERSIONS:\n for cwd in ['_deps/cython-master']:\n print(subprocess.check_output([python_install, 'setup.py', 'install'], cwd = cwd))", "def valgrind(c):\n c.solutions[0].custom_deps['src/chromium/src/third_party/valgrind'] = \\\n ChromiumGitURL(c, 'chromium', 'deps', 'valgrind', 'binaries')", "def ci(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)\n run_yapf(session, True)\n run_all_linters(session)\n run_pytest_units(session)\n run_pytest_integrations(session)", "def install_coverage_prereqs():\n if no_prereq_install():\n print(NO_PREREQ_MESSAGE)\n return\n pip_install_req_file(COVERAGE_REQ_FILE)", "def main():\n pattern = get_regex_pattern()\n directories = get_directories()\n files = get_js_files(directories)\n comments_for_jsdoc_exists = analyse_files_against_regex_pattern(\n files, pattern)\n set_github_env_variable(comments_for_jsdoc_exists)", "def c_comment(self, token: Token):\n if token.value.startswith(\"/**\"):\n self.doc_comments.append(PrefixCppDocComment(token))", "def cli(ctx, comment, metadata=\"\"):\n return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata)", "def comments(self):\n lineno = 0\n novermin = set()\n src = self.__source\n if type(src) == bytes:\n src = src.decode(errors=\"ignore\")\n for line in src.splitlines():\n lineno += 1\n line = line.strip()\n m = RE_COMMENT.match(line)\n if m is not None:\n comment = m.group(2).strip()\n if comment == \"novermin\" or comment == \"novm\":\n # Ignore if it is inside another comment, like: `# test: # novm`\n if m.start(0) < m.start(1) and m.group(0).strip().startswith(\"#\"):\n continue\n # Associate with next line if the comment is \"alone\" on a line, i.e. 
'#' starts the line.\n novermin.add(lineno + 1 if m.start(1) == 0 else lineno)\n return novermin", "def count_lines_of_code(repo_path):\n\n cmd = \"cloc\"\n out = subprocess.Popen(\n f\"{cmd} {repo_path}\",\n shell=True,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n ).stdout.read()\n\n str = StringIO(out)\n for line in str:\n if line.startswith(\"--------\"):\n break\n data = pd.read_csv(\n str,\n skiprows=0,\n skipfooter=3,\n skip_blank_lines=True,\n sep=r\"[ ]{5,}\",\n index_col=\"Language\",\n comment=\"-\",\n engine=\"python\",\n )\n\n return data", "def add_comment_ml(self, lines, verbosity_level=Verbosity.info):\n if self.verbosity < verbosity_level:\n return 0\n return self._add_scope(lines, '/*', '*/', '* ', inline=False)", "def prepare():\n sh('pip install pylint pyflakes behave nose clonedigger pep8 sphinx')\n sh('pip install watchdog coverage ipython sphinx_rtd_theme')\n develop()", "def cc():\n load_env_vars('dev')\n from tools.static_code_analysis import CyclomaticComplexity\n radon_cc = CyclomaticComplexity()\n score = radon_cc.run_test()\n radon_cc.create_badge(score)", "def main(\n req_in: str, req_pinned: str, tld: Optional[str], ignore: Optional[Iterable[str]]\n):\n print(\"Compiling requirements!\")\n if isinstance(ignore, str):\n ignore = [ignore]\n reqcompyle(\n Path(req_in),\n Path(req_pinned),\n Path(tld) if tld else None,\n [Path(i) for i in ignore] if ignore else None,\n )\n return 0", "def install_deps():\n click.echo(\"install_deps\")", "def calc_comments(self):\n for comment in self.pull_request.get_comments():\n self._users.add(comment.user.login)\n lowercase_body = comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_comments += 1\n if comment.body is not None:\n self.len_comments += len(comment.body)\n for reaction in comment.get_reactions():\n self._users.add(reaction.user.login)\n self.comment_reactions += 1", "def supports_refcounts(self):\n return sys.implementation.name == \"cpython\"", "def __ingest_c_comment_start(self, line, pos):\n\n if line[pos] == '/' and len(line) > pos + 1:\n if line[pos + 1] == '/':\n return -1\n elif line[pos + 1] == '*':\n self._in_block_comment = True\n return 2\n return 0", "def setup_prereqs():\n # Run the contrib download script -- easier that way\n gmp = os.path.join(flag_gcc_subdir, \"gmp\")\n if not os.path.exists(gmp):\n dochdir(flag_gcc_subdir)\n docmd(\"./contrib/download_prerequisites\")\n # Hack -- fix up gmp dir\n patch_gmp_configure()\n dochdir(\"..\")", "def install():\n build()\n sh(\"%s setup.py develop\" % PYTHON)", "def test_0_check_xc_docstring(self):\n self.banner(\"Checking the docstring on your extra credit.\") \n filename = self.find_file('project9_xc.py')\n self.check_docstring(filename)", "def prereposetup_hook(conduit):\n return init_hook(conduit)", "def _install_code_importer(self):\n self._log.info(\"Installing talus code importer\")\n code = self._config[\"code\"]\n self._code_importer = TalusCodeImporter(\n code[\"loc\"],\n code[\"username\"],\n code[\"password\"],\n parent_log=self._log\n )\n sys.meta_path = [self._code_importer]", "def add_comment(self, lines, verbosity_level=Verbosity.info):\n if self.verbosity < verbosity_level:\n return 0\n return self._add_scope(lines, None, None, '// ', inline=False)", "def task_pydocstyle():\n yield {\n 'name': os.path.join(os.getcwd(), 'nikola'),\n 'actions': [\"pydocstyle --count --match-dir='(?!^\\\\.)(?!data).*' nikola/\"],\n }", "def lint(session):\n session.install(\"-r\", \"requirements-test.txt\")\n 
session.install(\"-r\", \"requirements.txt\")\n session.install(\"flake8-import-order\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests\",\n \"google\",\n \"tests\",\n )\n session.run(\"mypy\", \"google\", \"tests\")\n session.run(\"python\", \"setup.py\", \"sdist\")\n session.run(\"twine\", \"check\", \"dist/*\")", "def analyzeCppCode(self, sourceFile):\n numLines = 0 # Number of lines of code\n numComments = 0 # Number of comments in the code\n\n f=self.openFile(sourceFile)\n for line in f:\n numLines += 1;\n loc = 0\n while (loc != -1): #count the # of times the '/*' characters appears\n loc = line.find(\"#\", loc)\n if (loc != -1):\n loc += 1\n numComments += 1\n \n loc = 0\n loc = line.find('//', loc) #count the # of times the '//' characters appears\n if (loc != -1):\n loc += 1\n numComments += 1\n \n f.close()\n return numLines, numComments", "def install_distcc_and_ccache():\n distcc_dir = '/usr/lib/distcc'\n ccache_dir = '/usr/lib/ccache'\n\n # Make sure distcc will be called if we use ccache\n if os.path.exists(distcc_dir):\n logging.info('Found distcc, so setting CCACHE_PREFIX=distcc')\n os.environ['CCACHE_PREFIX'] = 'distcc'\n\n # Add ccache to PATH if it exists, otherwise add distcc\n if os.path.exists(ccache_dir):\n extra_path = ccache_dir\n elif os.path.exists(distcc_dir):\n extra_path = distcc_dir\n else:\n return\n logging.info('Adding %s to PATH', extra_path)\n os.environ['PATH'] = extra_path + ':' + os.getenv('PATH')\n\n if os.path.exists(distcc_dir):\n # Set DISTCC_HOSTS if necessary\n if not os.getenv('DISTCC_HOSTS'):\n hosts = subprocess.Popen(['lsdistcc'], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')\n hosts = ' '.join(hosts.split())\n logging.debug('Setting DISTCC_HOSTS=%s', hosts)\n os.environ['DISTCC_HOSTS'] = hosts\n\n # Compute a reasonable number of workers for UBT\n if not os.getenv('UBT_PARALLEL'):\n workers = subprocess.Popen(['distcc', '-j'], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')\n logging.debug('Setting UBT_PARALLEL=%s', workers)\n os.environ['UBT_PARALLEL'] = workers", "def getComments(source):\n\n markup = []\n for f in source:\n markup += extractMarkup(f)\n\n docs = collateDocs(markup)\n return docs", "def annotate_pyapi(llvm_intermediate, py_annotations):\n for py_lineno in llvm_intermediate.linenomap:\n count = 0\n for llvm_lineno in llvm_intermediate.linenomap[py_lineno]:\n line = llvm_intermediate.source.linemap[llvm_lineno]\n if re.search(py_c_api, line):\n count += 1\n\n if count:\n py_annotations[py_lineno].append(Annotation(A_c_api, count))", "def _expand_cldr(self):\n# global cldr\n self.tree.item('cldr', open=True, \\\n values=[self._count_children('cldr'), ''])" ]
[ "0.57753175", "0.50839555", "0.5048947", "0.4999652", "0.4999292", "0.49329606", "0.48832586", "0.48532987", "0.4812118", "0.48035082", "0.4798042", "0.4769488", "0.4746221", "0.47187597", "0.47153038", "0.4701966", "0.4700621", "0.46982154", "0.46897608", "0.46163833", "0.46006203", "0.45931515", "0.4581098", "0.45786223", "0.45726377", "0.45504206", "0.45232886", "0.45131916", "0.45044827", "0.44998285" ]
0.5521329
1
Test when the yaml stream is a valid xban format
def test_valid_xban():

    # first is a valid dict but the second one is not
    stream = [
        {"xban_config": {"title": "testfile", "description": "", "board_color": [],}},
        {},
    ]
    assert xban_content("test/testfile.yaml", stream) == stream
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_yaml_xban(caplog):\n\n # the color generation is random so need to check individual values\n\n stream = [{\"new\": [\"a\", \"b\"], \"old\": [\"c\", \"d\"]}]\n\n parsed = xban_content(\"test/testfile.yaml\", stream)\n assert parsed[1] == stream[0]\n assert parsed[0][\"xban_config\"][\"title\"] == \"testfile\"\n\n color = parsed[0][\"xban_config\"][\"board_color\"]\n\n assert len(color) == 2\n assert color[0] in TILE_STYLE", "def test_invalid_xban(caplog):\n\n # first is a valid dict but the second one is not\n stream = [{\"config\": {\"test\": \"new\"}}, []]\n assert xban_content(\"test/testfile.yaml\", stream) == []\n assert caplog.record_tuples == [\n (\n \"xban-io\",\n logging.ERROR,\n \"test/testfile.yaml does not have a valid xban format\",\n )\n ]", "def test_invalid_xban(caplog):\n\n # first is a valid dict but the second one is not\n stream = [{\"config\": {\"test\": \"new\"}}, []]\n assert xban_content(\"test/testfile.yaml\", stream) == []\n assert caplog.record_tuples == [\n (\n \"xban-io\",\n logging.ERROR,\n \"test/testfile.yaml does not have a valid xban format\",\n )\n ]", "def is_valid_yaml(yamlObj, type):\n try:\n yaml.load(yamlObj)\n except (yaml.scanner.ScannerError, AttributeError), e:\n data[\"skytap_\" + type + \"_status\"] = \"error\"\n data[\"skytap_\" + type + \"_status\"] = e\n return False\n return True", "def assertValidYAML(self, data):\r\n # Just try the load. If it throws an exception, the test case will fail.\r\n self.serializer.from_yaml(data)", "def _validate_yaml(self):\n\n # verify the format is correct\n if self.validater == 'yamale':\n\n import yamale\n\n print('Validating yaml file with yamale.')\n cwd = Path(os.path.dirname(__file__))\n schema_path = str(cwd.parent / 'schema') + '/generic_schema.yaml'\n schema = yamale.make_schema(schema_path)\n data = yamale.make_data(self.yaml_path)\n try:\n yamale.validate(schema, data, strict=False)\n print('Validation success! 👍')\n return True\n except ValueError as e:\n print(\n 'Yamale found that your file, '\n + self.yaml_path\n + ' is not formatted correctly.'\n )\n print(e)\n return False\n else:\n print('Did not validate yaml.')\n print('If unexpected results occur, try installing yamale and rerun.')\n return True", "def valid_is_yaml(self):\n return self.file_name.endswith('.yml')", "def invalid_yaml_error():\n\n clowder_output = yaml_file('clowder.yaml')\n return '\\n' + clowder_output + ' appears to be invalid'", "def assertValidYAMLResponse(self, resp):\r\n self.assertHttpOK(resp)\r\n self.assertTrue(resp['Content-Type'].startswith('text/yaml'))\r\n self.assertValidYAML(resp.content)", "def test_process_yaml_invalid(caplog):\n data = \"\"\"\n text_key: incorrect format\n - listitem\n - listitem\n \"\"\"\n\n with patch(\"builtins.open\", mock_open(read_data=data)):\n result = process_yaml(\"test/file.yaml\")\n\n for record in caplog.records:\n assert (\n \"Incorrect test/file.yaml. 
Error: while parsing a block mapping\"\n in record.message\n )\n assert record.levelname == \"ERROR\"\n assert result == []", "def test_process_yaml_valid(caplog):\n\n with patch(\"builtins.open\", mock_open(read_data=DATA)):\n result = process_yaml(\"test/testfile.yaml\")\n assert result == [\n {\n \"xban_config\": {\n \"title\": \"testfile\",\n \"description\": \"test io\",\n \"board_color\": [\"red\", \"teal\"],\n }\n },\n {\"todo\": [\"need more tests!\", \"and more!\"], \"finished\": [\"io tests\"],},\n ]", "def _check_yaml(self, yaml):\n if type(yaml['datasets']) == dict:\n logging.error(\n \"[ERROR] \\\"datasets\\\" section of config file must be a list, not a dictionary...\" \n )\n sys.exit()", "def test_bad_start(data):\n conf = load_yaml(data)\n errors = get_config_errors(conf)\n assert len(errors) == 1\n assert \"object\" in errors[0]", "def _validate_yaml(schema, config):\n check = pykwalify_core.Core(\n source_data=config, schema_files=[\"{}/{}.yml\".format(conf_schema_path, schema)])\n try:\n check.validate(raise_exception=True)\n except pykwalify_errors.SchemaError as e:\n _logger.error(\"Schema validation failed\")\n raise Exception(\"File does not conform to {} schema: {}\".format(schema, e))", "def test_load_argument_validation():\n\n class Config(DumpableAttrs):\n a: int\n\n yaml.load(\n \"\"\"\\\n!Config\n a: 1\n\"\"\"\n )\n\n with pytest.raises(TypeError):\n yaml.load(\"!Config {}\")", "def validate_yaml(_, file_name):\n slab_logger.info('Validating syntax of %s' % file_name)\n yaml_utils.validate_syntax(file_name)", "def test_PhonopyYaml_read_with_stream(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n with open(filename) as fp:\n cell = _get_unitcell(fp)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def is_eyaml_value(value: str) -> bool:\n if not isinstance(value, str):\n return False\n return value.replace(\"\\n\", \"\").replace(\" \", \"\").startswith(\"ENC[\")", "def test_invalid_from_input_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_from_input_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Input type has to be a markdown variant.\", message)", "def testParse_invalidYaml(self):\n config_path = GetTestFilePath('invalid/config_invalid_yaml.yaml')\n with self.assertRaises(lab_config.ConfigError):\n with open(config_path, 'r') as f:\n lab_config.Parse(f)", "def validate(self,data):\n serialized = jsonpickle.encode(data)\n print(yaml.dump(yaml.load(serialized), indent=2))", "def validateYaml(f):\n\tif os.path.isfile(f) and f.endswith('.yaml'):\n\t\ttry:\n\t\t\tjsonschema.validate(yaml.load(open(f)), cfg['post_schema'])\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tprint (\"Error loading post %s: %s\" % (f,e))[0:240] + \"...\\n\"\n\treturn False", "def _load_yaml_file(yaml_file):\n with io.open(yaml_file, 'r', encoding='utf-8') as stream:\n yaml_content = yaml.load(stream)\n FileUtils._check_format(yaml_file, yaml_content)", "def test_read_phonopy_yaml_with_stream(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n with open(filename) as fp:\n cell = read_phonopy_yaml(fp).unitcell\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def 
test_string_output(self):\n got_str = yamlish.dumps(IN)\n got = yaml.load(got_str)\n self.assertEqual(got, self._expected, \"Result matches\")", "def test_ignore_unrecognized_fields():\n\n class Foo(DumpableAttrs):\n foo: int\n\n s = \"\"\"\\\n!Foo\nfoo: 1\nbar: 2\n\"\"\"\n with pytest.warns(CorrWarning):\n assert yaml.load(s) == Foo(1)", "def test_bad_items_pos():\n conf = load_yaml(\n \"\"\"\\\ndb_objects:\n - name: 42\n - name: aaa\n kind: nope\n - name: bbb\n kinds:\n - table\n - boh\n - sequence\n - mah\n\"\"\"\n )\n errors = get_config_errors(conf)\n assert len(errors) == 4\n assert \"<unicode string>:2\" in errors[0]\n assert \"<unicode string>:4\" in errors[1]\n assert \"<unicode string>:8\" in errors[2]\n assert \"<unicode string>:10\" in errors[3]", "def test_multi_docs_stream(caplog):\n stream = [{\"new\": [\"a\", \"b\"], \"old\": [\"c\", \"d\"]}, {\"new\": [\"a\", \"b\"]}]\n assert xban_content(\"test/testfile.yaml\", stream) == []\n assert caplog.record_tuples == [\n (\"xban-io\", logging.ERROR, \"test/testfile.yaml have too many yaml documents\",)\n ]", "def test_invalid_reader_input_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_reader_input_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Input type has to be a markdown variant.\", message)", "def test_invalid_format(api):\n\twith pytest.raises(top_stories.InvalidFormatType):\n\t\tapi.get_stories(\"home\", \"xml\")" ]
[ "0.77437544", "0.75530493", "0.75530493", "0.6958264", "0.6825725", "0.6687764", "0.66465294", "0.65671825", "0.6549907", "0.6477815", "0.63983715", "0.6390067", "0.62619424", "0.6199314", "0.6169752", "0.6092808", "0.6079635", "0.60743856", "0.60647416", "0.60126257", "0.5997526", "0.59836066", "0.59764737", "0.59645915", "0.5961188", "0.5887889", "0.588337", "0.5875742", "0.5869244", "0.5864534" ]
0.76649076
1
Test given process yaml given a mocked io and invalid input
def test_process_yaml_invalid(caplog):
    data = """
    text_key: incorrect format
    - listitem
    - listitem
    """

    with patch("builtins.open", mock_open(read_data=data)):
        result = process_yaml("test/file.yaml")

    for record in caplog.records:
        assert (
            "Incorrect test/file.yaml. Error: while parsing a block mapping"
            in record.message
        )
        assert record.levelname == "ERROR"
    assert result == []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_process_yaml_valid(caplog):\n\n with patch(\"builtins.open\", mock_open(read_data=DATA)):\n result = process_yaml(\"test/testfile.yaml\")\n assert result == [\n {\n \"xban_config\": {\n \"title\": \"testfile\",\n \"description\": \"test io\",\n \"board_color\": [\"red\", \"teal\"],\n }\n },\n {\"todo\": [\"need more tests!\", \"and more!\"], \"finished\": [\"io tests\"],},\n ]", "def test_invalid_from_input_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_from_input_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Input type has to be a markdown variant.\", message)", "def test_main_modular_no_file() -> None:\n\n input_filename = OPEN_API_DATA_PATH / 'modular.yaml'\n\n assert main(['--input', str(input_filename)]) == Exit.ERROR", "def test_probabilistic_parsers():", "def test_running_with_badly_formatted_config():\n cli_result = subprocess.run(\n ['kaiba', 'tests/files/bad_config.json', 'tests/files/input.json'],\n capture_output=True,\n )\n assert b\"'target' is a required property\" in cli_result.stderr", "def test_complex_io_from_package(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": {\n \"url\": {\n \"type\": \"File\"\n }\n },\n \"outputs\": {\n \"files\": {\n \"type\": {\n \"type\": \"array\",\n \"items\": \"File\",\n }\n }\n }\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n assert proc[\"id\"] == self._testMethodName\n assert proc[\"title\"] == \"some title\"\n assert proc[\"description\"] == \"this is a test\"\n assert isinstance(proc[\"inputs\"], list)\n assert len(proc[\"inputs\"]) == 1\n assert proc[\"inputs\"][0][\"id\"] == \"url\"\n assert proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert isinstance(proc[\"inputs\"][0][\"formats\"], list)\n assert len(proc[\"inputs\"][0][\"formats\"]) == 1\n assert isinstance(proc[\"inputs\"][0][\"formats\"][0], dict)\n assert proc[\"inputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"inputs\"][0][\"formats\"][0][\"default\"] is True\n assert isinstance(proc[\"outputs\"], list)\n assert len(proc[\"outputs\"]) == 1\n assert proc[\"outputs\"][0][\"id\"] == \"files\"\n assert \"minOccurs\" not in proc[\"outputs\"][0]\n assert \"maxOccurs\" not in proc[\"outputs\"][0]\n assert isinstance(proc[\"outputs\"][0][\"formats\"], list)\n assert len(proc[\"outputs\"][0][\"formats\"]) == 1\n assert isinstance(proc[\"outputs\"][0][\"formats\"][0], dict)\n assert proc[\"outputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"outputs\"][0][\"formats\"][0][\"default\"] is True\n expect = KNOWN_PROCESS_DESCRIPTION_FIELDS\n fields = set(proc.keys()) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))", "def 
test_invalid_reader_input_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_reader_input_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Input type has to be a markdown variant.\", message)", "def test_literal_io_from_package(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": {\n \"url\": {\n \"type\": \"string\"\n }\n },\n \"outputs\": {\n \"values\": {\n \"type\": {\n \"type\": \"array\",\n \"items\": \"float\",\n }\n }\n }\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n\n assert proc[\"id\"] == self._testMethodName\n assert proc[\"title\"] == \"some title\"\n assert proc[\"description\"] == \"this is a test\"\n assert isinstance(proc[\"inputs\"], list)\n assert len(proc[\"inputs\"]) == 1\n assert proc[\"inputs\"][0][\"id\"] == \"url\"\n assert proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert \"format\" not in proc[\"inputs\"][0]\n assert isinstance(proc[\"outputs\"], list)\n assert len(proc[\"outputs\"]) == 1\n assert proc[\"outputs\"][0][\"id\"] == \"values\"\n assert \"minOccurs\" not in proc[\"outputs\"][0]\n assert \"maxOccurs\" not in proc[\"outputs\"][0]\n assert \"format\" not in proc[\"outputs\"][0]\n expect = KNOWN_PROCESS_DESCRIPTION_FIELDS\n fields = set(proc.keys()) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))\n # make sure that deserialization of literal fields did not produce over-verbose metadata\n for p_input in proc[\"inputs\"]:\n expect = KNOWN_PROCESS_DESCRIPTION_INPUT_DATA_FIELDS\n fields = set(p_input) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))\n for p_output in proc[\"outputs\"]:\n expect = KNOWN_PROCESS_DESCRIPTION_OUTPUT_DATA_FIELDS\n fields = set(p_output) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))", "def test_no_input_format(self):\n pandoc_default_files = [\n os.path.join(TEST_DEFAULT_FILES_PATH, \"no_input_format.yaml\")\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"No input format specified.\", message)", "def test_cli_unknown(run):\n\n out, err, mocked_input = run(\n dork.cli.main, '-?', input_side_effect=['tester', '.rq'])\n assert 'Greetings' in out\n assert err == \"\"\n assert mocked_input.call_count == 2", "def assert3(*extra_args, stdin):\n sys.argv[1:] = []\n sys.argv.append('markdown')\n _stdout = 
io.StringIO()\n pf.stdio(*extra_args, input_stream=io.StringIO(stdin), output_stream=_stdout)\n _stdout = pf.convert_text(_stdout.getvalue(), 'json', 'markdown')\n assert _stdout == out1", "def testParse_invalidYaml(self):\n config_path = GetTestFilePath('invalid/config_invalid_yaml.yaml')\n with self.assertRaises(lab_config.ConfigError):\n with open(config_path, 'r') as f:\n lab_config.Parse(f)", "def test_check_args(caplog, tmpdir, monkeypatch):\n\n # Check existing input file\n argv = [\"-i\", f\"{proj_dir}/data/input.json\"]\n check_args(parse_args(argv))\n\n # Check non-existent input file\n caplog.clear()\n argv = [\"-i\", \"foo.bar\"]\n with pytest.raises(SystemExit):\n check_args(parse_args(argv))\n assert caplog.records[0].message == 'Cannot find input file: foo.bar'\n\n # Check non-existent output file\n ofile = tmpdir.join(\"foo.bar\")\n argv = [\"-o\", ofile.strpath]\n check_args(parse_args(argv))\n\n # Check existing output file\n ofile = tmpdir.join(\"output.txt\")\n ofile.write(\"Test\")\n argv = [\"-o\", ofile.strpath]\n\n monkeypatch.setattr('sys.stdin', StringIO('Y\\n'))\n check_args(parse_args(argv))\n\n monkeypatch.setattr('sys.stdin', StringIO('N\\n'))\n with pytest.raises(SystemExit):\n check_args(parse_args(argv))\n ## Check unacceptable user input\n monkeypatch.setattr('sys.stdin', StringIO('A\\nB\\nC\\nN'))\n with pytest.raises(SystemExit):\n check_args(parse_args(argv))", "def test_robo():\n print(\n \"Choose the option of input:\\n\"\n \"1. Enter the command string\\n\"\n \"2. Select from input file\"\n )\n option = input(\"Enter 1 or 2 to select the input method:\")\n # Wait till the right option is chosen.\n while not (option == \"1\" or option == \"2\"):\n print(\"Invalid option, please enter valid option\")\n option = input(\"Enter 1 or 2 to select the input method:\")\n\n if option == \"1\":\n command = input(\"Enter the command as string:\")\n # To format the cli command to proper string.\n command = command[1:-1]\n robo = calculate_distance.Robot()\n robo.main(command)\n\n elif option == \"2\":\n file = open(os.path.dirname(__file__) + \"/input.yml\")\n parsed_input_file = yaml.load(file, Loader=yaml.FullLoader)\n command_list = parsed_input_file.get(\"input_list\")\n # To iterate through the list of commands given.\n for item in command_list:\n robo = calculate_distance.Robot()\n robo.main(item)", "def test_yaml_include(mo):\n\n mo.return_value.name = \"./foobar.yml\"\n mo2 = mock.mock_open(read_data=test)\n mo2.return_value.name = \"test\"\n handlers = (mo.return_value, mo2.return_value)\n mo.side_effect = handlers\n\n with mock.patch(\"builtins.open\", mo, create=True):\n cfg = config.get_validated_config(\"lalala\")\n assert cfg[\"dbms\"][\"xob10\"][\"type\"] == \"oracle\"", "def test_basic_parsers():", "def test_invalid_format(self):\n input_file = self.copy_and_mark_for_cleanup(\"Medline/pubmed_result1.txt\")\n\n cline = XXmotifCommandline(outdir=self.out_dir, seqfile=input_file)\n\n try:\n stdout, stderr = cline()\n except ApplicationError as err:\n self.assertEqual(err.returncode, 255)\n else:\n self.fail(f\"Should have failed, returned:\\n{stdout}\\n{stderr}\")", "def test_invalid_config_options_output():\n\n with pytest.raises(InputError):\n _check_input_config({\"unknown_key_1\": 1})", "def test_PhonopyYaml_read_with_stream(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n with open(filename) as fp:\n cell = _get_unitcell(fp)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def 
test_execute_job_with_inline_input_values(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\", \"script.py\"],\n \"inputs\": {\n \"stringInput\": \"string\",\n \"integerInput\": \"int\",\n \"doubleInput\": \"float\",\n \"stringArrayInput\": {\"type\": {\"type\": \"array\", \"items\": \"string\"}},\n \"integerArrayInput\": {\"type\": {\"type\": \"array\", \"items\": \"int\"}},\n \"floatArrayInput\": {\"type\": {\"type\": \"array\", \"items\": \"float\"}},\n \"measureStringInput\": \"string\",\n \"measureIntegerInput\": \"int\",\n \"measureFloatInput\": \"float\",\n \"measureFileInput\": \"File\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\n \"dockerPull\": \"python:3.7-alpine\"\n },\n CWL_REQUIREMENT_INIT_WORKDIR: {\n \"listing\": [\n {\n \"entryname\": \"script.py\",\n \"entry\": cleandoc(\"\"\"\n import json\n import os\n import ast\n input = $(inputs)\n try:\n for key, value in input.items():\n if isinstance(value, dict):\n path_ = value.get(\"path\")\n if path_ and os.path.exists(path_):\n with open (path_, \"r\") as file_:\n file_data = file_.read()\n input[key] = ast.literal_eval(file_data.upper())\n json.dump(input, open(\"./tmp.txt\", \"w\"))\n except Exception as exc:\n print(exc)\n raise\n \"\"\")\n }\n ]\n }\n },\n \"outputs\": [{\"id\": \"output_test\", \"type\": \"File\", \"outputBinding\": {\"glob\": \"tmp.txt\"}}],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n try:\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n except colander.Invalid:\n self.fail(\"Test\")\n\n assert desc[\"process\"] is not None\n\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n tmp_file = stack_exec.enter_context(tempfile.NamedTemporaryFile(mode=\"w\", suffix=\".json\")) # noqa\n tmp_file.write(json.dumps({\"value\": {\"ref\": 1, \"measurement\": 10.3, \"uom\": \"m\"}}))\n tmp_file.seek(0)\n\n exec_body = {\n \"mode\": EXECUTE_MODE_ASYNC,\n \"response\": EXECUTE_RESPONSE_DOCUMENT,\n \"inputs\": {\n \"stringInput\": \"string_test\",\n \"integerInput\": 10,\n \"doubleInput\": 3.14159,\n \"stringArrayInput\": [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"],\n \"integerArrayInput\": [1, 2, 3, 4, 5, 6],\n \"floatArrayInput\": [1.45, 2.65, 3.5322, 4.86, 5.57, 6.02],\n \"measureStringInput\": {\n \"value\": \"this is a test\"\n },\n \"measureIntegerInput\": {\n \"value\": 45\n },\n \"measureFloatInput\": {\n \"value\": 10.2\n },\n \"measureFileInput\": {\n \"href\": \"file://{}\".format(tmp_file.name)\n }\n },\n \"outputs\": [\n {\"id\": \"output_test\", \"type\": \"File\"},\n ]\n }\n\n proc_url = \"/processes/{}/jobs\".format(self._testMethodName)\n resp = mocked_sub_requests(self.app, \"post_json\", proc_url, timeout=5,\n data=exec_body, headers=self.json_headers, only_local=True)\n assert resp.status_code in [200, 201], \"Failed with: [{}]\\nReason:\\n{}\".format(resp.status_code, resp.json)\n status_url = resp.json.get(\"location\")\n\n results = self.monitor_job(status_url)\n\n job_output_path = results.get(\"output_test\")[\"href\"].split(self.settings[\"weaver.wps_output_path\"])[-1]\n tmp_file = \"{}/{}\".format(self.settings[\"weaver.wps_output_dir\"], job_output_path)\n\n try:\n with 
open(tmp_file, \"r\") as f:\n processed_values = json.load(f)\n except FileNotFoundError:\n self.fail(\"Output file [{}] was not found where it was expected to resume test\".format(tmp_file))\n except Exception as exception:\n self.fail(\"An error occurred during the reading of the file: {}\".format(exception))\n assert processed_values[\"stringInput\"] == \"string_test\"\n assert processed_values[\"integerInput\"] == 10\n assert processed_values[\"doubleInput\"] == 3.14159\n assert processed_values[\"stringArrayInput\"] == [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\n assert processed_values[\"integerArrayInput\"] == [1, 2, 3, 4, 5, 6]\n assert processed_values[\"floatArrayInput\"] == [1.45, 2.65, 3.5322, 4.86, 5.57, 6.02]\n assert processed_values[\"measureStringInput\"] == \"this is a test\"\n assert processed_values[\"measureIntegerInput\"] == 45\n assert processed_values[\"measureFloatInput\"] == 10.2\n assert processed_values[\"measureFileInput\"] == {\"VALUE\": {\"REF\": 1, \"MEASUREMENT\": 10.3, \"UOM\": \"M\"}}", "def test_string_output(self):\n got_str = yamlish.dumps(IN)\n got = yaml.load(got_str)\n self.assertEqual(got, self._expected, \"Result matches\")", "def test_cli_stdin(\n config,\n):\n from YesssSMS.api import MAX_MESSAGE_LENGTH_STDIN\n\n testargs = [\"yessssms\", \"--test\", \"-l\", \"06641234567\", \"-p\", \"passw0rd\", \"-m\", \"-\"]\n\n in_message = \"\"\"Da steh’ ich nun, ich armer Thor!\nUnd bin so klug als wie zuvor;\nHeiße Magister, heiße Doctor gar,\nUnd ziehe schon an die zehen Jahr,\nHerauf, herab und quer und krumm,\nMeine Schüler an der Nase herum –\nUnd sehe, daß wir nichts wissen können!\nDas will mir schier das Herz verbrennen.\nZwar bin ich gescheidter als alle die Laffen,\nDoctoren, Magister, Schreiber und Pfaffen;\nMich plagen keine Scrupel noch Zweifel,\nFürchte mich weder vor Hölle noch Teufel –\nDafür ist mir auch alle Freud’ entrissen,\nBilde mir nicht ein was rechts zu wissen,\nBilde mir nicht ein, ich könnte was lehren,\nDie Menschen zu bessern und zu bekehren.\nAuch hab’ ich weder Gut noch Geld,\nNoch Ehr’ und Herrlichkeit der Welt.\nEs möchte kein Hund so länger leben!\nDrum hab’ ich mich der Magie ergeben,\nOb mir durch Geistes Kraft und Mund\nNicht manch Geheimniß würde kund;\nDaß ich nicht mehr mit sauerm Schweiß,\nZu sagen brauche, was ich nicht weiß;\"\"\"\n with mock.patch.object(sys, \"argv\", testargs):\n with mock.patch.object(sys, \"stdin\", in_message):\n with requests_mock.Mocker() as m:\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n _LOGIN_URL,\n status_code=302,\n # pylint: disable=protected-access\n headers={\"location\": _KONTOMANAGER_URL},\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n _KONTOMANAGER_URL,\n status_code=200,\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n _SMS_FORM_URL,\n status_code=200,\n text=TEST_FORM_TOKEN_SAMPLE,\n )\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n _SEND_SMS_URL,\n status_code=200,\n text=\"<h1>Ihre SMS wurde erfolgreich \" + \"verschickt!</h1>\",\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n _LOGOUT_URL,\n status_code=200,\n )\n\n message = CLI().message\n\n assert message.startswith(\n \"\"\"Da steh’ ich nun, ich armer Thor!\nUnd bin so klug als wie zuvor;\"\"\"\n )\n assert message == in_message[:MAX_MESSAGE_LENGTH_STDIN]", "def test_metadata():\n\n def to_json(text):\n return pf.convert_text(text, 'markdown', 'json')\n\n def assert_equal(*extra_args, input_text, 
output_text):\n \"\"\"\n Default values for extra_args:\n filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False\n \"\"\"\n \n # Set --from=markdown\n sys.argv[1:] = []\n sys.argv.append('markdown')\n\n _stdout = io.StringIO()\n pf.stdio(*extra_args, input_stream=io.StringIO(input_text), output_stream=_stdout)\n _stdout = pf.convert_text(_stdout.getvalue(), 'json', 'markdown')\n assert _stdout == output_text\n\n md_contents = \"$1-1$\"\n md_document = \"\"\"---\npanflute-filters: test_filter\npanflute-path: ./tests/test_panfl/bar\n...\n{}\n\"\"\".format(md_contents)\n expected_output = '$1+1markdown$'\n\n json_contents = to_json(md_contents)\n json_document = to_json(md_document)\n\n # Filter in YAML block; try `panf_` true and false (this is a minor option that changes how the path gets built)\n assert_equal(None, None, True, True, True, input_text=json_document, output_text=expected_output)\n assert_equal(None, None, True, True, False, input_text=json_document, output_text=expected_output)\n\n # Open the filter as a standalone python script within a folder\n assert_equal(['test_filter.py'], ['./tests/test_panfl/bar'], True, True, False, input_text=json_contents, output_text=expected_output)\n\n # Open the filter with the exact abs. path (no need for folder)\n assert_equal([os.path.abspath('./tests/test_panfl/bar/test_filter.py')], [], False, True, True, input_text=json_contents, output_text=expected_output)\n\n # Open the filter as part of a package (packagename.module)\n assert_equal(['foo.test_filter'], ['./tests/test_panfl'], False, True, True, input_text=json_contents, output_text=expected_output)\n assert_equal(['test_filter'], ['./tests/test_panfl/foo'], False, True, True, input_text=json_contents, output_text=expected_output)", "def test_111(self):\n user_input = [\"1\",\"1\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_load_interactive_invalid_inputs(\n self, mock_input, _mock_sleep, mock_load, side_effect, error_message\n ): # pylint: disable=redefined-outer-name\n mock_input.side_effect = side_effect\n with pytest.raises(ValueError, match=error_message):\n qml.data.load_interactive()", "def test_prompt_setInput_stringio_valid(self):\n instr = StringIO.StringIO()\n self.prompt.setInput(instr)\n\n self.assertEquals(instr, self.prompt._instr)\n self.assertEquals(instr.getvalue(), \"\")\n\n with mock.patch('__builtin__.raw_input', return_value='mocked input') as mockinput:\n result = self.prompt._prompt({}, {\n 'say': 'test',\n 'ask': 'varname'\n })\n\n self.assertEquals(result['ansible_facts']['varname'], 'mocked input')", "def flow_input_validator(body: str) -> str:\n # Callbacks are run regardless of whether an option was explicitly set.\n # Handle the scenario where the default value for an option is empty\n if not body:\n return body\n\n # Reading from a file was indicated by prepending the filename with the @\n # symbol -- for backwards compatability check if the symbol is present\n # remove it if present\n body = body.lstrip(\"@\")\n\n body_path = pathlib.Path(body)\n\n if body_path.exists() and body_path.is_file():\n with body_path.open() as f:\n try:\n yaml_body = yaml.safe_load(f)\n except yaml.YAMLError as e:\n raise typer.BadParameter(f\"Invalid flow input: {e}\")\n elif body_path.exists() and body_path.is_dir():\n raise 
typer.BadParameter(\"Expected file, received directory\")\n else:\n try:\n yaml_body = yaml.safe_load(body)\n except yaml.YAMLError as e:\n raise typer.BadParameter(f\"Invalid flow input: {e}\")\n\n try:\n yaml_to_json = json.dumps(yaml_body)\n except TypeError as e:\n raise typer.BadParameter(f\"Unable to translate flow input to JSON: {e}\")\n\n return yaml_to_json", "def test_output_reader_errors():\n with pytest.raises(TypeError):\n load_output_reader(\"not_a_dictionary\")\n with pytest.raises(errors.MapcheteDriverError):\n load_output_reader({\"format\": \"invalid_driver\"})", "def test_input_reader_errors():\n with pytest.raises(TypeError):\n load_input_reader(\"not_a_dictionary\")\n with pytest.raises(errors.MapcheteDriverError):\n load_input_reader({})\n with pytest.raises(errors.MapcheteDriverError):\n load_input_reader({\"abstract\": {\"format\": \"invalid_format\"}})", "def test_101(self):\n user_input = [\"1\",\"0\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")" ]
[ "0.72985035", "0.61559045", "0.6025226", "0.5997324", "0.59448725", "0.5877921", "0.5869697", "0.5867817", "0.5832697", "0.5803574", "0.57814", "0.5766075", "0.5719221", "0.5697595", "0.5692346", "0.568342", "0.5672739", "0.56462955", "0.5644205", "0.5641872", "0.562619", "0.5621152", "0.56203157", "0.560784", "0.5607766", "0.5598951", "0.55988175", "0.55961156", "0.5590614", "0.5576575" ]
0.671951
1
Test given process yaml given a mocked io
def test_process_yaml_valid(caplog):

    with patch("builtins.open", mock_open(read_data=DATA)):
        result = process_yaml("test/testfile.yaml")
        assert result == [
            {
                "xban_config": {
                    "title": "testfile",
                    "description": "test io",
                    "board_color": ["red", "teal"],
                }
            },
            {"todo": ["need more tests!", "and more!"], "finished": ["io tests"],},
        ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_yaml_include(mo):\n\n mo.return_value.name = \"./foobar.yml\"\n mo2 = mock.mock_open(read_data=test)\n mo2.return_value.name = \"test\"\n handlers = (mo.return_value, mo2.return_value)\n mo.side_effect = handlers\n\n with mock.patch(\"builtins.open\", mo, create=True):\n cfg = config.get_validated_config(\"lalala\")\n assert cfg[\"dbms\"][\"xob10\"][\"type\"] == \"oracle\"", "def test_complex_io_from_package(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": {\n \"url\": {\n \"type\": \"File\"\n }\n },\n \"outputs\": {\n \"files\": {\n \"type\": {\n \"type\": \"array\",\n \"items\": \"File\",\n }\n }\n }\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n assert proc[\"id\"] == self._testMethodName\n assert proc[\"title\"] == \"some title\"\n assert proc[\"description\"] == \"this is a test\"\n assert isinstance(proc[\"inputs\"], list)\n assert len(proc[\"inputs\"]) == 1\n assert proc[\"inputs\"][0][\"id\"] == \"url\"\n assert proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert isinstance(proc[\"inputs\"][0][\"formats\"], list)\n assert len(proc[\"inputs\"][0][\"formats\"]) == 1\n assert isinstance(proc[\"inputs\"][0][\"formats\"][0], dict)\n assert proc[\"inputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"inputs\"][0][\"formats\"][0][\"default\"] is True\n assert isinstance(proc[\"outputs\"], list)\n assert len(proc[\"outputs\"]) == 1\n assert proc[\"outputs\"][0][\"id\"] == \"files\"\n assert \"minOccurs\" not in proc[\"outputs\"][0]\n assert \"maxOccurs\" not in proc[\"outputs\"][0]\n assert isinstance(proc[\"outputs\"][0][\"formats\"], list)\n assert len(proc[\"outputs\"][0][\"formats\"]) == 1\n assert isinstance(proc[\"outputs\"][0][\"formats\"][0], dict)\n assert proc[\"outputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"outputs\"][0][\"formats\"][0][\"default\"] is True\n expect = KNOWN_PROCESS_DESCRIPTION_FIELDS\n fields = set(proc.keys()) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))", "def test_process_yaml_invalid(caplog):\n data = \"\"\"\n text_key: incorrect format\n - listitem\n - listitem\n \"\"\"\n\n with patch(\"builtins.open\", mock_open(read_data=data)):\n result = process_yaml(\"test/file.yaml\")\n\n for record in caplog.records:\n assert (\n \"Incorrect test/file.yaml. 
Error: while parsing a block mapping\"\n in record.message\n )\n assert record.levelname == \"ERROR\"\n assert result == []", "def test_PhonopyYaml_read_with_stream(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n with open(filename) as fp:\n cell = _get_unitcell(fp)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def test_literal_io_from_package(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": {\n \"url\": {\n \"type\": \"string\"\n }\n },\n \"outputs\": {\n \"values\": {\n \"type\": {\n \"type\": \"array\",\n \"items\": \"float\",\n }\n }\n }\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n\n assert proc[\"id\"] == self._testMethodName\n assert proc[\"title\"] == \"some title\"\n assert proc[\"description\"] == \"this is a test\"\n assert isinstance(proc[\"inputs\"], list)\n assert len(proc[\"inputs\"]) == 1\n assert proc[\"inputs\"][0][\"id\"] == \"url\"\n assert proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert \"format\" not in proc[\"inputs\"][0]\n assert isinstance(proc[\"outputs\"], list)\n assert len(proc[\"outputs\"]) == 1\n assert proc[\"outputs\"][0][\"id\"] == \"values\"\n assert \"minOccurs\" not in proc[\"outputs\"][0]\n assert \"maxOccurs\" not in proc[\"outputs\"][0]\n assert \"format\" not in proc[\"outputs\"][0]\n expect = KNOWN_PROCESS_DESCRIPTION_FIELDS\n fields = set(proc.keys()) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))\n # make sure that deserialization of literal fields did not produce over-verbose metadata\n for p_input in proc[\"inputs\"]:\n expect = KNOWN_PROCESS_DESCRIPTION_INPUT_DATA_FIELDS\n fields = set(p_input) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))\n for p_output in proc[\"outputs\"]:\n expect = KNOWN_PROCESS_DESCRIPTION_OUTPUT_DATA_FIELDS\n fields = set(p_output) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))", "def test_metadata():\n\n def to_json(text):\n return pf.convert_text(text, 'markdown', 'json')\n\n def assert_equal(*extra_args, input_text, output_text):\n \"\"\"\n Default values for extra_args:\n filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False\n \"\"\"\n \n # Set --from=markdown\n sys.argv[1:] = []\n sys.argv.append('markdown')\n\n _stdout = io.StringIO()\n pf.stdio(*extra_args, input_stream=io.StringIO(input_text), output_stream=_stdout)\n _stdout = pf.convert_text(_stdout.getvalue(), 'json', 'markdown')\n assert _stdout == output_text\n\n md_contents = \"$1-1$\"\n md_document = \"\"\"---\npanflute-filters: test_filter\npanflute-path: ./tests/test_panfl/bar\n...\n{}\n\"\"\".format(md_contents)\n expected_output = '$1+1markdown$'\n\n json_contents = to_json(md_contents)\n json_document = to_json(md_document)\n\n # Filter in YAML block; try `panf_` true and false (this is a minor option that changes how the path gets built)\n assert_equal(None, None, True, True, True, 
input_text=json_document, output_text=expected_output)\n assert_equal(None, None, True, True, False, input_text=json_document, output_text=expected_output)\n\n # Open the filter as a standalone python script within a folder\n assert_equal(['test_filter.py'], ['./tests/test_panfl/bar'], True, True, False, input_text=json_contents, output_text=expected_output)\n\n # Open the filter with the exact abs. path (no need for folder)\n assert_equal([os.path.abspath('./tests/test_panfl/bar/test_filter.py')], [], False, True, True, input_text=json_contents, output_text=expected_output)\n\n # Open the filter as part of a package (packagename.module)\n assert_equal(['foo.test_filter'], ['./tests/test_panfl'], False, True, True, input_text=json_contents, output_text=expected_output)\n assert_equal(['test_filter'], ['./tests/test_panfl/foo'], False, True, True, input_text=json_contents, output_text=expected_output)", "def test_process_email(self, process_mock):\n with name_of_file_containing('contents') as filename:\n call_command('process_email', email_file=filename)\n self.assertEqual(process_mock.call_count, 1, 'process_response_email should be called once')\n self.assertEqual(\n process_mock.call_args.args,\n ('contents',),\n 'process_response_email should receive the correct contents'\n )", "def test_mediatype_io_format_references(self):\n ns_json, type_json = get_cwl_file_format(CONTENT_TYPE_APP_JSON)\n namespaces = dict(list(ns_json.items()))\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"inputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"formats\": [\n {\n \"mimeType\": CONTENT_TYPE_APP_JSON,\n \"default\": True,\n }\n ]\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"formats\": [\n {\n \"mediaType\": CONTENT_TYPE_APP_JSON,\n \"default\": True,\n }\n ]\n },\n ],\n \"outputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"formats\": [{\"mediaType\": CONTENT_TYPE_APP_JSON}],\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"formats\": [{\"mediaType\": CONTENT_TYPE_APP_JSON}],\n },\n ],\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\n \"unit\": {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n ],\n \"outputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n ],\n \"$namespaces\": namespaces\n }\n }]\n }\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n assert proc[\"inputs\"][0][\"id\"] == \"wps_format_mimeType\"\n assert proc[\"inputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert proc[\"inputs\"][1][\"id\"] == \"wps_format_mediaType\"\n assert proc[\"inputs\"][1][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert proc[\"outputs\"][0][\"id\"] == \"wps_format_mimeType\"\n assert proc[\"outputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert proc[\"outputs\"][1][\"id\"] == \"wps_format_mediaType\"\n assert proc[\"outputs\"][1][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n\n desc = self.describe_process(self._testMethodName, describe_schema=\"OGC\")\n assert 
desc[\"inputs\"][\"wps_format_mimeType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert desc[\"inputs\"][\"wps_format_mediaType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert desc[\"outputs\"][\"wps_format_mimeType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert desc[\"outputs\"][\"wps_format_mediaType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON", "def testProcess(self):\n self.grr_hunt_file_collector.PreProcess()\n self.grr_hunt_file_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].paths,\n ['/etc/passwd', '/etc/shadow', '/etc/hosts'])\n self.assertEqual(call_kwargs['flow_args'].action.action_type,\n flows_pb2.FileFinderAction.DOWNLOAD)\n self.assertEqual(call_kwargs['flow_name'], 'FileFinder')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')", "def testProcess(self):\n self.grr_hunt_artifact_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].artifact_list,\n ['RandomArtifact'])\n self.assertEqual(call_kwargs['flow_args'].use_raw_filesystem_access, True)\n self.assertEqual(call_kwargs['flow_name'], 'ArtifactCollectorFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')", "def testProcess(self):\n self.grr_hunt_osquery_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].query,\n 'SELECT * FROM processes')\n self.assertEqual(call_kwargs['flow_args'].timeout_millis,\n 300000)\n self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)\n self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')", "def test_read_phonopy_yaml_with_stream(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n with open(filename) as fp:\n cell = read_phonopy_yaml(fp).unitcell\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def test_probabilistic_parsers():", "def test_get_yaml_spec(self):\n pass", "def test_process_data():\n afos_dump.process_data(\"\")", "def test_main_prints_title(runner: CliRunner, mock_requests_get: MockFixture) -> None:\n result = runner.invoke(console.main)\n assert \"Lorem Ipsum\" in result.output", "async def test_stream(hassio_client, aioclient_mock: AiohttpClientMocker) -> None:\n aioclient_mock.get(\"http://127.0.0.1/app/entrypoint.js\")\n await hassio_client.get(\"/api/hassio/app/entrypoint.js\", data=\"test\")\n assert isinstance(aioclient_mock.mock_calls[-1][2], StreamReader)", "def setUp(self):\n self._expected = yaml.safe_load(OUT)", "def test_process_factory(self):\n logging.debug('********** Test process: test_process_factory **********')\n\n self.parser.clear()\n self.parser.read_dict({'test_process': {'class_name': 'EssSocPowerController',\n 'some_special_attribute': 0.6}})\n\n PF = process_core.ProcessFactory()\n test_class = PF.factory(self.parser['test_process'])\n\n self.assertIsInstance(test_class, process_plugins.EssSocPowerController)\n #self.assertEqual(test_class.config['some_special_attribute'], 0.6)", "def test_process_metrics_method_no_metadata_file(monkeypatch, s3_setup, tmpdir):\n argument_dict = {\n \"cos-endpoint\": \"http://\" + MINIO_HOST_PORT,\n \"cos-bucket\": \"test-bucket\",\n \"cos-directory\": 
\"test-directory\",\n \"cos-dependencies-archive\": \"test-archive.tgz\",\n \"filepath\": os.path.join(RESOURCES_DIR, \"test-notebookA.ipynb\"),\n \"inputs\": \"test-file.txt;test,file.txt\",\n \"outputs\": \"test-file/test-file-copy.txt;test-file/test,file/test,file-copy.txt\",\n \"user-volume-path\": None,\n }\n\n output_path = Path(tmpdir)\n # metadata file name and location\n metadata_file = output_path / \"mlpipeline-ui-metadata.json\"\n # remove file if it already exists\n remove_file(metadata_file)\n\n # override the default output directory to make this test platform\n # independent\n monkeypatch.setenv(\"ELYRA_WRITABLE_CONTAINER_DIR\", str(tmpdir))\n main_method_setup_execution(monkeypatch, s3_setup, tmpdir, argument_dict)\n\n # process_metrics should have generated a file named mlpipeline-ui-metadata.json\n # in tmpdir\n\n try:\n with open(metadata_file, \"r\") as f:\n metadata = json.load(f)\n assert metadata.get(\"outputs\") is not None\n assert isinstance(metadata[\"outputs\"], list)\n assert len(metadata[\"outputs\"]) == 1\n assert metadata[\"outputs\"][0][\"storage\"] == \"inline\"\n assert metadata[\"outputs\"][0][\"type\"] == \"markdown\"\n assert (\n f\"{argument_dict['cos-endpoint']}/{argument_dict['cos-bucket']}/{argument_dict['cos-directory']}\"\n in metadata[\"outputs\"][0][\"source\"]\n )\n assert argument_dict[\"cos-dependencies-archive\"] in metadata[\"outputs\"][0][\"source\"]\n except AssertionError:\n raise\n except Exception as ex:\n # Potential reasons for failures:\n # file not found, invalid JSON\n print(f'Validation of \"{str(ex)}\" failed: {ex}')\n assert False", "def test_to_yaml(self):\n file_to_read = \"this_yaml_is_a_ghost.yaml\"\n\n expected = False\n actual = PyFunceble.path.isfile(file_to_read)\n\n self.assertEqual(expected, actual)\n\n to_write = {\"hello\": [\"This is PyFunceble!\", \"Uhh!\"], \"world\": \"Fun Ilrys\"}\n\n expected = \"\"\"hello: [This is PyFunceble!, Uhh!]\nworld: Fun Ilrys\n\"\"\"\n\n Dict(to_write).to_yaml(file_to_read)\n\n actual = File(file_to_read).read()\n self.assertEqual(expected, actual)\n\n File(file_to_read).delete()\n\n expected = False\n actual = PyFunceble.path.isfile(file_to_read)\n\n self.assertEqual(expected, actual)", "def test_pandoc_call():\n\n def run_proc(*args, stdin):\n #assert not args, args\n proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=stdin, encoding='utf-8', cwd=os.getcwd())\n _stdout, _stderr = proc.stdout, proc.stderr\n return (_stdout if _stdout else '').strip(), (_stderr if _stderr else '').strip()\n\n md_contents = \"$1-1$\"\n expected_output = '$1+1markdown$'\n\n stdout = run_proc('pandoc', '--filter=panfl', '--to=markdown',\n '--metadata=panflute-verbose:True',\n '--metadata=panflute-filters:' + os.path.abspath('./tests/test_panfl/bar/test_filter.py'),\n stdin=md_contents)[0]\n assert stdout == expected_output\n\n stdout = run_proc('pandoc', '--filter=panfl', '--to=markdown',\n '--metadata=panflute-filters:test_filter',\n '--metadata=panflute-path:./tests/test_panfl/bar',\n stdin=md_contents)[0]\n assert stdout == expected_output", "def test_pick_a_process_to_run(self):\n workflow = self.get_workflow(\n \"\"\"file://C <- file://B\n echo C > C\n echo B creates C\n\nfile://B <- file://A\n echo B > B\n echo A creates B\n \"\"\")\n p = workflow.pick_a_process_to_run()\n assert p.id.find(\"_5\") >= 0, p.id", "def test_execute_job_with_inline_input_values(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": 
[\"python3\", \"script.py\"],\n \"inputs\": {\n \"stringInput\": \"string\",\n \"integerInput\": \"int\",\n \"doubleInput\": \"float\",\n \"stringArrayInput\": {\"type\": {\"type\": \"array\", \"items\": \"string\"}},\n \"integerArrayInput\": {\"type\": {\"type\": \"array\", \"items\": \"int\"}},\n \"floatArrayInput\": {\"type\": {\"type\": \"array\", \"items\": \"float\"}},\n \"measureStringInput\": \"string\",\n \"measureIntegerInput\": \"int\",\n \"measureFloatInput\": \"float\",\n \"measureFileInput\": \"File\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\n \"dockerPull\": \"python:3.7-alpine\"\n },\n CWL_REQUIREMENT_INIT_WORKDIR: {\n \"listing\": [\n {\n \"entryname\": \"script.py\",\n \"entry\": cleandoc(\"\"\"\n import json\n import os\n import ast\n input = $(inputs)\n try:\n for key, value in input.items():\n if isinstance(value, dict):\n path_ = value.get(\"path\")\n if path_ and os.path.exists(path_):\n with open (path_, \"r\") as file_:\n file_data = file_.read()\n input[key] = ast.literal_eval(file_data.upper())\n json.dump(input, open(\"./tmp.txt\", \"w\"))\n except Exception as exc:\n print(exc)\n raise\n \"\"\")\n }\n ]\n }\n },\n \"outputs\": [{\"id\": \"output_test\", \"type\": \"File\", \"outputBinding\": {\"glob\": \"tmp.txt\"}}],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n try:\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n except colander.Invalid:\n self.fail(\"Test\")\n\n assert desc[\"process\"] is not None\n\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n tmp_file = stack_exec.enter_context(tempfile.NamedTemporaryFile(mode=\"w\", suffix=\".json\")) # noqa\n tmp_file.write(json.dumps({\"value\": {\"ref\": 1, \"measurement\": 10.3, \"uom\": \"m\"}}))\n tmp_file.seek(0)\n\n exec_body = {\n \"mode\": EXECUTE_MODE_ASYNC,\n \"response\": EXECUTE_RESPONSE_DOCUMENT,\n \"inputs\": {\n \"stringInput\": \"string_test\",\n \"integerInput\": 10,\n \"doubleInput\": 3.14159,\n \"stringArrayInput\": [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"],\n \"integerArrayInput\": [1, 2, 3, 4, 5, 6],\n \"floatArrayInput\": [1.45, 2.65, 3.5322, 4.86, 5.57, 6.02],\n \"measureStringInput\": {\n \"value\": \"this is a test\"\n },\n \"measureIntegerInput\": {\n \"value\": 45\n },\n \"measureFloatInput\": {\n \"value\": 10.2\n },\n \"measureFileInput\": {\n \"href\": \"file://{}\".format(tmp_file.name)\n }\n },\n \"outputs\": [\n {\"id\": \"output_test\", \"type\": \"File\"},\n ]\n }\n\n proc_url = \"/processes/{}/jobs\".format(self._testMethodName)\n resp = mocked_sub_requests(self.app, \"post_json\", proc_url, timeout=5,\n data=exec_body, headers=self.json_headers, only_local=True)\n assert resp.status_code in [200, 201], \"Failed with: [{}]\\nReason:\\n{}\".format(resp.status_code, resp.json)\n status_url = resp.json.get(\"location\")\n\n results = self.monitor_job(status_url)\n\n job_output_path = results.get(\"output_test\")[\"href\"].split(self.settings[\"weaver.wps_output_path\"])[-1]\n tmp_file = \"{}/{}\".format(self.settings[\"weaver.wps_output_dir\"], job_output_path)\n\n try:\n with open(tmp_file, \"r\") as f:\n processed_values = json.load(f)\n except FileNotFoundError:\n self.fail(\"Output file [{}] was not found where it was 
expected to resume test\".format(tmp_file))\n except Exception as exception:\n self.fail(\"An error occurred during the reading of the file: {}\".format(exception))\n assert processed_values[\"stringInput\"] == \"string_test\"\n assert processed_values[\"integerInput\"] == 10\n assert processed_values[\"doubleInput\"] == 3.14159\n assert processed_values[\"stringArrayInput\"] == [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\n assert processed_values[\"integerArrayInput\"] == [1, 2, 3, 4, 5, 6]\n assert processed_values[\"floatArrayInput\"] == [1.45, 2.65, 3.5322, 4.86, 5.57, 6.02]\n assert processed_values[\"measureStringInput\"] == \"this is a test\"\n assert processed_values[\"measureIntegerInput\"] == 45\n assert processed_values[\"measureFloatInput\"] == 10.2\n assert processed_values[\"measureFileInput\"] == {\"VALUE\": {\"REF\": 1, \"MEASUREMENT\": 10.3, \"UOM\": \"M\"}}", "def test_main_modular_no_file() -> None:\n\n input_filename = OPEN_API_DATA_PATH / 'modular.yaml'\n\n assert main(['--input', str(input_filename)]) == Exit.ERROR", "def testProcess(self, mock_get_write_results):\n self.mock_grr_api.Hunt.return_value.Get.return_value = \\\n mock_grr_hosts.MOCK_HUNT\n self.grr_hunt_downloader.Process()\n mock_get_write_results.assert_called_with(mock_grr_hosts.MOCK_HUNT,\n '/tmp/test')", "def test_basic_parsers():", "def test_load(self):\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !LinearController\n low_utilisation: 0.9\n high_utilisation: 1.1\n - !MockPool\n \"\"\"\n )\n with load(config.name):\n assert True\n assert True", "def test_PhonopyYaml_read(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n cell = _get_unitcell(filename)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True" ]
[ "0.5976523", "0.59347874", "0.57523376", "0.5659607", "0.5630501", "0.55258167", "0.55115277", "0.54421395", "0.5437241", "0.5436717", "0.5402416", "0.5360342", "0.53089064", "0.5305208", "0.5304585", "0.52773273", "0.5237477", "0.52093935", "0.5208303", "0.5205367", "0.5205287", "0.5204116", "0.51918155", "0.5172656", "0.5172342", "0.5161971", "0.5160353", "0.5153231", "0.51354545", "0.5124304" ]
0.7105798
0
Verify that cloud endpoint service names match the regex
def test_match_endpoints():
    service_names = [
        "iap-ingress-kfctl-8c9b.endpoints.kubeflow-ci-deployment.cloud.goog",
    ]
    for s in service_names:
        assert cleanup_ci.is_match(s, patterns=cleanup_ci.E2E_PATTERNS)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_service_endpoint(path):\n return re.match(r'^[a-zA-Z0-9.-]+:\\d+$', path)", "def check_name(name, allow_services=False):", "def is_service_name_correct(self, service):\r\n return service in self.services", "def _is_format_endpoint(pattern):\n return '?P<format>' in pattern._regex", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "def test_100_services(self):\n services = {\n self.keystone_sentry: ['keystone'],\n self.cinder_sentry: ['cinder-api',\n 'cinder-scheduler',\n 'cinder-volume']\n }\n if self.is_liberty_or_newer():\n services[self.keystone_sentry] = ['apache2']\n else:\n services[self.keystone_sentry] = ['keystone']\n ret = u.validate_services_by_name(services)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)", "def _checkServices(self, expectedServices):\n it = iter(self._getServices())\n for (type_uri, service_uri) in expectedServices:\n for element in it:\n if type_uri in xrds.getTypeURIs(element):\n self.assertEqual(xrds.getURI(element), service_uri)\n break\n else:\n self.fail('Did not find %r service' % (type_uri,))", "def is_valid_hostname(string: str) -> bool:\n\n return hostname_regex.match(string) is not None", "def IsVPCNameValid(vpc):\n if len(vpc) < 1 or len(vpc) > 63:\n return False\n return bool(re.match('^[a-z]$|^[a-z][a-z0-9-]*[a-z0-9]$', vpc))", "def match_endpoint(self, method, request):\n if \"?\" in request:\n raise InvalidRequest()\n all_endpoints = self.config.endpoints()\n match_str = \"%s %s\" % (method, request)\n matched_endpoints = set()\n # Note: fnmatch.filter seemed to be broken when trying to do exaclty this.\n for endpoint in all_endpoints:\n if fnmatch.fnmatch(match_str, endpoint):\n matched_endpoints.add(endpoint)\n return matched_endpoints", "def test_is_valid_kubernetes_resource_name_valid_input():\n # test valid names\n assert is_valid_kubernetes_resource_name(name=\"l0l\")\n assert is_valid_kubernetes_resource_name(name=\"l-l\")\n assert is_valid_kubernetes_resource_name(name=\"l.l\")\n assert is_valid_kubernetes_resource_name(name=\"4-you\")\n assert is_valid_kubernetes_resource_name(name=\"you.2\")", "def test_name_matching(string, matches: bool):\n assert (re.fullmatch(pattern, string) is not None) == matches", "def _find_filter(keyword):\n db = get_service_collection()\n result = db.find({\"name\": {\"$regex\": keyword}})\n service_endpoint = ''\n for item in result:\n service_endpoint = item[\"value\"][\"url\"]\n break\n return service_endpoint", "def _filter_service(self, service):\n\t\tservice = service.lower()\n\t\tservice = service.replace(\"soap\",\"http\").replace(\"https\",\"http\").replace(\"ssl\",\"http\").replace(\"http-proxy\",\"http\").replace(\"http-alt\",\"http\").replace(\"ajp13\",\"http\").replace(\"vnc-http\",\"http\").replace(\"http-mgmt\",\"http\").replace(\"x509\",\"http\").replace('iiimsf','http')\n\t\tservice = service.replace(\"microsoft-ds\",\"netbios-ssn\")\n\t\tservice = service.replace(\"imaps\",\"imap\").replace(\"pop3s\",\"pop3\").replace(\"smtps\",\"smtp\").replace(\"pop3pw\",\"pop3\")\n\t\tservice = service.replace(\"psql\",\"postgresql\")\n\n\t\treturn service", "def validate_endpoint_data(self, endpoints, admin_port, internal_port,\n public_port, expected):\n found = False\n for ep in endpoints:\n self.log.debug('endpoint: {}'.format(repr(ep)))\n if (admin_port in ep.adminurl and\n internal_port in ep.internalurl and\n public_port in ep.publicurl):\n found = True\n actual = {'id': ep.id,\n 'region': ep.region,\n 
'adminurl': ep.adminurl,\n 'internalurl': ep.internalurl,\n 'publicurl': ep.publicurl,\n 'service_id': ep.service_id}\n ret = self._validate_dict_data(expected, actual)\n if ret:\n return 'unexpected endpoint data - {}'.format(ret)\n\n if not found:\n return 'endpoint not found'", "def check_endpoint_in_paths(context, endpoint):\n data = context.response.json()\n paths = check_and_get_attribute(data, \"paths\")\n assert endpoint in paths, \"Cannot find the expected endpoint {e}\".format(\n e=endpoint)", "def __validate_conn_pattern(conns:str)->str:\n pattern1 = re.compile(r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n # pattern2 = re.compile(r'^\\w+:\\w+@\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n\n for conn in conns.split(\",\"):\n if not pattern1.match(conn) and not pattern2.match(conn):\n raise argparse.ArgumentTypeError(f'Invalid connection format: {conn}. Supported formats: 127.0.0.1:32049 or user:[email protected]:32049')\n\n return conns", "def checkHost(host):\n if \"192.168.\" in host:\n return False\n elif \"169.254.\" in host: #APIPA (Automatic Private Internet Protocol Addressing)\n return False\n elif re.match(\"^(127\\.)\",host):\n return False\n elif re.match(\"^(10\\.)\",host):\n return False\n elif re.match(\"^(172\\.1[6-9]\\.)|(172\\.2[0-9]\\.)|(172\\.3[0-1]\\.)\",host):\n return False\n else:\n return True", "def test_138_service_catalog(self):\n u.log.debug('Checking keystone service catalog...')\n self.set_api_version(2)\n endpoint_check = {\n 'adminURL': u.valid_url,\n 'id': u.not_null,\n 'region': 'RegionOne',\n 'publicURL': u.valid_url,\n 'internalURL': u.valid_url\n }\n expected = {\n 'volume': [endpoint_check],\n 'identity': [endpoint_check]\n }\n actual = self.keystone_v2.service_catalog.get_endpoints()\n\n ret = u.validate_svc_catalog_endpoint_data(expected, actual)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)", "def valid_host(host):\n if host in ACCEPTED_HOSTS:\n return True\n return False", "def validate_svc_catalog_endpoint_data(self, expected, actual):\n self.log.debug('actual: {}'.format(repr(actual)))\n for k, v in expected.iteritems():\n if k in actual:\n ret = self._validate_dict_data(expected[k][0], actual[k][0])\n if ret:\n return self.endpoint_error(k, ret)\n else:\n return \"endpoint {} does not exist\".format(k)\n return ret", "def valid_endpoint(cls):\n\t\treturn cls.__subclasses__() == []", "def test_100_services(self):\n u.log.debug('Checking system services...')\n swift_storage_services = ['swift-account',\n 'swift-account-auditor',\n 'swift-account-reaper',\n 'swift-account-replicator',\n 'swift-container',\n 'swift-container-auditor',\n 'swift-container-replicator',\n 'swift-container-updater',\n 'swift-object',\n 'swift-object-auditor',\n 'swift-object-replicator',\n 'swift-object-updater',\n 'swift-container-sync']\n service_names = {\n self.keystone_sentry: ['keystone'],\n self.glance_sentry: ['glance-registry',\n 'glance-api'],\n self.swift_proxy_sentry: ['swift-proxy'],\n self.swift_storage_sentry: swift_storage_services\n }\n\n if self._get_openstack_release() >= self.trusty_liberty:\n service_names[self.keystone_sentry] = ['apache2']\n\n ret = u.validate_services_by_name(service_names)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)", "def IsValidHotlistName(s):\n return (RE_HOTLIST_NAME.match(s) and\n len(s) <= framework_constants.MAX_HOTLIST_NAME_LENGTH)", "def test_namespace_of_call_api_endpoint():\n\n url = '/api/v1/calls/'\n resolved = resolve(url)\n\n assert resolved.namespace == 'calls'\\\n 
and resolved.url_name == 'call-list'", "def chkfqdn(fqdn):\n if fqdn is None:\n return False\n hp = hostportion(fqdn)\n # not needed right now: pp = portportion(fqdn)\n # TODO need to augment this for IPv6 addresses\n return re.match('^[a-zA-Z0-9_-]+(\\\\.[a-zA-Z0-9_-]+)+$', hp) is not None", "def check_layer_name(field):\n \n hygienize = field.replace(\"\\\"\", \"\")\n layer_name = (hygienize.split(\".\"))[0]\n \n if layer_name in layer_names:\n return True\n return False", "def check_services(self):\n for service in self.services:\n try:\n self.cloud.search_services(service)[0]\n except Exception: # pylint: disable=broad-except\n self.is_skipped = True\n break", "def test_get_service_string(self):\n pass", "def test_ipam_services_list(self):\n pass" ]
[ "0.6847793", "0.65917355", "0.6273963", "0.58474916", "0.57660294", "0.5582451", "0.5489312", "0.5447833", "0.5414882", "0.53854394", "0.5358709", "0.5328395", "0.53097093", "0.5299616", "0.525812", "0.52561545", "0.52393234", "0.5234684", "0.5198802", "0.51901925", "0.5182861", "0.51774687", "0.51726836", "0.5145361", "0.5130106", "0.51182556", "0.51098067", "0.5103878", "0.5096667", "0.50871575" ]
0.76800895
0
Process an LTPage layout and return a list of elements.
def _process_layout(self, layout):
    # Here we just group text into paragraphs
    elements = []
    for lt_obj in layout:
        if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):
            elements.append(Paragraph(lt_obj.get_text().strip()))
        elif isinstance(lt_obj, LTFigure):
            # Recursive...
            elements.extend(self._process_layout(lt_obj))
    return elements
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _process_layout(self, layout):\r\n # Here we just group text into paragraphs\r\n elements = []\r\n for lt_obj in layout:\r\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\r\n elements.append(Paragraph(lt_obj.get_text().strip()))\r\n elif isinstance(lt_obj, LTFigure):\r\n # Recursive...\r\n elements.extend(self._process_layout(lt_obj))\r\n return elements", "def parse_layout(layout):\n global index \n for lt_obj in layout:\n print(lt_obj.__class__.__name__)\n print(lt_obj.bbox)\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n print(lt_obj.get_text())\n d[lt_obj.get_text().strip()]=(index,lt_obj.bbox)\n index+=1\n elif isinstance(lt_obj, LTFigure):\n parse_layout(lt_obj) # Recursive", "def parse_layout(layout, mytext, line_list):\n # mytext = []\n line = \"\"\n for lt_obj in layout:\n # print(lt_obj.__class__.__name__)\n # print(lt_obj.bbox)\n if isinstance(lt_obj, LTTextLine):\n mytext.append(lt_obj) # .get_text())\n # print(lt_obj.get_text())\n # print(text)\n elif isinstance(lt_obj, LTLine):\n line_list.append(lt_obj)\n elif isinstance(lt_obj, LTCurve):\n bbox = lt_obj.bbox\n if bbox[1] == bbox[3]:\n line_list.append(lt_obj)\n elif isinstance(lt_obj, LTTextBox): # or isinstance(lt_obj, LTTextLine):\n # print(lt_obj.get_text())\n mytext, line_list = parse_layout(lt_obj, mytext, line_list) # Recursive\n\n # elif isinstance(lt_obj, LTAnno):\n # print(line)\n # print(str(lt_obj._text) + 'xx')\n # if lt_obj._text == '\\n':\n # text.append(line)\n # print(str(type(lt_obj)) + \" : \" + str(lt_obj.get_text()))\n # print(dir(lt_obj))\n # print(mytext)\n return mytext, line_list", "def order_lines_layouted(self):\n self.ensure_one()\n report_pages = [[]]\n #print(list(groupby(self.order_line, lambda l: l.layout_category_id)))\n for category, lines in groupby(self.order_line, lambda l: l.layout_category_id):\n\n if report_pages[-1] and report_pages[-1][-1]['pagebreak']:\n report_pages.append([])\n report_pages[-1].append({\n 'parent': category.parent.name,\n 'name': category and category.name or _('Uncategorized'),\n 'subtotal': category and category.subtotal,\n 'pagebreak': category and category.pagebreak,\n 'lines': list(lines)\n })\n\n return report_pages", "def process_page_tag(self, root):\n pages = root.findall(self.tag_prefix + self.page_tag)\n articles = []\n for page in pages:\n if self.is_news_article(page):\n article = self.parse_text(page)\n if article:\n articles.append(article)\n return articles", "def process_page_data(page_data: List[str]) -> List[Tuple[Any]]:\r\n processed_data: List[Tuple[Any]] = []\r\n for item in page_data:\r\n ram_soup = soup(item, \"html.parser\")\r\n list_wrap = ram_soup.find(\"div\", {\"class\": \"list-wrap\"})\r\n containers = list_wrap.findAll(\"div\", {\"class\": \"item-container\"})\r\n for container in containers:\r\n brand: Optional[str] = get_brand_name(container=container)\r\n product_name: str = get_product_name(container=container)\r\n shipping: str = get_shipping(container=container)\r\n product_price: Optional[str] = get_product_price(container=container)\r\n processed_data.append((brand, product_name, shipping, product_price))\r\n return processed_data", "def extractTextWithFullLayout(analyzed_data):\r\n\r\n data = []\r\n for page in analyzed_data:\r\n if not page:\r\n continue\r\n\r\n data.append([])\r\n for lt_obj in page:\r\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\r\n data[-1].append({\r\n 'type': 'text', # Might support more types (e.g. 
figures) in the future.\r\n 'text': lt_obj.get_text().split(\"\\n\"),\r\n 'layout': {\r\n 'x0': lt_obj.x0,\r\n 'x1': lt_obj.x1,\r\n 'y0': lt_obj.y0,\r\n 'y1': lt_obj.y1\r\n }\r\n })\r\n\r\n return data", "def _parse_xml(self):\n self.properties = {}\n pages = self.root.findall('page')\n self.pages = {} \n\n for page_num, page in enumerate(pages): \n\n _, _ , width, height = page.attrib[\"bbox\"].split(\",\")\n width, height = float(width), float(height)\n \n page_object = {\"page\": page_num + 1 , \"width\": width, \"height\": height} \n lines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n print(\"{} Number of Lines in Page {}\".format(len(lines), page_num))\n \n self.bbox = {'x1': [] , 'y1':[], 'x2':[], 'y2':[]}\n textlines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n textlines = sorted(textlines, key= lambda x: -float(x.attrib['bbox'].split(',')[3]))\n \n \n line_objects = []\n for idx, item in enumerate(textlines):\n item_props = self._extract_textline_properties(item)\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3])\n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n\n line_objects.append(item_props)\n page_object[\"lines\"] = line_objects\n \n \n others = [] \n# for key in [\"rect\", \"figure\", \"layout/textgroup\", \"curve\"]: \n for key in [\"curve\", \"rect\", \"figure\"]: \n other_objs = self.root.findall('page[@id=\\'{}\\']/{}'.format(page_num+1, key)) \n for idx, item in enumerate(other_objs):\n \n item_props = {\"type\": key}\n# print(key, ET.tostring(item))\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3]) \n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n others.append(item_props)\n \n page_object[\"others\"] = others\n page = Page(page_object)\n page_object[\"para\"] = page.para\n page_object[\"plines\"] = page.lines\n page_object[\"bigbox\"] = page.bigbox\n page_object[\"components\"] = page.components\n\n self.pages[page_num+1] = page_object", "def parse_view_page(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n yield cell.text", "def extract_text_blocks(layout, space_tol):\n text = \"\"\n prev_item = None\n\n def is_text(item):\n return isinstance(item, LTText)\n\n def is_container(item):\n return isinstance(item, LTContainer)\n\n for item in layout:\n if is_text(item):\n max_distance = space_tol * min(item.width, item.height)\n if is_text(prev_item) and item.hdistance(prev_item) > max_distance:\n if len(text) > 0:\n yield text\n text = \"\"\n text += item.get_text()\n elif is_container(item):\n for text in extract_text_blocks(item, space_tol):\n yield text\n else:\n if len(text) != 0:\n yield text\n text = \"\"\n prev_item = item\n\n if len(text) != 0:\n yield text", "def split_lines(layout):\n children = []\n current_line = None\n\n for child in layout.children:\n if isinstance(child, _LineBreak):\n current_line = None\n else:\n if current_line is None:\n current_line = LineBox(child.style)\n children.append(current_line)\n current_line.children.append(child)\n\n 
layout.children = children", "def init_page_elements(self):\n pass", "def parseLayouts(*args):\n return _libsbml.parseLayouts(*args)", "def parse_further_pages(self, response):\n # print(\"Page num: \", response.meta[\"page_number\"])\n page_num = response.meta[\"page_number\"]\n tile_path = \"//div[@class='product-tile']\"\n # gets between 1 and 48 SelectorLists, depending on how many products are on the page.\n product_tiles_from_the_page = response.xpath(tile_path)\n for page in product_tiles_from_the_page:\n self.convert_product_tiles_from_this_page_to_items(page,\n product_category=response.meta[\"category_name\"],\n page_num=page_num)\n\n return None", "def _parse_result_page(self, page):\n items = []\n table = list(page.findall(\".//table[@id='browse']\"))[0]\n for row in (x for x in list(table.findall('tr'))[1:]\n if len(x.getchildren()) != 1):\n item = self._parse_item_row(row)\n items.append(item)\n return items", "def addWebPageListToLayout(self, gridLayout, startingRow):\n\n from consider.notifications import options\n\n webPages = self.model.getWebPages()\n\n if verbose:\n print('DEBUG: current web pages: ' + str(webPages))\n row = startingRow\n\n webPageLabel = QLabel('WebPage:')\n gridLayout.addWidget(webPageLabel, row, 0, 1, 4)\n\n clientLabel = QLabel('Client')\n gridLayout.addWidget(clientLabel, row, 4)\n emailLabel = QLabel('Email')\n gridLayout.addWidget(emailLabel, row, 5)\n #smsLabel = QLabel('SMS')\n #gridLayout.addWidget(smsLabel, row, 6)\n frequencyLabel = QLabel('Frequency')\n gridLayout.addWidget(frequencyLabel, row, 7)\n minWordLabel = QLabel('Sensitivity')\n gridLayout.addWidget(minWordLabel, row, 8)\n\n for webPage in webPages:\n row = row + 1\n linkLineEdit = QLineEdit(webPage)\n gridLayout.addWidget(linkLineEdit, row, 0, 1, 4)\n\n clientCheck = QCheckBox()\n if options.NOTIFICATION_TYPE_CLIENT in webPages[webPage].getNotificationTypes():\n clientCheck.setChecked(1)\n self.connect(clientCheck,\n SIGNAL('stateChanged(int)'),\n self.checkBoxHandlerBuilder(webPage, options.NOTIFICATION_TYPE_CLIENT))\n gridLayout.addWidget(clientCheck, row, 4)\n\n emailCheck = QCheckBox()\n if options.NOTIFICATION_TYPE_EMAIL in webPages[webPage].getNotificationTypes():\n emailCheck.setChecked(1)\n self.connect(emailCheck,\n SIGNAL('stateChanged(int)'),\n self.checkBoxHandlerBuilder(webPage, options.NOTIFICATION_TYPE_EMAIL))\n gridLayout.addWidget(emailCheck, row, 5)\n\n #smsCheck = QCheckBox()\n #if options.NOTIFICATION_TYPE_SMS in webPages[webPage].getNotificationTypes():\n # smsCheck.setChecked(1)\n #self.connect(smsCheck,\n # SIGNAL('stateChanged(int)'),\n # self.checkBoxHandlerBuilder(webPage, options.NOTIFICATION_TYPE_SMS))\n #gridLayout.addWidget(smsCheck, row, 6)\n\n frequencySlider = QSlider(Qt.Horizontal)\n frequencySlider.setTracking(False)\n frequencySlider.setMaximum(options.MAX_FREQUENCY)\n frequencySlider.setMinimum(options.MIN_FREQUENCY)\n frequencySlider.setValue(webPages[webPage].getFrequency())\n self.connect(frequencySlider, SIGNAL('valueChanged(int)'), self.sliderChangeBuilder(webPage) )\n gridLayout.addWidget(frequencySlider, row, 7)\n\n wordCountSpinBox = QSpinBox()\n wordCountSpinBox.setMinimum(options.MIN_WC_THRESHOLD)\n wordCountSpinBox.setMaximum(options.MAX_WC_THRESHOLD)\n wordCountSpinBox.setValue(webPages[webPage].getWCThreshold())\n self.connect(wordCountSpinBox, SIGNAL('valueChanged(int)'), self.spinboxChangeBuilder(webPage))\n gridLayout.addWidget(wordCountSpinBox, row, 8)\n\n removeButton = QPushButton('Remove')\n self.connect(removeButton, 
SIGNAL('clicked()'), self.removeWebPageBuilder((webPage)))\n gridLayout.addWidget(removeButton, row, 10)\n \n # add a blank line for adding new entries\n row = row + 1\n self.newWebPageLink = QLineEdit(\"<Location>\")\n gridLayout.addWidget(self.newWebPageLink, row, 0, 1, 4)\n # FIXME\n #clientCheck = QCheckBox()\n #gridLayout.addWidget(clientCheck, row, 2)\n #emailCheck = QCheckBox()\n #gridLayout.addWidget(emailCheck, row, 3)\n #smsCheck = QCheckBox()\n #gridLayout.addWidget(smsCheck, row, 4)\n\n addButton = QPushButton(\"Add\")\n self.connect(addButton, SIGNAL(\"clicked()\"), self.addNewWebPage)\n gridLayout.addWidget(addButton, row, 10)\n return row+1", "def _parse_node_layouts(self):\n\n # for layout in self.node_layout:\n # self._parse_node_layout(layout)\n\n # get a list containing a list of codes on the same node\n codes_on_node = []\n for layout in self.node_layout:\n codes_on_node.append(list(self._extract_codes_on_node(layout)))\n\n # check for dependencies and re-arrange codes in this list\n self._rearrange_codes_by_dependencies(codes_on_node)\n\n # Get num nodes required to run this layout\n for l in codes_on_node:\n if len(l) == 0:\n continue\n\n num_nodes_reqd_for_layout = max([code.nodes for code in l])\n\n # Ensure required nodes are available\n num_nodes_in_queue = len(list(self._nodes_assigned.queue))\n assert num_nodes_in_queue >= num_nodes_reqd_for_layout, \\\n \"Do not have sufficient nodes to run the layout. \" \\\n \"Need {}, found {}\".format(num_nodes_reqd_for_layout,\n num_nodes_in_queue)\n\n # Get a list of nodes required for this run\n nodes_assigned_to_layout = []\n for i in range(num_nodes_reqd_for_layout):\n nodes_assigned_to_layout.append(self._nodes_assigned.get())\n\n # Assign nodes to runs\n for run in l:\n run.nodes_assigned = nodes_assigned_to_layout", "def html_collect_pages(app):\n if not hasattr(app.builder.env, \"categories\"):\n return # nothing to do\n\n for name, context, template in create_category_pages(app):\n yield (name, context, template)", "def html_collect_pages(app):\n\n if not hasattr(app.builder.env, \"categories\"):\n return # nothing to do\n\n for name, context, template in create_category_pages(app):\n yield (name, context, template)", "def get_layout_code(self, obj):\n # called only from generate_code_ctor when creating a class constructor to get the last lines\n layout_lines = []\n self._reset_vars()\n\n self._prepare_tmpl_content(obj)\n for line in self.tmpl_layout:\n layout_lines.append(line % self.tmpl_dict)\n return layout_lines", "def _get_page_elements(self):\n from toolium.pageelements.page_element import PageElement\n from toolium.pageelements.page_elements import PageElements\n page_elements = []\n for attribute, value in list(self.__dict__.items()) + list(self.__class__.__dict__.items()):\n if attribute != 'parent' and (\n isinstance(value, PageElement) or isinstance(value, PageElements) or isinstance(value,\n PageObject)):\n page_elements.append(value)\n return page_elements", "def get_pages() -> [(str, str, int)]:\n\ttext = requests.get(url_pages).text\n\ttable = re.search(pat_program_table, text).group(1)\n\tpages = re.findall(pat_program_entry, table)[2:] # First 2 - table headers\n\treturn [get_page(x) for x in pages]", "def get_declared_items(self):\n for k, v in super(AndroidListView, self).get_declared_items():\n if k == 'layout':\n yield k, v\n break", "def content_pages(self):\n pages = []\n for block in self.contents: # pylint: disable=not-an-iterable\n if block.value:\n pages.append(block.value.specific)\n 
return pages", "def make_pages(src, dst, layout, **params):\n items = []\n\n for src_path in glob.glob(src):\n context = read_content(src_path)\n\n params.update(context)\n\n dst_path = render(dst, **params)\n output = render(context['content'], **params)\n\n # Add destination path to context\n context['dest_path'] = dst_path\n\n # Add item to list\n items.append(context)\n\n log('Rendering {} => {} ...', src_path, dst_path)\n fwrite(dst_path, output)\n\n return sorted(items, key=lambda x: x['date_ymd'], reverse=True)", "def layout (self, lydef):\n\n # Categorize files\n fout = self.categorize()\n\n ly = defaultdict(list)\n\n # For each template path, attempt to map all files in that category\n # and add any files that renders completely to the layout.\n for tmplsrc, category in lydef.items():\n tmpl = Template(tmplsrc)\n for a, f in fout[category]:\n # print('%s: Try %s matched to %s in %s' % (category, tmplsrc, f, a))\n try:\n path = os.path.join(tmpl.substitute(a.info),\n os.path.basename(f))\n ly[path].append((a, f))\n except KeyError as e:\n print(' -- %s info key %s not found' % (a, e))\n pass\n\n # Sort providing sources for each path.\n # E.g., prefer .redist. before .symbols., etc.\n for path in ly:\n ly[path].sort(reverse=True)\n\n return ly", "def _parse(self, tree):\n date_el = self.get_etree().xpath(DATE_XP)[0]\n self.date = date_el.attrib['value']\n self.year, self.month, self.day = self.date.split('-')\n self.date_text = date_el.text\n\n def resolve_type(element):\n return element.attrib.get('type', '').lower().strip('. ')\n\n def index_entity(nodes, model, article):\n for n in nodes:\n m = model(n, article)\n if m.ok:\n db.session.add(m)\n\n def get_html(article):\n return html.tostring(tei.build(etree.Element('article'), article))\n\n root = self.get_etree()\n for section in root.xpath('//div1'):\n section_type = resolve_type(section)\n if not section_type:\n continue\n for subsection in section.xpath('./div2'):\n subsection_type = resolve_type(subsection)\n if not subsection_type:\n continue\n for article in subsection.xpath('./div3'):\n article_type = resolve_type(article)\n if article_type == 'ad-blank':\n continue\n a = Article(issue_id=self.id,\n date=self.date,\n section_type=section_type,\n subsection_type=subsection_type,\n article_type=article_type,\n xpath=root.getpath(article),\n content=get_html(article))\n db.session.add(a)\n db.session.flush()\n index_entity(article.xpath('.//persName'), PersName, a)\n index_entity(article.xpath('.//placeName'), PlaceName, a)\n index_entity(article.xpath('.//orgName'), OrgName, a)\n index_entity(article.xpath('.//rs'), RefString, a)", "def parse_lit(self, lines):\n comment_char = \"#\" # TODO: move this into a directive option\n comment = re.compile(r\"^\\s*{}[ \\n]\".format(comment_char))\n section_test = lambda val: bool(comment.match(val))\n\n sections = []\n for is_doc, group in itertools.groupby(lines, section_test):\n if is_doc:\n text = [comment.sub(\"\", i).rstrip(\"\\r\\n\") for i in group]\n else:\n text = [i.rstrip(\"\\r\\n\") for i in group]\n\n sections.append((is_doc, text))\n\n return sections", "def _get_items_for_parsing(self):\n count_posts = self.posts_number if 0 < self.posts_number < self.COUNT_POSTS_MAX else self.COUNT_POSTS_MAX\n pastes_page_content = self._get_pastes_page_content()\n tree = html.fromstring(pastes_page_content)\n items = tree.xpath('//table[@class=\"maintable\"]/tr/td[1]/a')\n return items[:count_posts] or []", "def get_elements_from_page(pagetree, css):\n\n # Have to convert the CSS 
selectors to XPATH selectors (gross).\n try:\n expression = GenericTranslator().css_to_xpath(css)\n except SelectorError:\n print('Invalid selector.')\n return\n elements = pagetree.xpath(expression)\n return elements" ]
[ "0.7649959", "0.6409742", "0.602266", "0.5737034", "0.5713485", "0.5590701", "0.54194415", "0.5303942", "0.52360404", "0.5199061", "0.51894474", "0.51213837", "0.5121312", "0.5115619", "0.50663406", "0.5054951", "0.50322646", "0.4996954", "0.49940476", "0.4983458", "0.49749455", "0.49440476", "0.486984", "0.4809252", "0.4783271", "0.4774944", "0.4751364", "0.47215778", "0.47126082", "0.470881" ]
0.7622432
1
Computes where the first tensor is greater than the second tensor. This is an elementwise operation (with NumPy-style broadcasting support). See also
def greater(input: Tensor, other: Tensor) -> Tensor:
    ctx = get_current_context()
    g = ctx.graph
    pb_g = g._pb_graph
    check_in_graph(g, input=input, other=other)
    check_tensor_ipu_and_tile_set(input=input, other=other)
    settings = ctx._get_op_settings("greater")
    opid = _ir.OperatorIdentifier("ai.onnx", "Greater", 9, _ir.NumInputs(2, 2), 1)
    op = pb_g.createConnectedOp_GreaterOp(
        {0: input.id, 1: other.id},
        {
            0: g._create_tensor_id("greater_out"),
        },
        opid,
        settings,
    )
    return Tensor._from_pb_tensor(op.outTensor(0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_greater_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def __gt__(\n self,\n other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],\n ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:\n return TensorWrappedPhiTensorPointer._apply_op(self, other, \"__gt__\")", "def test_less_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def __gt__(self, other):\n return self.x ** 2 + self.y ** 2 > other.x ** 2 + other.y ** 2", "def gt(self, x, y):\n return self.lt(y,x)", "def gt (x,y):\n\n return not le(x,y)", "def __gt__(self, *args):\n return _ida_hexrays.cexpr_t___gt__(self, *args)", "def all_gt(self, other):\n return self.x > other.x and self.y > other.y", "def test_greater_equal_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::ge\"},\n )", "def test_greater_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ge\"},\n )", "def greater_than(self) -> global___Expression:", "def __gt__(self, other):\n return self.weight() > other.weight()", "def __gt__(self, other):\n return self.element() > other.element()", "def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)", "def __gt__(self, other: Any) -> ColumnOperators:\n return self.operate(gt, other)", "def fp_gt(x: float, y: float) -> bool:\n return not fp_eq(x, y) and x > y", "def __gt__(self, *args):\n return _ida_hexrays.operand_locator_t___gt__(self, *args)", "def __gt__(self, *args):\n return _ida_hexrays.var_ref_t___gt__(self, *args)", "def __gt__(self,other):\r\n\t\tsorted_self = sorted(self.vector, reverse=True) #sort both lists in descending order\r\n\t\tsorted_other = sorted(other, reverse=True) \r\n\t\tcmpflag = False\r\n\t\tfor li1, li2 in zip(sorted_self, sorted_other):\r\n\t\t\tif(li1 > li2):\r\n\t\t\t\tcmpflag = True\r\n\t\treturn cmpflag", "def __gt__(self, other):\n return self.weight > other.weight", "def __gt__(self, other: t.Any) -> bool:\n return self._op_bool('__gt__', other)", "def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def greater(lhs, rhs):\n return _make.greater(lhs, rhs)", "def __gt__(self, other):\n return self.abs2phy.__gt__(other)", "def __gt__(self, other):\n return self.abs2phy.__gt__(other)", "def __gt__(self, *args):\n return _ida_hexrays.cif_t___gt__(self, *args)", "def __gt__(self, *args):\n return _ida_hexrays.vdloc_t___gt__(self, *args)", "def almostlte(a, b):\n return np.all(np.logical_or(a < b, almosteq(a, b)))", "def __gt__(self, *args):\n return _ida_hexrays.cdo_t___gt__(self, *args)" ]
[ "0.7161988", "0.69977444", "0.6803066", "0.6791493", "0.6791026", "0.67566764", "0.6724056", "0.6651434", "0.6625063", "0.66145337", "0.657495", "0.6460056", "0.64555115", "0.6449233", "0.64254844", "0.63984334", "0.636584", "0.63458794", "0.63412446", "0.63338196", "0.63262653", "0.63010025", "0.6292287", "0.62809694", "0.62775445", "0.62775445", "0.6230252", "0.62164146", "0.6198663", "0.6197115" ]
0.7294057
0
Get this project folder's name or None if it is the root project folder
def get_name(self):
    return self.client._perform_json("GET", "/project-folders/%s" % self.project_folder_id).get("name", None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_project_root_name():\n return get_project_root_name_from_settings(settings.PROJECT_ROOT)", "def base_name(self):\n return self._project.path", "def get_project_name(working_dir):\n return path.path(working_dir).name", "def get_name(self) -> str:\n return os.path.split(os.getcwd())[-1]", "def get_project_path(window: 'Any') -> 'Optional[str]':\n if not window:\n return None\n num_folders = len(window.folders())\n if num_folders == 0:\n return get_directory_name(window.active_view())\n elif num_folders == 1:\n folder_paths = window.folders()\n return folder_paths[0]\n else: # num_folders > 1\n return find_path_among_multi_folders(\n window.folders(),\n window.active_view())", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def get_project_root():\n return str(Path(__file__).parent.parent.parent.parent)", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def name(self):\n try:\n return self._name\n except AttributeError:\n if self.is_task:\n try:\n return self.pos_str\n except:\n return os.path.basename(self.workdir)\n else:\n return os.path.basename(self.workdir)", "def displayname(self):\n if self.path.is_dir():\n if (is_uuid(self.path.parts[-1])):\n self.is_uuid_folder = True\n return self.path.name + '/'\n elif is_proj(self.path.parts[-1]):\n return f'{bcolors.BOLD}' + self.path.name + f'{bcolors.ENDC}'\n return self.path.name", "def get_project_root(self):\n # Get current working directory\n cwd = os.getcwd()\n # Remove all children directories\n rd = os.path.join(cwd.split('stochastic-travel-demand-modelling/', 1)[0])\n # Make sure directory ends with project's name\n if not rd.endswith('stochastic-travel-demand-modelling'):\n rd = os.path.join(rd,'stochastic-travel-demand-modelling/')\n\n return rd", "def infer_name(self):\n if CONFIG_KEY not in self:\n return\n if hasattr(self[CONFIG_KEY], \"name\"):\n if \" \" in self[CONFIG_KEY].name:\n raise InvalidConfigFileException(\n \"Specified Project name ({}) contains whitespace\".\n format(self[CONFIG_KEY].name))\n return self[CONFIG_KEY].name.replace(\" \", \"_\")\n if not self[CONFIG_FILE_KEY]:\n raise NotImplementedError(\"Project name inference isn't supported \"\n \"on a project that lacks a config file.\")\n config_folder = os.path.dirname(self[CONFIG_FILE_KEY])\n project_name = os.path.basename(config_folder)\n if project_name == METADATA_KEY:\n project_name = os.path.basename(os.path.dirname(config_folder))\n return project_name.replace(\" \", \"_\")", "def getRootName(self):\n return self.__rootName", "def full_name(self):\n if not self.project_id:\n raise ValueError('Missing project ID.')\n return 'projects/%s' % (self.project_id)", "def getProjectName():", "def _project_name(self):\n name = getattr(self._req.req, 'project_name', '')\n if name:\n return name\n raise ValueError('Requirement has no project_name.')", "def project_name(self):\n pass", "def project_root() -> Path:\n return PROJECT_ROOT", "def 
get_project_root():\n # Get current working directory\n cwd = os.getcwd()\n # Remove all children directories\n rd = os.path.join(cwd.split('stochastic-travel-demand-modelling/', 1)[0])\n # Make sure directory ends with project's name\n if not rd.endswith('stochastic-travel-demand-modelling'):\n rd = os.path.join(rd,'stochastic-travel-demand-modelling/')\n\n return rd", "def folder(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"folder\")", "def folder(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"folder\")", "def _get_project_dir(self):\n return os.path.expanduser(\n self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"project_dir\")\n )\n or os.getcwd()\n )", "def get_project_name(self):\n remote = self.get_gitlab_remote()\n return self.get_project_name_from_url(remote.url)", "def folder(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"folder\")" ]
[ "0.7680206", "0.7352743", "0.7339847", "0.73121136", "0.72225565", "0.70877147", "0.70877147", "0.70877147", "0.70877147", "0.70877147", "0.70877147", "0.7020423", "0.6999252", "0.6999252", "0.69466954", "0.6943411", "0.6913359", "0.6867396", "0.6861792", "0.6852675", "0.67820555", "0.6747206", "0.6737448", "0.6657743", "0.6654431", "0.6622563", "0.6622563", "0.65841836", "0.65833604", "0.65813917" ]
0.7708228
0
Get this project folder's parent or None if it is the root project folder
def get_parent(self):
    parent_id = self.client._perform_json("GET", "/project-folders/%s" % self.project_folder_id).get("parentId", None)
    if parent_id is None:
        return None
    else:
        return DSSProjectFolder(self.client, parent_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parent_folder(self):\n return self.properties.get(\"ParentFolder\",\n Folder(self.context, ResourcePath(\"ParentFolder\", self.resource_path)))", "def parent_dir(self):\n parent = os.path.dirname(self.dirn)\n if self.is_subdir:\n parent = os.path.basename(parent)\n else:\n if self.platform is not None and parent.endswith(self.platform):\n parent = parent[:-len(self.platform)].rstrip(os.sep)\n if self.year is not None and parent.endswith(str(year)):\n parent = parent[:-len(str(year))].rstrip(os.sep)\n return parent", "def parentOrThisDir(path):\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n return path", "def parent(self):\n if self._path == sep:\n return None\n elif self._parent is None:\n self._parent = Path(first(split(self._path)))\n return self._parent\n else:\n return self._parent", "def get_project_root():\n return str(Path(__file__).parent.parent.parent.parent)", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root(self):\n # Get current working directory\n cwd = os.getcwd()\n # Remove all children directories\n rd = os.path.join(cwd.split('stochastic-travel-demand-modelling/', 1)[0])\n # Make sure directory ends with project's name\n if not rd.endswith('stochastic-travel-demand-modelling'):\n rd = os.path.join(rd,'stochastic-travel-demand-modelling/')\n\n return rd", "def get_project_root():\n return Path(__file__).parent.parent", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def get_root_folder() -> Path:\n return Path(__file__).parent.parent", "def getParentDirectory():\n path = os.path.dirname(os.path.realpath(__file__))\n path = '/'.join( path.split('/')[:-1] )\n return path", "def get_project_root() -> pl.Path:\n return pl.Path(__file__).parent.parent", "def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None", "def get_parent_dir(path):\n\n return os.path.abspath(os.path.join(path, os.pardir))", "def get_parent(path):\n\n # security check\n parent = os.path.dirname(path)\n\n try:\n get_abspath(parent)\n except:\n parent = ''\n\n return parent", "def _get_root(self):\n if not self:\n return False\n if self.parent_id:\n breadcrumb = self.parent_id.get_breadcrumb()\n if not breadcrumb:\n breadcrumb = [self.parent_id.read(['name', 'parent_id'])]\n project_id = breadcrumb[self.parent_id.id][0]['id']\n else:\n # if no parent, we are the project\n project_id = self.id\n return self.browse(project_id)", "def get_project_path(window: 'Any') -> 'Optional[str]':\n if not window:\n return None\n num_folders = len(window.folders())\n if num_folders == 0:\n return get_directory_name(window.active_view())\n elif num_folders == 1:\n folder_paths = window.folders()\n return folder_paths[0]\n else: # num_folders > 1\n return find_path_among_multi_folders(\n window.folders(),\n window.active_view())", "def get_parent_dir(path):\n return os.path.dirname(path)", "def get_project_root():\n # Get current working directory\n cwd = os.getcwd()\n # Remove all children directories\n rd = os.path.join(cwd.split('stochastic-travel-demand-modelling/', 1)[0])\n # Make sure directory ends with project's name\n if not rd.endswith('stochastic-travel-demand-modelling'):\n rd = 
os.path.join(rd,'stochastic-travel-demand-modelling/')\n\n return rd", "def parent(self):\n return self if self.is_root else self.__parent", "def _parent_path(cls,path):\n # os.path.dirname(), but strip directories like files (like unix basename)\n # Treat directories like files...\n if path[-1]=='/':\n path=path[:-1]\n ret = os.path.dirname(path)\n return ret", "def get_project_dir():\n path = Path(__file__).parent.parent\n project_dir = path.parent\n return project_dir", "def parent(self):\n if self._parent is not None:\n return self._parent()\n else:\n return None", "def parent_dir_path(path):\n return absolute_path(os.path.dirname(path))", "def get_parent(self) :\n return self.parent", "def get_path(self):\n definition = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id)\n parent_id = definition.get(\"parentId\", None)\n if parent_id is not None:\n parent = DSSProjectFolder(self.client, parent_id)\n path = parent.get_path()\n return (\"\" if path == \"/\" else path) + \"/\" + definition.get(\"name\", \"\")\n else:\n return \"/\"" ]
[ "0.75956905", "0.7526333", "0.7380301", "0.7366877", "0.73337495", "0.7214397", "0.7214397", "0.7214397", "0.7214397", "0.72093064", "0.71505404", "0.7147413", "0.7147413", "0.7107515", "0.70951563", "0.7071745", "0.7060472", "0.70595", "0.70325863", "0.7026057", "0.70073944", "0.7003713", "0.69727683", "0.68969566", "0.6891563", "0.6888241", "0.68719476", "0.6810832", "0.6765934", "0.6759255" ]
0.8148282
0
List the child project folders inside this project folder
def list_child_folders(self):
    children = self.client._perform_json("GET", "/project-folders/%s" % self.project_folder_id).get("childrenIds", [])
    return [DSSProjectFolder(self.client, child) for child in children]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def list_dir(self, path):", "def list_projects(self) -> List['RadsProject']:\n ret = []\n base = self.fspath(\"projects\")\n for name in os.listdir(base):\n if os.path.isdir(f\"{base}/{name}/releases\"):\n ret.append(RadsProject(self, name))\n return ret", "def getChildren(self):\n return self.directories.values()", "def list(self):\n objectpath = os.path.join(self.rootpath, self.OBJECTPATH)\n for root, dirs, files in os.walk(objectpath, topdown=False):\n for name in files:\n print(os.path.join(root, name))", "def all_projects(self):\n projects_list = []\n for path in DAVOS_PROJECT_DIR.iterdir():\n if path.is_dir():\n projects_list.append(Project(path.name))\n return projects_list", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if len(parsing.get_project_label(p))]", "def get_parent_subfolders(self):\n return [x[0] for x in os.walk(self.parent_folder)]", "def _subdirectories(self):\n for o in os.listdir(self.directory):\n if os.path.isdir(os.path.join(self.directory, o)):\n yield os.path.join(self.directory, o)", "def list(self):\n for dir in subdirs('plugins'):\n print dir.replace('plugins/', '')", "def getImmediateSubdirectories(dir):", "def list_directory(project_tree, directory):\n _, subdirs, subfiles = next(project_tree.walk(directory.path))\n return DirectoryListing(directory,\n [Path(join(directory.path, subdir)) for subdir in subdirs\n if not subdir.startswith('.')],\n [Path(join(directory.path, subfile)) for subfile in subfiles])", "def _project_files(project_name, folder):\n _authenticate()\n if project_name.startswith(\"project-\"):\n project_id = project_name\n else:\n query = dxpy.api.system_find_projects({\"name\": project_name, \"level\": \"VIEW\"})\n if len(query[\"results\"]) == 1:\n project_id = query[\"results\"][0][\"id\"]\n else:\n raise ValueError(\"Did not find DNAnexus project %s: %s\" % (project_name, query))\n dx_proj = dxpy.get_handler(project_id)\n return _recursive_ls(dx_proj, project_name, folder)", "def get_dir_list(basepath):\n parent = ListDir(basepath=basepath)\n parent.contents = get_dir_list_recurse(basepath, parent=parent)\n return parent", "def list_dirs(self):\n return self.list_groups()", "def list(self):\n\n if self.isdir():\n from os import listdir\n\n return [u for e in listdir(self.fspath) for u in self.join(e).list()]\n\n else:\n return [self]", "def folder_name(self): \n folders = []\n for folder in self.folders:\n folders.append(folder)\n return folders", "def _get_project_names(self):\n for folder in os.listdir(self.root):\n if folder[0].isdigit():\n try:\n self.new_source_paths[folder]\n pass\n except:\n self.new_source_paths[folder] = {}\n return self.new_source_paths", "def listdirs(self):\n return self.list_groups()", "def get_child_folder_names(folder_path):\n folder_names_in_folder = []\n try:\n for f in listdir(folder_path):\n if '__pycache__' not in f and isdir(\"%s/%s\" %(folder_path,f)):\n folder_names_in_folder.append(f)\n except OSError as e:\n # error\n print(\"ERROR IN get_child_folder_names\")\n\n return folder_names_in_folder", "def project_root_files():\n return [\"parent_workflow.wdl\"]", "def subdir(self):", "def ListFolder(self, path): # real signature unknown; restored from __doc__\n pass", "def __gitSubmodulesList(self):\n 
self.vcs.gitSubmoduleList(self.project.getProjectPath())", "def test_get_children_project(self):\n children = self.project.get_children()\n self.assertEqual(children.count(), 0)", "def immediate_children( path ):\n assert( os.path.isdir( path ) )\n CMD = [ \"find\", path, \"-mindepth\", \"1\", \"-maxdepth\", \"1\" ]\n return [ x for x in run_cmd( CMD ).split( \"\\n\" ) if len( x ) > 0 ]", "def siblings(self):\r\n\r\n for build in glob1(self.parent_path, 'BUILD*'):\r\n if self.name != build and BuildFile._is_buildfile_name(build):\r\n siblingpath = os.path.join(os.path.dirname(self.relpath), build)\r\n if not os.path.isdir(os.path.join(self.root_dir, siblingpath)):\r\n yield BuildFile(self.root_dir, siblingpath)", "def load_project_structure(self, startpath, tree):\n from PyQt5.QtWidgets import QTreeWidgetItem\n from PyQt5.QtGui import QIcon\n\n for element in os.listdir(startpath):\n path_info = startpath + \"/\" + element\n parent_itm = QTreeWidgetItem(tree, [os.path.basename(element)])\n if os.path.isdir(path_info):\n self.load_project_structure(path_info, parent_itm)\n parent_itm.setIcon(0, QIcon(\"assets/folder.ico\"))\n else:\n parent_itm.setIcon(0, QIcon(\"assets/file.ico\"))", "def listdir(self, path):\n return os.listdir(path)", "def listFolders(folderRoot):\n return os.listdir(folderRoot)" ]
[ "0.671556", "0.65671426", "0.649489", "0.6452801", "0.6385048", "0.6361596", "0.6360185", "0.6353166", "0.6309465", "0.6256494", "0.62158364", "0.6205499", "0.61954165", "0.6167936", "0.61630815", "0.6134903", "0.61286485", "0.61226493", "0.60995126", "0.6084035", "0.6050062", "0.6046435", "0.60327184", "0.5987027", "0.59834415", "0.59784776", "0.5929292", "0.5928271", "0.5914303", "0.5899174" ]
0.7146115
0
List the project keys of the projects that are stored in this project folder
def list_project_keys(self):
    return self.client._perform_json("GET", "/project-folders/%s" % self.project_folder_id).get("projectKeys", [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keys_from_projects():\r\n\r\n returnkeys = set()\r\n for project in (self.project*self.suspend_default_keys) + self.temp_projects:\r\n if project in self.default_dict['projects'].get_all_projects():\r\n returnkeys = returnkeys.union(set(self.default_dict['projects']\r\n .get_default_keys(project=project)))\r\n return returnkeys", "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def _get_project_names(self):\n for folder in os.listdir(self.root):\n if folder[0].isdigit():\n try:\n self.new_source_paths[folder]\n pass\n except:\n self.new_source_paths[folder] = {}\n return self.new_source_paths", "def search_key_for_project(project):\n elements = []\n elements.append(project['name'])\n elements.append(project['client'])\n elements.append(project['project_state'])\n elements.append(str(project['project_code']))\n return u' '.join(elements)", "def get_keys(self):\n return list(self.public_keys.keys())", "def list_projects(self):\n project_keys = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"projectKeys\", [])\n return [DSSProject(self.client, pkey) for pkey in project_keys]", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if len(parsing.get_project_label(p))]", "def keys(self):\r\n return keys.RepoKeys(self)", "def index(self):\n return {'projects': [p for p in self.server.projects.values()]}", "def ListKeys(project, show_deleted=None, page_size=None, limit=None):\n client = GetClientInstance(calliope_base.ReleaseTrack.GA)\n messages = client.MESSAGES_MODULE\n\n request = messages.ApikeysProjectsLocationsKeysListRequest(\n parent=GetParentResourceName(project), showDeleted=show_deleted)\n return list_pager.YieldFromList(\n client.projects_locations_keys,\n request,\n limit=limit,\n batch_size_attribute='pageSize',\n batch_size=page_size,\n field='keys')", "def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data", "def get_unique_project_list(self) -> List[str]:\n return self.tasks.get_project_list()", "def get_projects(self):\n return conf.projects", "def get_projects():\n data = sql.list_projects()\n names = [(d['id'], d['name']) for d in data]\n return names", "def keys(self):\n return self.get_list(self.cloudman.list_keypairs(),\n kind=\"key\")", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def get_projects(self):\n ret = self.v1_projects.get()\n return [each.metadata.name for each in ret.items]", "def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)", "def _keys(self):\n for name in listdir(abspath(self._path)):\n key, ext = splitext(name)\n if ext == \".pkl\":\n yield key", "def get_project_list():\n return parse_list_output(Popen(\n 'openstack project list'.split(), stdout=STDOUT, stderr=STDERR\n ).communicate()[0])", "def 
get_all_keys(self):\n return self.psettings.allKeys()", "def keys(self) -> List[str]:\n raise NotImplementedError", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def return_keys(self):\r\n\r\n keys = list(self.piDD.keys())\r\n return keys", "def keys(self):\n return self.config.keys()", "def keys(self):\n return self.config.keys()", "def list_keystone_v3_projects(self):\n LOG_OBJ.debug(\"List the projects.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating project\")\n print (\"No response from Server while creating project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Creating project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Creating project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Projects list : %s \" % output)\n print (\"Projects list : %s \" % output)\n return output['projects']", "def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')" ]
[ "0.7168071", "0.7027032", "0.68906045", "0.6847208", "0.67857355", "0.6781415", "0.6719056", "0.66914505", "0.6626524", "0.66041535", "0.65820473", "0.6570286", "0.6526935", "0.65014863", "0.6491836", "0.64669544", "0.64484143", "0.6429212", "0.6370583", "0.6344642", "0.6315035", "0.62964165", "0.6270344", "0.62647617", "0.6253206", "0.62498873", "0.6249768", "0.6249768", "0.62271625", "0.621733" ]
0.8718406
0
Move this project folder into another project folder (aka. destination)
def move_to(self, destination):
    params = {
        "destination": destination.project_folder_id
    }
    self.client._perform_empty("POST", "/project-folders/%s/move" % self.project_folder_id, params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_project_to(self, project_key, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/projects/%s/move\" % (self.project_folder_id, project_key), params=params)", "def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)", "def move_from_temp_directory(self):", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def do_stage(self, mirror_only=False):\n super().do_stage(mirror_only)\n stsrc = self.stage.source_path\n srcpath = os.path.join( stsrc, self.build_directory )\n ppath = ancestor (srcpath)\n shutil.move(stsrc, stsrc+\"_old\")\n mkdirp(ppath)\n shutil.move(stsrc+\"_old\",srcpath)", "def relocate(self, source, destination):\n destination_dir = os.path.dirname(destination)\n if not os.path.exists(destination_dir):\n self.subdir(destination_dir)\n os.rename(source, destination)", "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def move(self, destination, **kwargs):\n assert _os.path.exists(self.__str__()) == True\n _shutil.move(self.__str__(), destination, **kwargs)", "def moveUp():\r\n\tos.chdir(\"..\")", "def moveAsset(self, src, dst):\n if not self.exists( self.dirname(dst) ):\n self.makedirs( self.dirname(dst) )\n self.move(src, dst)\n\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return \n\n cache_dst = self.cache_path(dst)\n if not os.path.exists( os.path.dirname(cache_dst) ):\n os.makedirs( os.path.dirname(cache_dst) )\n shutil.move(cache_src, cache_dst)", "def move_dirs(args):\n src = args[0]\n dst = args[1]\n print(\"Moving from: {}\".format(src))\n print(\" to: {}\".format(dst))\n shutil.move(src, dst)\n return", "def movedir(self):\n pass", "def _move(self, in_file, dest):\n dest = os.path.abspath(dest)\n _, in_base_name = os.path.split(in_file)\n dest_parent_dir, _ = os.path.split(dest)\n if os.path.exists(dest):\n out_file = os.path.join(dest, in_base_name)\n else:\n if not os.path.exists(dest_parent_dir):\n os.makedirs(dest_parent_dir)\n out_file = dest\n shutil.move(in_file, dest)\n\n return out_file", "def move(self, target):\n if target.relto(self):\n raise error.EINVAL(target, \"cannot move path into a subdirectory of itself\")\n try:\n self.rename(target)\n except error.EXDEV: # invalid cross-device link\n self.copy(target)\n self.remove()", "def MovePath(options, src, dst):\n # if the destination is not an existing directory, then overwrite it\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n\n # If the destination exists, the remove it\n if os.path.exists(dst):\n if options.force:\n Remove(['-vfr', dst])\n if os.path.exists(dst):\n raise OSError('mv: FAILED TO REMOVE ' + dst)\n else:\n raise OSError('mv: already exists ' + dst)\n for _ in range(5):\n try:\n os.rename(src, dst)\n break\n except OSError as error:\n print('Failed on %s with %s, 
retrying' % (src, error))\n time.sleep(5)\n else:\n print('Gave up.')\n raise OSError('mv: ' + error)", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def move_file(source, destination):\n shutil.move(source, destination)", "def move(self, source, target, force=False):\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def move_to_by_path(self, new_relative_path, retain_editor_and_modified=False):\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativePath\", SPResPath(new_relative_path))\n\n def _move_folder():\n MoveCopyUtil.move_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),\n self._build_full_url(new_relative_path),\n MoveCopyOptions(\n retain_editor_and_modified_on_move=retain_editor_and_modified))\n\n self.ensure_property(\"ServerRelativePath\", _move_folder)\n return target_folder", "def copydir(self):\n pass", "def _move_self_to(self, new_dir=None, new_name=None):\n if self.is_downloaded:\n if new_dir and not new_name:\n shutil.move(self._download_path, os.path.join(new_dir, self.download_filename))\n elif new_name and not new_dir:\n shutil.move(self._download_path, os.path.join(self.download_dir, new_name))\n elif new_name and new_dir:\n shutil.move(self._download_path, os.path.join(new_dir, new_name))", "def mv(cur_path, new_path):\n cur_abs = navigate.get_abs_path(cur_path)\n new_abs = navigate.get_abs_path(new_path)\n cur_parent, cur_name = navigate.split_path(cur_abs)\n new_parent, new_name = navigate.split_path(new_abs)\n up_parent, up_name = navigate.split_path(new_parent)\n if not db.file_exists(cur_parent, cur_name):\n print \"Error: '\" + cur_name + \"' does not exist.\"\n elif up_parent is not None and not db.directory_exists(up_parent, up_name):\n print \"Error: '\" + new_parent + \"' is not a valid directory.\"\n elif db.file_exists(new_parent, new_name):\n print \"Error: '\" + new_name + \"' already exists at that location.\"\n else:\n cur_dbox_path = '/' + cur_name\n new_dbox_path = '/' + new_name\n access_token = db.get_access_to_file(cur_parent, cur_name)\n client = dropbox.client.DropboxClient(access_token)\n client.file_move(cur_dbox_path, new_dbox_path)\n db.move_file(cur_parent, cur_name, new_parent, new_name)", "def moveTo(self, newFolder):\n moveURI = self.metaData.getLink(\"move\")\n parent = self.metaData.getLinkIndex('parent')\n\n assert parent != -1\n assert moveURI is not None\n if not hasattr(newFolder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n if not hasattr(newFolder, \"selfLink\"): raise TypeError(\"Your newFolder does not have a self link\")\n\n self.metaData.jsonObj['links'][parent] = {'href' : newFolder.selfLink, 'rel' : 'parent'}\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n response = self._adapter.putRequest(moveURI,header, 
json.dumps(self.metaData.jsonObj))\n\n newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])\n return Folder(self._client, newLink)", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def move(self, name, source, dest):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n self._run(name, ['move', source, dest])\n self.m.path.mock_copy_paths(source, dest)\n self.m.path.mock_remove_paths(source)", "def move_to_folder(self):\n if \"moveToFolder\" in self._prop_dict:\n return self._prop_dict[\"moveToFolder\"]\n else:\n return None", "def install(src, dest):\n shutil.move(src, dest)\n restorecon(dest, recursive=True)", "def move_files(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n data_files = project_obj.files.all()\n\n for data in data_files:\n working_dir = get_sequencedir(project_obj)\n create_dir(working_dir)\n path = data.file.name.split('/')[-1]\n end_path = os.path.join(working_dir, path)\n\n if file_exists(end_path):\n print(\"File: \", end_path, \" already found. No need to copy.\")\n else:\n try:\n print(\"Copying from %s to %s\" % (data.file.name, end_path))\n shutil.copyfile(data.file.name, end_path)\n # if somehow the user deleted the database files, they are told to restart the database\n except FileNotFoundError:\n print(\"Protected database files have been deleted by the user. Restart the database to continue.\")" ]
[ "0.71520203", "0.68892497", "0.68195164", "0.669439", "0.65629345", "0.64931214", "0.6460968", "0.63149184", "0.6205377", "0.61995924", "0.6159439", "0.6138532", "0.6100024", "0.6050833", "0.6043683", "0.60310405", "0.60202485", "0.60058755", "0.59887636", "0.5975746", "0.5966222", "0.5959474", "0.5935062", "0.592487", "0.5913992", "0.5913191", "0.5908814", "0.5897646", "0.5852577", "0.58503586" ]
0.7494612
0
Move a project within this project folder into another project folder (aka. destination)
def move_project_to(self, project_key, destination): params = { "destination": destination.project_folder_id } self.client._perform_empty("POST", "/project-folders/%s/projects/%s/move" % (self.project_folder_id, project_key), params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_to(self, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/move\" % self.project_folder_id, params=params)", "def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def do_stage(self, mirror_only=False):\n super().do_stage(mirror_only)\n stsrc = self.stage.source_path\n srcpath = os.path.join( stsrc, self.build_directory )\n ppath = ancestor (srcpath)\n shutil.move(stsrc, stsrc+\"_old\")\n mkdirp(ppath)\n shutil.move(stsrc+\"_old\",srcpath)", "def move_from_temp_directory(self):", "def move_files(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n data_files = project_obj.files.all()\n\n for data in data_files:\n working_dir = get_sequencedir(project_obj)\n create_dir(working_dir)\n path = data.file.name.split('/')[-1]\n end_path = os.path.join(working_dir, path)\n\n if file_exists(end_path):\n print(\"File: \", end_path, \" already found. No need to copy.\")\n else:\n try:\n print(\"Copying from %s to %s\" % (data.file.name, end_path))\n shutil.copyfile(data.file.name, end_path)\n # if somehow the user deleted the database files, they are told to restart the database\n except FileNotFoundError:\n print(\"Protected database files have been deleted by the user. Restart the database to continue.\")", "def move_dirs(args):\n src = args[0]\n dst = args[1]\n print(\"Moving from: {}\".format(src))\n print(\" to: {}\".format(dst))\n shutil.move(src, dst)\n return", "def move_image(src_project, img1, dest_project, img2):\n with BMI(_username, _password, src_project) as bmi:\n ret = bmi.move_image(img1, dest_project, img2)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def move(self, source, target, force=False):\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def moveAsset(self, src, dst):\n if not self.exists( self.dirname(dst) ):\n self.makedirs( self.dirname(dst) )\n self.move(src, dst)\n\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return \n\n cache_dst = self.cache_path(dst)\n if not os.path.exists( os.path.dirname(cache_dst) ):\n os.makedirs( os.path.dirname(cache_dst) )\n shutil.move(cache_src, cache_dst)", "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def moveUp():\r\n\tos.chdir(\"..\")", "def Move(args):\n\n parser = argparse.ArgumentParser(usage='mv [Options] sources... 
dest',\n description=Move.__doc__)\n parser.add_argument(\n '-v', '--verbose', dest='verbose', action='store_true',\n default=False,\n help='verbose output.')\n parser.add_argument(\n '-f', '--force', dest='force', action='store_true',\n default=False,\n help='force, do not error it files already exist.')\n parser.add_argument('srcs', nargs='+')\n parser.add_argument('dest')\n\n options = parser.parse_args(args)\n\n if options.verbose:\n print('mv %s %s' % (' '.join(options.srcs), options.dest))\n\n for src in options.srcs:\n MovePath(options, src, options.dest)\n return 0", "def copy_project(self, new_name, switch=True):\n if new_name in self:\n raise ValueError(\"Project {} already exists\".format(new_name))\n fp = self._base_data_dir / safe_filename(new_name, full=self.dataset.full_hash)\n if fp.exists():\n raise ValueError(\"Project directory already exists\")\n project_data = ProjectDataset.get(ProjectDataset.name == self.current).data\n ProjectDataset.create(\n data=project_data, name=new_name, full_hash=self.dataset.full_hash\n )\n shutil.copytree(self.dir, fp)\n create_dir(self._base_logs_dir / safe_filename(new_name))\n if switch:\n self.set_current(new_name)", "def relocate(self, source, destination):\n destination_dir = os.path.dirname(destination)\n if not os.path.exists(destination_dir):\n self.subdir(destination_dir)\n os.rename(source, destination)", "def move_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"move container to workspace number {workspace}\")", "def move(self, name, source, dest):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n self._run(name, ['move', source, dest])\n self.m.path.mock_copy_paths(source, dest)\n self.m.path.mock_remove_paths(source)", "def upgrade_project(ctx, path):\n with ctx.cd(path):\n ctx.run(\"newt upgrade\")", "def test_replace_project(self):\n pass", "def file_move(self, from_path, to_path):\n params = {'root': self.session.root,\n 'from_path': format_path(from_path),\n 'to_path': format_path(to_path)}\n\n url, params, headers = self.request(\"/fileops/move\", params)\n\n return self.rest_client.POST(url, params, headers)", "def sync(local_dir, remote_dir):\n\n rsync_project(local_dir=local_dir, remote_dir=remote_dir, delete=True, exclude=['*.pyc', '*.log', '__pycache__', '.idea', '.DS_Store'])", "def move_file(source, destination):\n shutil.move(source, destination)", "def mv(cur_path, new_path):\n cur_abs = navigate.get_abs_path(cur_path)\n new_abs = navigate.get_abs_path(new_path)\n cur_parent, cur_name = navigate.split_path(cur_abs)\n new_parent, new_name = navigate.split_path(new_abs)\n up_parent, up_name = navigate.split_path(new_parent)\n if not db.file_exists(cur_parent, cur_name):\n print \"Error: '\" + cur_name + \"' does not exist.\"\n elif up_parent is not None and not db.directory_exists(up_parent, up_name):\n print \"Error: '\" + new_parent + \"' is not a valid directory.\"\n elif db.file_exists(new_parent, new_name):\n print \"Error: '\" + new_name + \"' already exists at that location.\"\n else:\n cur_dbox_path = '/' + cur_name\n new_dbox_path = '/' + new_name\n access_token = db.get_access_to_file(cur_parent, cur_name)\n client = dropbox.client.DropboxClient(access_token)\n client.file_move(cur_dbox_path, new_dbox_path)\n db.move_file(cur_parent, cur_name, new_parent, new_name)", "def move_tasks_inner(source_project, source_section, target_project, target_section):\n source_project_id, source_project_name = source_project['id'], source_project['name']\n source_section_id, 
source_section_name = source_section['id'], source_section['name']\n target_project_id, target_project_name = target_project['id'], target_project['name']\n target_section_id, target_section_name = target_section['id'], target_section['name']\n\n source_tasks = get_paginated_json(f\"https://app.asana.com/api/1.0/sections/{source_section_id}/tasks\")\n\n if len(source_tasks) == 0:\n print(f\"no tasks to move in section {source_section_name} of project {source_project_name}\")\n\n for task in source_tasks:\n task_id = task['id']\n if source_project_id == target_project_id:\n print(f\"moving task {task_id} from {source_section_name} to {target_section_name} \"\n f\"within project {target_project_name}\", end=\"...\")\n else:\n print(f\"moving task {task_id} from {source_section_name} in {source_project_name} \"\n f\"to {target_section_name} in {target_project_name}\", end=\"...\")\n response = s.post(f\"https://app.asana.com/api/1.0/tasks/{task_id}/addProject\", data={\n \"project\": target_project_id, \"section\": target_section_id})\n if response.status_code != 200:\n print(f\"failed\")\n error = parse_asana_error_response(response)\n print(error, file=sys.stderr)\n sys.exit(1)\n else:\n print(f\"success!\")", "def MovePath(options, src, dst):\n # if the destination is not an existing directory, then overwrite it\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n\n # If the destination exists, the remove it\n if os.path.exists(dst):\n if options.force:\n Remove(['-vfr', dst])\n if os.path.exists(dst):\n raise OSError('mv: FAILED TO REMOVE ' + dst)\n else:\n raise OSError('mv: already exists ' + dst)\n for _ in range(5):\n try:\n os.rename(src, dst)\n break\n except OSError as error:\n print('Failed on %s with %s, retrying' % (src, error))\n time.sleep(5)\n else:\n print('Gave up.')\n raise OSError('mv: ' + error)", "def _move(self, in_file, dest):\n dest = os.path.abspath(dest)\n _, in_base_name = os.path.split(in_file)\n dest_parent_dir, _ = os.path.split(dest)\n if os.path.exists(dest):\n out_file = os.path.join(dest, in_base_name)\n else:\n if not os.path.exists(dest_parent_dir):\n os.makedirs(dest_parent_dir)\n out_file = dest\n shutil.move(in_file, dest)\n\n return out_file", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def move(self, destination, **kwargs):\n assert _os.path.exists(self.__str__()) == True\n _shutil.move(self.__str__(), destination, **kwargs)", "def move_files_into_build():\n build_root = os.path.join(template_path, 'build')\n create_python_package(build_root)\n\n build_buildbot = os.path.join(template_path, 'build', 'buildbot')\n create_python_package(build_buildbot)\n\n pythonify('runtests', [], ['build']) \n pythonify('master.cfg', ['buildbot'], ['build', 'buildbot'])" ]
[ "0.7171021", "0.63889736", "0.6316078", "0.621614", "0.6122555", "0.5999708", "0.5974252", "0.5811231", "0.5784794", "0.57436264", "0.5729", "0.5725763", "0.5717845", "0.5717237", "0.57152677", "0.57001674", "0.56600225", "0.5658027", "0.5654615", "0.5640274", "0.5616604", "0.55971414", "0.5588295", "0.55794764", "0.5572186", "0.55521476", "0.5544307", "0.5538686", "0.5538435", "0.55102396" ]
0.7311976
0
Gets all settings as a raw dictionary. This returns a reference to the raw retrieved settings, not a copy, so changes made to the returned object will be reflected when saving.
def get_raw(self):
    return self.settings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_values(self):\n rv = {}\n for setting in self.manager.all():\n rv[setting.name] = setting.to_dict()\n return rv", "def get_settings(self):\n\n\t\t# TODO: Consider YAML. Human writable, machine readable.\n\t\twith open(self.filename) as fp:\n\t\t\ttry:\n\t\t\t\treturn json.load(fp)\n\t\t\texcept Exception, e:\n\t\t\t\tif self.DEBUG:\n\t\t\t\t\tprint >>sys.stderr, 'get_settings exception:', e\n\t\t\t\treturn {}", "def get_settings(self):\n return self.settings", "def settings(self):\r\n return settings.Settings(self)", "def settings(self):\n return {}", "def settings(self) -> BaseSettings:\n return self._context.settings", "def settings(self) -> BaseSettings:\n return self._context.settings", "def settings(self) -> Dict[str, Any]:\n return {}", "def settings(self):\n return self._settings", "def settings(self):\n return self._settings", "def get_settings(self):\n return {\n \"game_name\": self.game_name,\n \"n_epochs\": self.n_epochs,\n \"n_episodes\": self.n_episodes,\n \"n_frames\": self.n_frames,\n \"agent\": self.agent.get_settings(),\n \"results_dir\": self.results_dir,\n \"use_minimal_action_set\": self.use_minimal_action_set,\n }", "def get_values(self):\n self.active_changes = False # (flag) Once changes are retrieved, we assume that they will be sent to the controller\n return self.settings", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def current_settings(self):\n return {\n 'power_state': self.power_state,\n 'brightness': self.brightness,\n }", "def get_settings():\n return db.get_data()", "def get_settings():\n return SettingCollection.build()", "def get_settings(self):\n url = \"https://api.imgur.com/3/account/{0}/settings\".format(self.name)\n return self._imgur._send_request(url)", "def settings(self):\r\n url = '{0}/userSettings'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def allValues(cls, user=None, exclude_hidden=False):\n results = cls.objects.all()\n\n # Optionally filter by user\n if user is not None:\n results = results.filter(user=user)\n\n # Query the database\n settings = {}\n\n for setting in results:\n if setting.key:\n settings[setting.key.upper()] = setting.value\n\n # Specify any \"default\" values which are not in the database\n for key in cls.SETTINGS.keys():\n\n if key.upper() not in settings:\n settings[key.upper()] = cls.get_setting_default(key)\n\n if exclude_hidden:\n hidden = cls.SETTINGS[key].get('hidden', False)\n\n if hidden:\n # Remove hidden items\n del settings[key.upper()]\n\n for key, value in settings.items():\n validator = cls.get_setting_validator(key)\n\n if cls.is_protected(key):\n value = '***'\n elif cls.validator_is_bool(validator):\n value = InvenTree.helpers.str2bool(value)\n elif cls.validator_is_int(validator):\n try:\n value = int(value)\n except ValueError:\n value = cls.get_setting_default(key)\n\n settings[key] = value\n\n return settings", "def getSettings(self):\n return self.cfg", "def settings(self) -> Any:\n self.ensure_initialized()\n return SettingsItem(self._data, self, FragmentPath())", "def __getSettingsFromStorage():\n return AccountSettings.getSettings(NEW_SETTINGS_COUNTER)", "def as_dict(self) -> dict:\n return self._config", "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def 
settings(self):\n if self._settings is not None:\n return self._settings\n\n settings = self.binaries['KeeAgent.settings'].content\n self._settings = objectify.fromstring(settings)\n return self._settings", "def cont_settings_(request):\n \n return {\"settings\": settings}", "def get_settings():\n with open('config/config.json') as data_file:\n settings = json.load(data_file)\n return settings", "def settings_global(self) -> api.SettingsGlobal:\n return self._get_model(model=api.SettingsGlobal)", "def settings() -> Settings:\n return Settings()" ]
[ "0.72432184", "0.6985964", "0.6944899", "0.6929001", "0.69271636", "0.6859243", "0.6859243", "0.68463105", "0.6821209", "0.6821209", "0.6763247", "0.67585516", "0.6684184", "0.6684184", "0.6682379", "0.668028", "0.66603684", "0.66174513", "0.6500105", "0.6481581", "0.6417557", "0.6377056", "0.6365384", "0.6334218", "0.6322007", "0.6272341", "0.62613046", "0.61969554", "0.6196479", "0.6192799" ]
0.78474265
1
Set the owner of the project folder
def set_owner(self, owner):
    self.settings["owner"] = owner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_owner(self, owner):\n self.__owner = owner", "def set_ownership(self):\n\n os.chmod(os.path.join(\"%s\" % NetworkManager_conf_dir, self.connection._id), 0600)", "def set_file_owner(host, fqpath, user):\n command = \"chown %s %s\" % (user, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chown failed: %s' % rerr)\n return False", "def owner(self, owner: str):\n\n self._owner = owner", "def chown(self, path, owner=None, group=None):\n kwargs = {}\n if owner is not None:\n kwargs[\"owner\"] = owner\n if group is not None:\n kwargs[\"group\"] = group\n self._call(\"SETOWNER\", method=\"put\", path=path, **kwargs)", "def test_owner(self):\n self.assertIsNone(self.env.project_repo_owner)", "def changeOwn():\n os.system('sudo chown -R test:users /etc/resolv.conf')\n os.system('sudo chown -R test:named /etc/named.conf')", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))", "def set_owner(self, data):\n self._owner = self._uni(data)\n self.add_payload('owner', data)", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def set_object_owner(self, bucket_name, object_name, uid, gid):\n\n return h3lib.set_object_owner(self._handle, bucket_name, object_name, uid, gid, self._user_id)", "def changeOwnership(self, document):\n document.changeOwnership(getSecurityManager().getUser(), False)", "def set_owner(plugin_id, username, logger, client):\n plugin = client.plugins.set_owner(plugin_id, username)\n logger.info('Plugin `%s` is now owned by user `%s`.',\n plugin_id, plugin.get('created_by'))", "def setOwnerPassword(self,value):\n self.PDFreactorConfiguration.in1[\"ownerPassword\"] = value", "def is_owner(self, is_owner):\n\n self._is_owner = is_owner", "def get_owner(self):\n return self.settings.get(\"owner\", None)", "def owner(self, owner):\n if self.local_vars_configuration.client_side_validation and owner is None: # noqa: E501\n raise ValueError(\"Invalid value for `owner`, must not be `None`\") # noqa: E501\n\n self._owner = owner", "def make_project_creator_an_owner(\n sender, instance: Project, created: bool, *args, **kwargs\n):\n if sender is Project and created and instance.creator:\n ProjectAgent.objects.create(\n project=instance, user=instance.creator, role=ProjectRole.OWNER.name\n )", "def set_file_owner_perm(path, permission, user, group):\n uid = pwd.getpwnam(user).pw_uid\n gid = grp.getgrnam(group).gr_gid\n\n current_perm = get_permissions(path)\n try:\n logger.debug('Current permission: {0}, changing to {1}'.format(current_perm, oct(permission)))\n os.chmod(path, permission)\n os.chown(path, uid, gid)\n except Exception as e:\n logger.warning('Unable to change permissions on {0}: {1}'.format(path, e))", "def test_patch_project_owner(self):\n new_owner = self.make_user('new_owner')\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'owner': str(new_owner.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, 
msg=response.content)", "def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass", "def chown_dir ( self, fspath ):\n return", "def chown_file(filename, file_owner, sudo=True):\n LOG.info(\"Changing the user that owns {}\".format(filename))\n cmd = \"chown {} {}\".format(file_owner, filename)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)" ]
[ "0.70010644", "0.67605895", "0.6558529", "0.65345246", "0.64300406", "0.6328601", "0.6328463", "0.6319", "0.6319", "0.6319", "0.6319", "0.62747085", "0.6262846", "0.616684", "0.616684", "0.616684", "0.616684", "0.6150944", "0.61200434", "0.6045264", "0.60192376", "0.5943294", "0.5889295", "0.581812", "0.57970184", "0.5778253", "0.57749593", "0.5758468", "0.57304543", "0.5678243" ]
0.7705122
0
Test case for list_supported_assets
def test_list_supported_assets(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_system_assets(self):\n pass", "def test_get_test_assets(self):\n pass", "def test_list_dependent_assets(self):\n pass", "def test_list_dependent_assets3(self):\n pass", "def test_list_dependent_assets2(self):\n pass", "def test_list_dependent_assets1(self):\n pass", "def test_get_container_assets(self):\n pass", "def test_retrieve_system_asset(self):\n pass", "def test_get_test_assets_expanded(self):\n pass", "def test_get_software_bundles(self):\n pass", "def test_get_test_asset(self):\n pass", "def get_available_data_asset_names(self) -> List[str]:\n raise NotImplementedError", "def get_list_assets():\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get('https://rest.coinapi.io/v1/assets', headers=headers)\n if r.status_code / 100 == 2:\n assets = []\n for asset in r.json():\n if asset['type_is_crypto']:\n assets.append(asset['asset_id'])\n return assets\n else:\n return {\"error\": r.content.decode('utf-8')}", "def test_import_software_asset(self):\n pass", "def get_selected_assets(self):\n raise NotImplementedError", "def test_import_system_asset(self):\n pass", "def ListAssets(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def resources():\n check_resources()", "def test_get_software_asset_bundle_expanded(self):\n pass", "def list_assets(request):\n user_assets = Asset.objects.filter(user=request.user, deleted=False).all()\n\n json_assets = ASSET_LIST_RESOURCE.to_json(dict(\n user_id=request.user.id,\n next_page_token=uuid.uuid4(),\n assets=user_assets\n ))\n request_format = request.GET.get('format', '')\n if request_format.lower() == 'json':\n return partial_json_response(request, json_assets)\n else:\n render_data = {'resource': json.dumps(json_assets)}\n render_data.update(csrf(request))\n return render('index.html', render_data)", "def supported():\n return os.path.isfile(OPENCOR)", "def test_list_image(self):\n pass", "def test_available_output_formats():\n assert set([\"GTiff\", \"PNG\", \"PNG_hillshade\", \"GeoJSON\"]).issubset(\n set(available_output_formats())\n )", "def test_create_system_asset(self):\n pass", "def test_itar_restrict_software_asset(self):\n pass", "def check_all_renderer_specific_textures(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n excluded_extensions = [\".ptex\"]\n\n renderer_texture_extensions = {\"arnold\": \".tx\", \"redshift\": \".rstexbin\"}\n default_renderer = \"redshift\"\n\n # set the default extension to default_renderer\n current_renderer = pm.PyNode(\"defaultRenderGlobals\").currentRenderer.get()\n current_renderer_texture_extension = renderer_texture_extensions.get(\n current_renderer, renderer_texture_extensions[default_renderer]\n )\n\n # For Redshift skip generation of the texture files, Redshift generates and\n # stores them automatically on the render machine.\n if current_renderer == \"redshift\":\n progress_controller.complete()\n return\n\n v = staging.get(\"version\")\n if v and Representation.repr_separator in v.take_name:\n progress_controller.complete()\n return\n\n texture_file_paths = []\n workspace_path = pm.workspace.path\n\n def add_path(path):\n if path != \"\":\n path = os.path.expandvars(path)\n if not os.path.isabs(path):\n path = os.path.normpath(os.path.join(workspace_path, path))\n texture_file_paths.append(path)\n\n maya_version = int(pm.about(v=1))\n all_file_nodes = 
pm.ls(type=\"file\")\n all_ai_image_nodes = pm.ls(type=\"aiImage\")\n\n progress_controller.maximum = len(all_file_nodes) + len(all_ai_image_nodes)\n\n for node in all_file_nodes:\n if maya_version <= 2014:\n file_path = node.fileTextureName.get()\n else:\n file_path = node.computedFileTextureNamePattern.get()\n\n if os.path.splitext(file_path)[-1] not in excluded_extensions:\n add_path(file_path)\n progress_controller.increment()\n\n for node in all_ai_image_nodes:\n file_path = node.filename.get()\n if os.path.splitext(file_path)[-1] not in excluded_extensions:\n add_path(file_path)\n progress_controller.increment()\n\n import glob\n\n textures_with_no_tx = []\n\n # add more iterations to progress_controller\n progress_controller.maximum += len(texture_file_paths)\n for path in texture_file_paths:\n expanded_path = (\n path.replace(\"<udim>\", \"*\")\n .replace(\"<UDIM>\", \"*\")\n .replace(\"<U>\", \"*\")\n .replace(\"<V>\", \"*\")\n )\n textures_found_on_path = glob.glob(expanded_path)\n\n for orig_texture_path in textures_found_on_path:\n # now check if there is a .tx for this texture\n bin_texture_path = \"%s%s\" % (\n os.path.splitext(orig_texture_path)[0],\n current_renderer_texture_extension,\n )\n\n if not os.path.exists(bin_texture_path):\n textures_with_no_tx.append(orig_texture_path)\n progress_controller.increment()\n\n # add event more steps to progress_controller\n number_of_textures_to_process = len(textures_with_no_tx)\n progress_controller.maximum += number_of_textures_to_process\n if number_of_textures_to_process:\n for path in textures_with_no_tx:\n print(path)\n progress_controller.complete()\n raise PublishError(\n \"There are textures with no <b>%s</b> file!!!<br><br>\"\n \"%s\"\n % (\n current_renderer_texture_extension.upper(),\n \"<br>\".join(textures_with_no_tx),\n )\n )\n progress_controller.complete()", "def test_get_existing_archives(self):\n support = saltsupport.SaltSupportModule()\n out = support.archives()\n assert len(out) == 3\n for name in [\n \"/mnt/storage/one-support-000-000.bz2\",\n \"/mnt/storage/two-support-111-111.bz2\",\n \"/mnt/storage/000-support-000-000.bz2\",\n ]:\n assert name in out", "def test_itar_restrict_asset(self):\n pass", "def asset_checker(self, assets):\n self.logger.debug(\"Checking assets: '%s'\", assets)\n assets = assets.split(\",\")\n reference = self.get_assets()\n for asset in assets:\n if asset in reference:\n pass\n else:\n raise InvalidAssetError(\"Invalid asset: '{}'\".format(asset))", "def assets():\n pass" ]
[ "0.7656596", "0.6691637", "0.65236694", "0.6175305", "0.6162377", "0.5957903", "0.5940173", "0.5921987", "0.5883309", "0.5817313", "0.5779641", "0.5772436", "0.5749467", "0.5739388", "0.57346356", "0.57313186", "0.55638844", "0.55423427", "0.5535344", "0.55265224", "0.5520921", "0.5510388", "0.54993695", "0.5495025", "0.54922086", "0.5484107", "0.5450841", "0.54293656", "0.54271805", "0.54080456" ]
0.9424378
0
Determine if a value is a sequence type.
def is_sequence(value):
    return (hasattr(value, "__iter__") and
            not isinstance(value, (six.string_types, six.binary_type)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_sequence_type(self):\n raise exceptions.NotImplementedError()", "def is_sequence(self) -> bool:\n return isinstance(self.yaml_node, yaml.SequenceNode)", "def isSequence(obj):\n # type: (Any) -> bool\n return isinstance(obj, Sequence)", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def is_seq_of(seq, expected_type, seq_type=None):\n if seq_type is None:\n exp_seq_type = abc.Sequence\n else:\n assert isinstance(seq_type, type)\n exp_seq_type = seq_type\n if not isinstance(seq, exp_seq_type):\n return False\n for item in seq:\n if not isinstance(item, expected_type):\n return False\n return True", "def _is_sequence(obj):\n return hasattr(obj, \"__iter__\") and not isinstance(obj, str)", "def is_sequence(item):\n return (not hasattr(item, \"strip\") and\n (hasattr(item, \"__getitem__\") or hasattr(item, \"__iter__\")))", "def _is_sequence_like(self, data):\n return hasattr(data, \"__iter__\") and hasattr(data, \"__getitem__\")", "def issequence(obj) -> bool:\n return hasattr(type(obj), '__iter__') and hasattr(type(obj), '__len__')", "def is_sequence(arg):\n return (not hasattr(arg, \"strip\") and\n hasattr(arg, \"__getitem__\") or\n hasattr(arg, \"__iter__\"))", "def is_sequence(arg):\n\n # np.float{16,32,64} and np.int types have __getitem__ defined\n # this is a long-standing bug in NumPy and unlikely to be fixed\n # todo: backport to qmmlpack, write tests\n if isinstance(arg, (str, bytes, np.number, dict, set)):\n return False\n\n return hasattr(arg, \"__getitem__\") or hasattr(arg, \"__iter__\")", "def sequence_type(self) -> str:\n raise NotImplementedError()", "def is_sequence(x):\n return (not hasattr(x, 'strip') and\n hasattr(x, '__getitem__') or\n hasattr(x, '__iter__'))", "def is_generator_or_sequence(x):\n builtin_iterators = (str, list, tuple, dict, set, frozenset)\n if isinstance(x, (tensor.Tensor, np.ndarray) + builtin_iterators):\n return False\n return (tf_inspect.isgenerator(x) or\n isinstance(x, Sequence) or\n isinstance(x, typing.Iterator))", "def validate_sequence(outcome):\n from collections.abc import Sequence\n if not isinstance(outcome, Sequence):\n raise ditException('Outcome class is not a sequence.')\n else:\n return True", "def is_refseq(val):\n return refseq_regexp.match(val)", "def _check_sequence(self) -> PossibleResult[T]:\n if isinstance(self.constructor_origin, type) and issubclass(\n self.constructor_origin, Sequence\n ):\n if not isinstance(self.obj, Sequence):\n raise DeserializeError(\n Sequence, self.obj, self.new_depth, self.key\n )\n if self.constructor_args:\n _arg = self.constructor_args[0]\n else:\n _arg = Any # type: ignore\n return self.constructor_origin(\n Deserialize(\n obj=value,\n constructor=_arg,\n depth=self.new_depth,\n convert_primitives=self.convert_primitives,\n ).run()\n for value in self.obj\n ) # type: ignore\n return NO_RESULT", "def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = 
oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True", "def is_type(value):\n if isinstance(value, type):\n return issubclass(value, Type)\n return isinstance(value, Type)", "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n if not valid:\n return False\n return True", "def is_tuple_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=tuple)", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def is_sequence_of_list(items):\n return all(isinstance(item, list) for item in items)", "def checkType(self, value):\n pass", "def require_sequence(self) -> None:\n if not isinstance(self.yaml_node, yaml.SequenceNode):\n raise RecognitionError('A sequence is required here')", "def has_acceptable_type(self, value):\n if not value:\n return False\n if super().has_acceptable_type(value):\n return True\n # Hmmm ok maybe we're running under IPython:\n try:\n import IPython\n return isinstance(value, IPython.kernel.zmq.iostream.OutStream)\n except ImportError:\n return False", "def _is_valid(self, value):\n\n # Entities have an istypeof method that can perform more sophisticated\n # type checking.\n if hasattr(self._type, \"istypeof\"):\n return self._type.istypeof(value)\n else:\n return isinstance(value, self._type)", "def _validate_value_type(value: Any, expected: Sequence[Type]) -> bool:\n\n for entry in expected:\n if get_origin(entry) is None:\n if type(value) == entry: # pylint: disable=unidiomatic-typecheck\n return True\n continue\n if _validate_value_type(value, get_args(entry)):\n return True\n return False", "def is_str_or_coll(value):\n return bool(is_str(value)) or bool(is_tuple_or_list(value))", "def validate(self,value):\r\n return type(value) is self.datatype" ]
[ "0.7498325", "0.72475845", "0.7201947", "0.7077905", "0.68908113", "0.6705126", "0.6571976", "0.6474664", "0.64672744", "0.6388015", "0.6350229", "0.63393205", "0.6258271", "0.6190354", "0.6108412", "0.60880846", "0.6007587", "0.59494674", "0.5857607", "0.5830456", "0.5734059", "0.57093364", "0.56954277", "0.5694758", "0.567189", "0.5651999", "0.56346965", "0.56117123", "0.5597521", "0.55925554" ]
0.7734437
0
Import the class referred to by the fully qualified class path.
def import_class(classpath):
    modname, classname = classpath.rsplit(".", 1)
    module = importlib.import_module(modname)
    klass = getattr(module, classname)
    return klass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_class(path):\n components = path.split(\".\")\n module = components[:-1]\n module = \".\".join(module)\n # __import__ needs a native str() on py2\n mod = __import__(module, fromlist=[str(components[-1])])\n return getattr(mod, str(components[-1]))", "def import_class(self, class_name):\n internal_class_name = class_name.split(\".\")[-1][:-2]\n class_path = class_name.split()[-1].split(\".\")[:-1]\n class_path[0] = class_path[0][1:]\n class_module_path = \".\".join(class_path)\n if internal_class_name in self._project.job_type.job_class_dict:\n module_path = self._project.job_type.job_class_dict[internal_class_name]\n if class_module_path != module_path:\n state.logger.info(\n f'Using registered module \"{module_path}\" instead of custom/old module \"{class_module_path}\" to'\n f' import job type \"{internal_class_name}\"!'\n )\n else:\n module_path = class_module_path\n return getattr(\n importlib.import_module(module_path),\n internal_class_name,\n )", "def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n __import__(mod_str)\r\n return getattr(sys.modules[mod_str], class_str)", "def import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n try:\n __import__(mod_str)\n return getattr(sys.modules[mod_str], class_str)\n except (ImportError, ValueError, AttributeError), exc:\n logging.debug('Inner Exception: %s', exc)\n raise", "def importClass(class_name, module_name, module_path):\n spec = importlib.util.spec_from_file_location(\n module_name, module_path, submodule_search_locations=[])\n module = importlib.util.module_from_spec(spec)\n sys.modules[spec.name] = module\n spec.loader.exec_module(module)\n importlib.invalidate_caches()\n single_class = getattr(module, class_name)\n return single_class", "def load_class(\n fully_qualified_class_name: str\n):\n\n (module_name, fully_qualified_class_name) = fully_qualified_class_name.rsplit('.', 1)\n module_ref = importlib.import_module(module_name)\n class_ref = getattr(module_ref, fully_qualified_class_name)\n\n return class_ref", "def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n __import__(mod_str)\r\n try:\r\n return getattr(sys.modules[mod_str], class_str)\r\n except AttributeError:\r\n raise ImportError('Class %s cannot be found (%s)' %\r\n (class_str,\r\n traceback.format_exception(*sys.exc_info())))", "def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n try:\r\n __import__(mod_str)\r\n return getattr(sys.modules[mod_str], class_str)\r\n except (ValueError, AttributeError):\r\n raise ImportError(_('Class %s cannot be found (%s)') %\r\n (class_str,\r\n traceback.format_exception(*sys.exc_info())))", "def import_class_from_module_path(path, class_name):\n try:\n module_ = SourceFileLoader('', path).load_module()\n return getattr(module_, class_name)\n except FileNotFoundError:\n raise FileNotFoundError(\"%s not found\" % path)\n except AttributeError:\n raise AttributeError(\"%s class not found in %s\" % (class_name, path))", "def importClass(importStr):\n moduleStr, _sep, classStr = importStr.rpartition(\".\")\n \n try:\n __import__(moduleStr)\n return getattr(sys.modules[moduleStr], classStr)\n except (ValueError, AttributeError):\n raise ImportError(\"Class %s cannot be found (%s)\" %\n (classStr, traceback.format_exception(*sys.exc_info())))", "def import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n try:\n __import__(mod_str)\n return 
getattr(sys.modules[mod_str], class_str)\n except (ValueError, AttributeError):\n raise ImportError('Class %s cannot be found (%s).' %\n (class_str,\n traceback.format_exception(*sys.exc_info())))", "def import_class(import_path, setting_name=None):\n mod_name, class_name = import_path.rsplit('.', 1)\n\n # import module\n mod = _import_module(mod_name, classnames=(class_name,))\n if mod is not None:\n # Loaded module, get attribute\n try:\n return getattr(mod, class_name)\n except AttributeError:\n pass\n\n # For ImportError and AttributeError, raise the same exception.\n if setting_name:\n raise ImproperlyConfigured(\"{0} does not point to an existing class: {1}\".format(setting_name, import_path))\n else:\n raise ImproperlyConfigured(\"Class not found: {0}\".format(import_path))", "def my_import(module_name, class_name):\n\n\t# load the module, will raise ImportError if module cannot be loaded\n\tm = importlib.import_module(module_name)\n\n\t# get the class, will raise AttributeError if class cannot be found\n\tc = getattr(m, class_name)\n\n\treturn c", "def _import(module, cls):\n global Scanner\n\n try:\n cls = str(cls)\n mod = __import__(str(module), globals(), locals(), [cls], 1)\n Scanner = getattr(mod, cls)\n except ImportError:\n pass", "def import_class(implementation_filename, base_class):\n\n\n impl_dir, impl_filename = os.path.split(implementation_filename)\n module_name, _ = os.path.splitext(impl_filename)\n\n try:\n sys.path.insert(0, impl_dir)\n fp, filename, description = imp.find_module(module_name)\n module = imp.load_module(module_name, fp, filename, description)\n logging.debug(f\"trying to import fp {fp} \"\n f\" filename {filename} \"\n f\" description {description} \")\n for name in dir(module):\n logging.debug(f\"name {name}\")\n obj = getattr(module, name)\n logging.debug(f\"obj {obj}\")\n try:\n if (type(obj) == type(base_class)\n and issubclass(obj, base_class)\n and obj != base_class):\n return obj\n\n except TypeError as excpt:\n \"\"\" issubclass will throw TypeError for some imports \"\"\"\n logging.debug(f\"caught {excpt}\")\n\n raise ValueError(\"No subclass of {0} in {1}\".format(\n base_class.__name__, implementation_filename))\n\n finally:\n sys.path.pop(0)", "def dynamic_import_from(source_file: str, class_name: str) -> Any:\n module = importlib.import_module(source_file)\n return getattr(module, class_name)", "def AddImport(self, fully_qualified_class):\n if fully_qualified_class.startswith(self._JAVA_LANG_IMPORT):\n # Returning True because it is imported by default.\n return True\n\n # check to see if it already exists\n class_name = self.GetClassName(fully_qualified_class)\n current_import = self._class_name_to_qualified_name.get(\n class_name)\n if current_import:\n return current_import == fully_qualified_class\n\n if fully_qualified_class.startswith('com.google.'):\n self._google_imports.add(fully_qualified_class)\n elif fully_qualified_class.startswith('java.'):\n self._java_imports.add(fully_qualified_class)\n else:\n self._other_imports.add(fully_qualified_class)\n # Now add it to the map\n self._class_name_to_qualified_name[class_name] = fully_qualified_class\n return True", "def of_import(module = None, classname = None, country = None):\n \n if module is None:\n module_str = \"\"\n else:\n module_str = \".\" + module\n \n if classname is None or country is None:\n raise Exception(\"classname or country needed\")\n \n _temp = __import__('src.countries.' 
+ country + module_str, globals = globals(), locals = locals(), fromlist = [classname], level=-1)\n\n \n return getattr(_temp, classname, None)", "def import_from_string(import_path: str) -> Any:\n\n import_classname = import_path.split(\".\")[-1]\n import_module = \".\".join(import_path.split(\".\")[:-1])\n\n module = importlib.import_module(import_module)\n return getattr(module, import_classname)", "def load_class(path):\r\n\r\n mod_name, klass_name = path.rsplit('.', 1)\r\n\r\n try:\r\n mod = import_module(mod_name)\r\n except AttributeError as e:\r\n raise ImproperlyConfigured('Error importing {0}: \"{1}\"'.format(mod_name, e))\r\n\r\n try:\r\n klass = getattr(mod, klass_name)\r\n except AttributeError:\r\n raise ImproperlyConfigured('Module \"{0}\" does not define a \"{1}\" class'.format(mod_name, klass_name))\r\n\r\n return klass", "def load_class(full_class_string):\r\n class_data = full_class_string.split(\".\")\r\n module_path = \".\".join(class_data[:-1])\r\n class_str = class_data[-1]\r\n module = importlib.import_module(module_path)\r\n return getattr(module, class_str)", "def require(path,className=None):\n (dirname, basename) = os.path.split(path)\n packageName = dirname.replace('/','.')\n moduleName = basename.rstrip('.py')\n\n logging.getLogger().debug(\"Loading: %s.%s[%s]\" %(packageName,moduleName,className))\n\n mod = __import__(packageName+'.'+moduleName, globals(), locals(), [className])\n if className:\n return getattr(mod, className)\n\n return mod", "def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError as err:\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\n\n module = importlib.import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError as err:\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n ) from err", "def get_class_import_name(name):\n name = _strip_class_name(name)\n return name", "def class_from_class_path(class_path):\n if class_path not in _CLASS_PATH_TO_CLASS_CACHE:\n module_name, class_name = class_path.rsplit('.', 1)\n m = importlib.import_module(module_name)\n c = getattr(m, class_name)\n _CLASS_PATH_TO_CLASS_CACHE[class_path] = c\n\n return _CLASS_PATH_TO_CLASS_CACHE[class_path]", "def load_class(full_class_string):\n\n class_data = full_class_string.split(\".\")\n module_path = \".\".join(class_data[:-1])\n class_str = class_data[-1]\n\n module = importlib.import_module(module_path)\n # Finally, we retrieve the Class\n return getattr(module, class_str)", "def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError as err:\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\n\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError as err:\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)) from err", "def load_class(full_class_name):\n\n last_dot = full_class_name.rfind('.')\n\n if last_dot == -1:\n message = (\"We require at least two dot-separated components in the \"\n \"class-name [%s].\" % (full_class_name))\n\n logging.exception(message)\n raise Exception(message)\n \n module_name = full_class_name[:last_dot]\n class_name = full_class_name[last_dot + 1:]\n\n logging.debug(\"Loading class [%s] from module [%s].\" % (class_name, \n module_name))\n\n try:\n module = 
importlib.import_module(module_name)\n except:\n logging.exception(\"Could not import module [%s].\" % (module_name))\n raise\n\n try:\n return module.__dict__[class_name]\n except:\n logging.exception(\"Class [%s] does not exist in module [%s].\" % \n (class_name, module_name))\n raise", "def get_class(fileName):\n module = __import__(fileName)\n return getattr(module, fileName)", "def __init__(self, module_name,class_name):\n\n try:\n self.module = importlib.import_module(module_name)\n self.get_class_object = getattr(self.module,class_name)\n \n except:\n print(\"Failed to import the module {} from {}\".format(class_name,module_name))" ]
[ "0.7455476", "0.69682497", "0.68978304", "0.68467927", "0.6829854", "0.68212545", "0.6801098", "0.6758792", "0.67015433", "0.66877824", "0.66808754", "0.66743", "0.66590977", "0.64971083", "0.6385219", "0.63572675", "0.63111407", "0.6289722", "0.6236301", "0.6071559", "0.60609794", "0.60318047", "0.60105973", "0.5970172", "0.59476006", "0.594525", "0.5926746", "0.58977544", "0.58788216", "0.5801363" ]
0.70014346
1
Attempt to return a Python class for the input class reference. If `classref` is a class or None, return it. If `classref` is a python classpath (e.g., "foo.bar.MyClass") import the class and return it.
def resolve_class(classref): if classref is None: return None elif isinstance(classref, six.class_types): return classref elif isinstance(classref, six.string_types): return import_class(classref) else: raise ValueError("Unable to resolve class for '%s'" % classref)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def class_from_class_path(class_path):\n if class_path not in _CLASS_PATH_TO_CLASS_CACHE:\n module_name, class_name = class_path.rsplit('.', 1)\n m = importlib.import_module(module_name)\n c = getattr(m, class_name)\n _CLASS_PATH_TO_CLASS_CACHE[class_path] = c\n\n return _CLASS_PATH_TO_CLASS_CACHE[class_path]", "def load_class(\n fully_qualified_class_name: str\n):\n\n (module_name, fully_qualified_class_name) = fully_qualified_class_name.rsplit('.', 1)\n module_ref = importlib.import_module(module_name)\n class_ref = getattr(module_ref, fully_qualified_class_name)\n\n return class_ref", "def str_to_class(referance_name):\n return getattr(sys.modules[__name__], referance_name)", "def get_cls(module_name, class_name, relaxed=True):\n try:\n module = importlib.import_module(module_name)\n except ImportError:\n if relaxed:\n return None\n else:\n raise ImportError(\"Cannot load module: %s\" % module_name)\n try:\n return getattr(module, class_name)\n except AttributeError:\n if relaxed:\n return None\n else:\n raise NotImplementedError(\"Cannot load class: %s.%s\" % (module_name, class_name))", "def get_class(class_name, module_paths=None):\n class_ = locate(class_name)\n if (class_ is None) and (module_paths is not None):\n for module_path in module_paths:\n class_ = locate('.'.join([module_path, class_name]))\n if class_ is not None:\n break\n\n if class_ is None:\n raise ValueError(\n \"Class not found in {}: {}\".format(module_paths, class_name))\n\n return class_", "def instantiate_class(self, requested_class_name, *args, **kwargs):\r\n classes = self.get_class_list()\r\n ref = None\r\n casted_args = []\r\n \r\n # Change the args to ints/floats. Assuming that that is all that is required.\r\n for i in range(0, len(args)):\r\n val = args[i]\r\n \r\n if val == '':\r\n continue\r\n \r\n if '.' 
in val:\r\n casted_args.append(float(val))\r\n else:\r\n casted_args.append(int(val))\r\n \r\n for class_tuple in classes:\r\n class_name = class_tuple[0]\r\n class_ref = class_tuple[1]\r\n\r\n if class_name == requested_class_name:\r\n ref = class_ref(*casted_args) # Instantiate the class with parameters!\r\n # If you want to use parameter names, try kwargs instead.\r\n\r\n # If ref is not set, the class was not located!\r\n if ref is None:\r\n raise NameError(\"The class {0} could not be found.\".format(requested_class_name))\r\n\r\n return ref", "def import_class(classpath):\n modname, classname = classpath.rsplit(\".\", 1)\n module = importlib.import_module(modname)\n klass = getattr(module, classname)\n return klass", "def deferred(ref):\n module, _ = ref.split(\".\", 1)\n if module in sys.modules:\n return _getcls(ref)\n\n @meta\n def check(cls):\n full_cls_mod = getattr(cls, \"__module__\", None)\n cls_module = full_cls_mod.split(\".\", 1)[0] if full_cls_mod else None\n if cls_module == module:\n return issubclass(cls, _getcls(ref))\n else:\n return False\n\n return check", "def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)", "def getclass(instance_or_cls):\n return instance_or_cls if inspect.isclass(instance_or_cls) \\\n else instance_or_cls.__class__", "def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)", "def my_import(module_name, class_name):\n\n\t# load the module, will raise ImportError if module cannot be loaded\n\tm = importlib.import_module(module_name)\n\n\t# get the class, will raise AttributeError if class cannot be found\n\tc = getattr(m, class_name)\n\n\treturn c", "def get_class(classname):\n parts = classname.split('.')\n module = '.'.join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp) \n return m", "def expand_class_ref(cls_ref: str) -> Tuple[str, str]:\n parts = cls_ref.rpartition(\".\")\n return parts[0], parts[-1]", "def get_class(self, class_name, output_type=\"PythonClass\"):\n uris = self.cls_converter.get_uri(class_name)\n if type(uris) == list:\n warnings.warn(\"Found more than 1 classes defined within schema using label {}\".format(class_name))\n return [SchemaClass(_item, self, output_type) for _item in uris]\n else:\n return SchemaClass(class_name, self, output_type)", "def import_class(import_path, setting_name=None):\n mod_name, class_name = import_path.rsplit('.', 1)\n\n # import module\n mod = _import_module(mod_name, classnames=(class_name,))\n if mod is not None:\n # Loaded module, get attribute\n try:\n return getattr(mod, class_name)\n except AttributeError:\n pass\n\n # For ImportError and AttributeError, raise the same exception.\n if setting_name:\n raise ImproperlyConfigured(\"{0} does not point to an existing class: {1}\".format(setting_name, import_path))\n else:\n raise ImproperlyConfigured(\"Class not found: {0}\".format(import_path))", "def import_class(self, class_name):\n internal_class_name = class_name.split(\".\")[-1][:-2]\n class_path = class_name.split()[-1].split(\".\")[:-1]\n class_path[0] = class_path[0][1:]\n class_module_path = \".\".join(class_path)\n if internal_class_name in self._project.job_type.job_class_dict:\n module_path = self._project.job_type.job_class_dict[internal_class_name]\n if class_module_path != module_path:\n state.logger.info(\n f'Using registered module \"{module_path}\" instead of custom/old 
module \"{class_module_path}\" to'\n f' import job type \"{internal_class_name}\"!'\n )\n else:\n module_path = class_module_path\n return getattr(\n importlib.import_module(module_path),\n internal_class_name,\n )", "def get_class(string):\n logger = logman.getLogger(__name__)\n if '/' not in string:\n logger.error(\"The string is not properly formatted. Use '/' to separate module path from classname. String is: {}\".format(string))\n return\n module_name, class_name = string.split('/')\n try:\n logger.debug('Retrieving class {} from module {}'.format(class_name, module_name))\n temp_class = getattr(importlib.import_module(module_name), class_name)\n except ModuleNotFoundError:\n logger.error(\"Module not found: {}\".format(module_name))\n raise\n except AttributeError:\n logger.error(\"Class not found: {}\".format(class_name))\n raise\n except:\n logger.error(\"Unexpected error while loading {}\".format(string))\n raise\n\n return temp_class", "def import_class_from_module_path(path, class_name):\n try:\n module_ = SourceFileLoader('', path).load_module()\n return getattr(module_, class_name)\n except FileNotFoundError:\n raise FileNotFoundError(\"%s not found\" % path)\n except AttributeError:\n raise AttributeError(\"%s class not found in %s\" % (class_name, path))", "def import_class(path):\n components = path.split(\".\")\n module = components[:-1]\n module = \".\".join(module)\n # __import__ needs a native str() on py2\n mod = __import__(module, fromlist=[str(components[-1])])\n return getattr(mod, str(components[-1]))", "def load_class(class_path, setting_name=None):\n try:\n class_module, class_name = class_path.rsplit('.', 1)\n except ValueError:\n if setting_name:\n txt = '%s isn\\'t a valid module. Check your %s setting' % (class_path,setting_name)\n else:\n txt = '%s isn\\'t a valid module.' % class_path\n raise exceptions.ImproperlyConfigured(txt)\n \n try:\n mod = import_module(class_module)\n except ImportError, e:\n if setting_name:\n txt = 'Error importing backend %s: \"%s\". Check your %s setting' % (class_module, e, setting_name)\n else:\n txt = 'Error importing backend %s: \"%s\".' % (class_module, e)\n raise exceptions.ImproperlyConfigured(txt)\n \n try:\n clazz = getattr(mod, class_name)\n except AttributeError:\n if setting_name:\n txt = 'Backend module \"%s\" does not define a \"%s\" class. Check your %s setting' % (class_module, class_name, setting_name)\n else:\n txt = 'Backend module \"%s\" does not define a \"%s\" class.' 
% (class_module, class_name)\n raise exceptions.ImproperlyConfigured(txt)\n return clazz", "def classFromString(className, mod=None):\n if mod is None:\n mod = className\n if className == \"NoneType\":\n cls = None\n else:\n try:\n __import__(mod, globals(), locals(), [], -1)\n cls = sys.modules[mod].__dict__[className]\n except ImportError:\n try:\n cls = eval(\"{0}\".format(className))\n except NameError:\n print('Class \"{0}\" from modue \"{1}\"'\n ' was not found.'.format(className, mod))\n return\n except:\n print('An unanticipated error occurred '\n 'while trying to find Class \"{0}\"'\n ' in module \"{1}\".'.format(className, mod))\n raise\n except:\n print('Module \"{0}\" was not found, terminating'.format(mod))\n raise\n return cls", "def get_class(mod, class_name: str):\n for name_val in inspect.getmembers(mod, inspect.isclass):\n name = name_val[0]\n val = name_val[1]\n if name == class_name:\n return val\n return None", "def find_class(self, class_name: str) -> Type:\n pass", "def stringToClass(cls_str):\n import_stg1 = cls_str.split(\" \")[1]\n import_stg2 = import_stg1.replace(\"'\", \"\")\n import_stg3 = import_stg2.replace(\">\", \"\")\n import_parse = import_stg3.split(\".\")\n cls = import_parse[-1]\n import_path = '.'.join(import_parse[:-1])\n import_statement = \"from %s import %s\" % (import_path, cls)\n exec(import_statement)\n assign_statement = \"this_class = %s\" % cls\n exec(assign_statement)\n return this_class", "def _class(self, *args):\r\n\r\n if hasattr(args[0], '__mro__'):\r\n #this is a class\r\n return args[0]\r\n else:\r\n #this is an instance\r\n return type(args[0])", "def import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n try:\n __import__(mod_str)\n return getattr(sys.modules[mod_str], class_str)\n except (ValueError, AttributeError):\n raise ImportError('Class %s cannot be found (%s).' %\n (class_str,\n traceback.format_exception(*sys.exc_info())))", "def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n __import__(mod_str)\r\n try:\r\n return getattr(sys.modules[mod_str], class_str)\r\n except AttributeError:\r\n raise ImportError('Class %s cannot be found (%s)' %\r\n (class_str,\r\n traceback.format_exception(*sys.exc_info())))", "def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n try:\r\n __import__(mod_str)\r\n return getattr(sys.modules[mod_str], class_str)\r\n except (ValueError, AttributeError):\r\n raise ImportError(_('Class %s cannot be found (%s)') %\r\n (class_str,\r\n traceback.format_exception(*sys.exc_info())))", "def get_class(fileName):\n module = __import__(fileName)\n return getattr(module, fileName)" ]
[ "0.6821553", "0.6546032", "0.6535509", "0.64897174", "0.6375194", "0.62873596", "0.6285258", "0.62793916", "0.62676936", "0.6228637", "0.61416215", "0.61140877", "0.6087256", "0.6058573", "0.6048758", "0.5986935", "0.59225583", "0.58762515", "0.5859201", "0.5783632", "0.57542336", "0.57446617", "0.57274795", "0.5717874", "0.57054806", "0.57022184", "0.5702019", "0.5700215", "0.56861526", "0.56813043" ]
0.883918
0
Function decorator which checks that the decorated function is called with a set of required kwargs.
def needkwargs(*argnames): required = set(argnames) def decorator(func): def inner(*args, **kwargs): missing = required - set(kwargs) if missing: err = "%s kwargs are missing." % list(missing) raise ValueError(err) return func(*args, **kwargs) return inner return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def require_arguments(required):\n\n def decorator(func):\n def wrapper(request):\n request_params = get_dict_from_request(request)\n for param in required:\n if param not in request_params:\n return APIMissingArgumentResponse(error_msg=param)\n return func(request)\n\n return wrapper\n\n return decorator", "def test_no_mutually_exclusive_args_provided(self):\n _func = mutually_exclusive_parameters('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func(), 'foo')\n self.assertEqual(_func(arg3='hello'), 'foo')", "def check_mock_called_with_kwargs(mock: MagicMock, params: dict) -> None:\n for call in mock.mock_calls:\n check_call_contains_kwargs(call, params)", "def check_in_kwargs(kwarg_names):\n def layer(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n for kwarg in kwarg_names:\n if kwarg not in kwargs:\n raise SCBPaymentError('\"{0}\" attrs is required'.format(kwarg))\n return func(self, *args, **kwargs)\n return wrapper\n return layer", "def accepts_kwarg(func, kwarg):\n signature = inspect.signature(func)\n try:\n signature.bind_partial(**{kwarg: None})\n return True\n except TypeError:\n return False", "def test_require_at_least_one_and_several_provided(self):\n _func = at_least_one_of('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('ahoy', 'there'), 'foo')\n self.assertEqual(_func(arg1='ahoy', arg2='there'), 'foo')\n self.assertEqual(_func('ahoy', arg2='there', arg3='matey'), 'foo')", "def test_single_positional_arg_provided(self):\n _func = required_parameters('arg1')(undecorated_func)\n self.assertEqual(_func('hello'), 'foo')", "def test_require_at_least_one_and_one_provided(self):\n _func = at_least_one_of('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('ahoy'), 'foo')\n self.assertEqual(_func(arg2='ahoy'), 'foo')", "def test_single_keyword_arg_provided(self):\n _func = required_parameters('arg1')(undecorated_func)\n self.assertEqual(_func(arg1='hello'), 'foo')", "def test_one_mutually_exclusive_arg_provided(self):\n _func = mutually_exclusive_parameters('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('hello'), 'foo')\n self.assertEqual(_func(arg1='hello'), 'foo')\n self.assertEqual(_func(arg2='hello'), 'foo')", "def isCallableWithArgs(func, argdict):\n return not missingArgs(func, argdict) and not invalidArgs(func, argdict)", "def keyword_only(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if len(args) > 0:\n raise TypeError(\"Method %s forces keyword arguments.\" % func.__name__)\n self._input_kwargs = kwargs\n return func(self, **kwargs)\n\n return wrapper", "def _validate_kwargs(self, kwargs):\n pass", "def call_has_args(*args, **kwargs) -> CallHasArgs:\n return CallHasArgs(*args, **kwargs)", "def test_arguments(self):\n calls = []\n decorator = self.decorator()\n\n @decorator\n def func(a, b, c):\n calls.append((a, b, c))\n\n func(1, 2, c=3)\n self.assertEqual(calls, [(1, 2, 3)])", "def test_exactly_one_required(self):\n\n @mutually_exclusive_parameters('arg1', 'arg2')\n @at_least_one_of('arg1', 'arg2')\n def _func1_decorated(arg1=None, arg2=None, arg3=None):\n return 'foo'\n\n from plone.api.exc import InvalidParameterError\n from plone.api.exc import MissingParameterError\n\n # test it errors if you provide none\n with self.assertRaises(MissingParameterError):\n _func1_decorated()\n\n # test that it errors if you provide both\n with self.assertRaises(InvalidParameterError):\n _func1_decorated('ahoy', 'there')\n\n # everything ok\n self.assertEqual(_func1_decorated('ahoy'), 'foo')\n 
self.assertEqual(_func1_decorated('ahoy', arg3='there'), 'foo')", "def test_required_and_mutually_exclusive(self):\n @mutually_exclusive_parameters('arg2', 'arg3')\n @required_parameters('arg1')\n def _func1_decorated(arg1=None, arg2=None, arg3=None):\n return 'foo'\n\n from plone.api.exc import InvalidParameterError\n from plone.api.exc import MissingParameterError\n\n # test that the required parameter error works (missing arg1)\n with self.assertRaises(MissingParameterError):\n _func1_decorated(arg2='ahoy')\n\n # test that the mutually exclusive decorator works\n # (arg2 and arg3 should not be there)\n with self.assertRaises(InvalidParameterError):\n _func1_decorated(\n arg1='ahoy',\n arg2='there',\n arg3='matey',\n )\n\n # test that they both work. Making no assumptions here about the order\n # in which they fire.\n with self.assertRaises((InvalidParameterError, MissingParameterError)):\n _func1_decorated(\n arg2='ahoy',\n arg3='there',\n )\n\n # everything ok\n self.assertEqual(_func1_decorated('ahoy', arg3='there'), 'foo')", "def pass_return_value(func):\n spec = inspect.getfullargspec(func)\n return (\n \"return_value\" in spec.args\n or \"return_value\" in spec.kwonlyargs\n or spec.varkw == \"kwargs\"\n )", "def decorator(f):\n @functools.wraps(f)\n def decoratored_function(*args, **kwargs):\n if args and len(args) == 1:\n return f(*args, **kwargs)\n\n if args:\n raise TypeError(\n \"This decorator only accepts extra keyword arguments.\")\n\n return lambda g: f(g, **kwargs)\n\n return decoratored_function", "def _apply_decorator(fn, *args, **kwargs):\n if args and (kwargs or len(args) > 1):\n # Case 1\n # If both args and kwargs are in play, they have to have been passed explicitly like @foo(a1, k2=v2).\n # If more than one positional is given, that has to be something like @foo(a1, a2, ...)\n # Decorators using this function need to agree to only accept keyword arguments, so those cases can't happen.\n # They can do this by using an optional first positional argument, as in 'def foo(x=3):',\n # or they can do it by using a * as in 'def foo(*, x)' or if no arguments are desired, obviously, 'def foo():'.\n raise SyntaxError(\"Positional arguments to decorator (@%s) not allowed here.\" % fn.__name__)\n elif args: # i.e., there is 1 positional arg (an no keys)\n # Case 2\n arg0 = args[0] # At this point, we know there is a single positional argument.\n #\n # Here there are two cases.\n #\n # (a) The user may have done @foo, in which case we will have a fn which is the value of foo,\n # but not the result of applying it.\n #\n # (b) Otherwise, the user has done @foo(), in which case what we'll have the function of one\n # argument that does the wrapping of the subsequent function or class.\n #\n # So since case (a) expects fn to be a function that tolerates zero arguments\n # while case (b) expects fn to be a function that rejects positional arguments,\n # we can call fn with the positional argument, arg0. If that argument is rejected with a TypeError,\n # we know that it's really case (a) and that we need to call fn once with no arguments\n # before retrying on arg0.\n if _is_function_of_exactly_one_required_arg(fn):\n # Case 2A\n # We are ready to wrap the function or class in arg0\n return fn(arg0)\n else:\n # Case 2B\n # We are ALMOST ready to wrap the function or class in arg0,\n # but first we have to call ourselves with no arguments as in case (a) described above.\n return fn()(arg0)\n else:\n # Case 3\n # Here we have kwargs = {...} from @foo(x=3, y=4, ...) 
or maybe no kwargs either @foo().\n # Either way, we've already evaluated the foo(...) call, so all that remains is to call on our kwargs.\n # (There are no args to call it on because we tested that above.)\n return fn(**kwargs)", "def add_check_function(check_function: Callable):\n\n def decorator(func: Callable):\n @wraps(func)\n def wrapper(*args, **kwargs):\n check_function(*args, *kwargs.values())\n return func(*args, **kwargs)\n\n return wrapper\n\n name = getattr(check_function, '__name__', '`func`')\n decorator.__doc__ = f\"Check the function's arguments via `{name}` before calling it.\"\n return decorator", "def keyword_only(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > 0:\n raise TypeError(\"Method %s only takes keyword arguments.\" % func.__name__)\n return func(**kwargs)\n notice = \".. Note:: This method requires all argument be specified by keyword.\\n\"\n wrapper.__doc__ = notice + wrapper.__doc__\n return wrapper", "def require_post_params(*args, **kwargs):\r\n required_params = []\r\n required_params += [(arg, None) for arg in args]\r\n required_params += [(key, kwargs[key]) for key in kwargs]\r\n # required_params = e.g. [('action', 'enroll or unenroll'), ['emails', None]]\r\n\r\n def decorator(func): # pylint: disable=C0111\r\n def wrapped(*args, **kwargs): # pylint: disable=C0111\r\n request = args[0]\r\n\r\n error_response_data = {\r\n 'error': 'Missing required query parameter(s)',\r\n 'parameters': [],\r\n 'info': {},\r\n }\r\n\r\n for (param, extra) in required_params:\r\n default = object()\r\n if request.POST.get(param, default) == default:\r\n error_response_data['parameters'].append(param)\r\n error_response_data['info'][param] = extra\r\n\r\n if len(error_response_data['parameters']) > 0:\r\n return JsonResponse(error_response_data, status=400)\r\n else:\r\n return func(*args, **kwargs)\r\n return wrapped\r\n return decorator", "def mandatory(arguments):\n\n def inner(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n for arg in arguments:\n if not request.args.get(arg):\n return redirect(\"/\")\n return f(*args, **kwargs)\n\n return wrapper\n\n return inner", "def _assert_resolved_kwargs_valid(postconditions: List[Contract],\n resolved_kwargs: Mapping[str, Any]) -> Optional[TypeError]:\n if postconditions:\n if 'result' in resolved_kwargs:\n return TypeError(\"Unexpected argument 'result' in a function decorated with postconditions.\")\n\n if 'OLD' in resolved_kwargs:\n return TypeError(\"Unexpected argument 'OLD' in a function decorated with postconditions.\")\n\n return None", "def permitted_kwargs(permitted):\n def _wraps(func):\n @functools.wraps(func)\n def _inner(name, description, kwargs):\n bad = [a for a in kwargs.keys() if a not in permitted]\n if bad:\n raise OptionException('Invalid kwargs for option \"{}\": \"{}\"'.format(\n name, ' '.join(bad)))\n return func(description, kwargs)\n return _inner\n return _wraps", "def _validate_args(\n autologging_integration,\n function_name,\n user_call_args,\n user_call_kwargs,\n autologging_call_args,\n autologging_call_kwargs,\n):\n\n def _validate_new_input(inp):\n \"\"\"\n Validates a new input (arg or kwarg) introduced to the underlying / original ML function\n call during the execution of a patched ML function. 
The new input is valid if:\n\n - The new input is a function that has been decorated with\n `exception_safe_function_for_class` or `pickalable_exception_safe_function`\n - OR the new input is a class with the `ExceptionSafeClass` metaclass\n - OR the new input is a list and each of its elements is valid according to the\n these criteria\n \"\"\"\n if type(inp) == list:\n for item in inp:\n _validate_new_input(item)\n elif callable(inp):\n assert getattr(inp, _ATTRIBUTE_EXCEPTION_SAFE, False), (\n f\"New function argument '{inp}' passed to original function is not exception-safe.\"\n \" Please decorate the function with `exception_safe_function` or \"\n \"`pickalable_exception_safe_function`\"\n )\n else:\n assert hasattr(inp, \"__class__\") and type(inp.__class__) in [\n ExceptionSafeClass,\n ExceptionSafeAbstractClass,\n ], (\n f\"Invalid new input '{inp}'. New args / kwargs introduced to `original` function \"\n \"calls by patched code must either be functions decorated with \"\n \"`exception_safe_function_for_class`, instances of classes with the \"\n \"`ExceptionSafeClass` or `ExceptionSafeAbstractClass` metaclass safe or lists of \"\n \"such exception safe functions / classes.\"\n )\n\n def _assert_autologging_input_positional_args_are_superset(\n autologging_call_input, user_call_input\n ):\n length_diff = len(autologging_call_input) - len(user_call_input)\n assert (\n length_diff >= 0\n ), f\"{length_diff} expected inputs are missing from the call to the original function.\"\n\n def _assert_autologging_input_kwargs_are_superset(autologging_call_input, user_call_input):\n assert set(user_call_input.keys()).issubset(set(autologging_call_input.keys())), (\n \"Keyword or dictionary arguments to original function omit\"\n \" one or more expected keys: '{}'\".format(\n set(user_call_input.keys()) - set(autologging_call_input.keys())\n )\n )\n\n def _validate(autologging_call_input, user_call_input=None):\n \"\"\"\n Validates that the specified `autologging_call_input` and `user_call_input`\n are compatible. If `user_call_input` is `None`, then `autologging_call_input`\n is regarded as a new input added by autologging and is validated using\n `_validate_new_input`. 
Otherwise, the following properties must hold:\n\n - `autologging_call_input` and `user_call_input` must have the same type\n (referred to as \"input type\")\n - if the input type is a tuple, list or dictionary, then `autologging_call_input` must\n be equivalent to `user_call_input` or be a superset of `user_call_input`\n - for all other input types, `autologging_call_input` and `user_call_input`\n must be equivalent by reference equality or by object equality\n\n :param autologging_call_input: call input from autologging\n :param user_call_input: call input from user\n \"\"\"\n\n if user_call_input is None and autologging_call_input is not None:\n _validate_new_input(autologging_call_input)\n return\n\n assert type(autologging_call_input) == type(\n user_call_input\n ), \"Type of input to original function '{}' does not match expected type '{}'\".format(\n type(autologging_call_input), type(user_call_input)\n )\n\n if type(autologging_call_input) in [list, tuple]:\n _assert_autologging_input_positional_args_are_superset(\n autologging_call_input, user_call_input\n )\n # If the autologging call input is longer than the user call input, we `zip_longest`\n # will pad the user call input with `None` values to ensure that the subsequent calls\n # to `_validate` identify new inputs added by the autologging call\n for a, u in itertools.zip_longest(autologging_call_input, user_call_input):\n _validate(a, u)\n elif type(autologging_call_input) == dict:\n _assert_autologging_input_kwargs_are_superset(autologging_call_input, user_call_input)\n for key in autologging_call_input.keys():\n _validate(autologging_call_input[key], user_call_input.get(key, None))\n else:\n assert (\n autologging_call_input is user_call_input\n or autologging_call_input == user_call_input\n ), (\n \"Input to original function does not match expected input.\"\n f\" Original: '{autologging_call_input}'. Expected: '{user_call_input}'\"\n )\n\n # Similar validation logic found in _validate, unraveling the list of arguments to exclude\n # checks for any validation exempt positional arguments.\n _assert_autologging_input_positional_args_are_superset(autologging_call_args, user_call_args)\n for index, autologging_call_arg, user_call_arg in itertools.zip_longest(\n range(len(user_call_args)), autologging_call_args, user_call_args\n ):\n if not _is_arg_exempt_from_validation(\n autologging_integration,\n function_name,\n user_call_arg,\n argument_index=index,\n ):\n _validate(autologging_call_arg, user_call_arg)\n\n # Similar validation logic found in _validate, unraveling the dictionary of arguments to exclude\n # checks for any validation exempt keyword arguments.\n _assert_autologging_input_kwargs_are_superset(autologging_call_kwargs, user_call_kwargs)\n for key in autologging_call_kwargs.keys():\n if not _is_arg_exempt_from_validation(\n autologging_integration,\n function_name,\n user_call_kwargs.get(key, None),\n argument_name=key,\n ):\n _validate(\n autologging_call_kwargs[key],\n user_call_kwargs.get(key, None),\n )", "def check_args(*args: Tuple[Any, ...], **kwargs: Any) -> None:\n\n # We begin by initializing the maximum number of args we will allow at 0. 
We will iterate\n # this if by chance we see an argument whose name is \"self\".\n max_arg_len = 0\n\n # iterate through every parameter passed in\n for idx, param_name in enumerate(literal_signature.parameters):\n\n if idx == 0 and (param_name == \"self\" or param_name == \"cls\"):\n max_arg_len += 1\n continue\n\n # if this parameter isn't in kwargs, then it's probably in args. However, we can't check\n # directly because we don't have arg names, only the list of args which were passed in.\n # Thus, the way this check works is to return an error if we find an argument which\n # isn't in kwargs and isn't \"self\".\n if param_name not in kwargs and len(args) > max_arg_len:\n traceback_and_raise(\n AttributeError(\n f\"'{param_name}' was passed into a function as an arg instead of a kwarg. \"\n f\"Please pass in all arguments as kwargs when coding/using PySyft.\"\n )\n )", "def test_non_existant_required_arg(self):\n with self.assertRaises(ValueError):\n _func = required_parameters('arg1', 'wibble', 'wobble')\n _func(undecorated_func)\n\n with self.assertRaises(ValueError):\n _func = mutually_exclusive_parameters(\n 'arg1',\n 'wibble',\n 'wobble'\n )\n _func(undecorated_func)", "def test_two_mutually_exclusive_args_provided(self):\n from plone.api.exc import InvalidParameterError\n _func = mutually_exclusive_parameters('arg1', 'arg2')(undecorated_func)\n with self.assertRaises(InvalidParameterError):\n _func('ahoy', 'there')\n\n with self.assertRaises(InvalidParameterError):\n _func(arg1='ahoy', arg2='there')" ]
[ "0.72308356", "0.7081471", "0.7008813", "0.6984335", "0.69742376", "0.69423914", "0.69055426", "0.6887728", "0.6887091", "0.6854476", "0.6846821", "0.67375857", "0.6730363", "0.6717054", "0.6682574", "0.6669929", "0.66130555", "0.6594434", "0.65906775", "0.6549689", "0.649757", "0.64112556", "0.63627553", "0.63203925", "0.6318825", "0.6282667", "0.62501705", "0.62447375", "0.6214369", "0.61939955" ]
0.7618426
0
Test the ArmiCLI.showVersion method.
def test_showVersion(self): origout = sys.stdout try: out = io.StringIO() sys.stdout = out ArmiCLI.showVersion() finally: sys.stdout = origout self.assertIn("armi", out.getvalue()) self.assertIn(meta.__version__, out.getvalue())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_show_version():\n result = runner.invoke(app, [\"--version\"])\n assert result.exit_code == 0\n assert \"Confluence poster version\" in result.stdout", "def test_cli_version_info(config, capsys):\n CLI.version_info()\n captured = capsys.readouterr()\n assert captured.out == \"yessssms \" + VERSION + \"\\n\"", "def test__print_version(capsys):\n util._print_version(\"midgard\")\n version, _ = capsys.readouterr()\n assert isinstance(version, str) and re.search(\"[0-9]\", version)", "def test_main_version(\n app_tester: ApplicationTester, valiant_app_title: str, valiant_version: str\n) -> None:\n app_tester.execute(\"--version\")\n expected = f\"{valiant_app_title} version {valiant_version}\\n\"\n assert expected == app_tester.io.fetch_output()", "def test_version(self):\n v = version('/no/such/executable')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('false')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('echo')\n self.assertEqual(v, 'describe .devrev-list --count HEAD')", "def test_version_display():\n output = subprocess.run(['smif', '-V'], stdout=subprocess.PIPE)\n assert smif.__version__ in str(output.stdout)", "def test_version(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n mocker.patch('pysds011.driver.SDS011.cmd_set_mode')\n cfv = mocker.patch('pysds011.driver.SDS011.cmd_firmware_ver')\n cfv.return_value = {'pretty': 'BimBumBam'}\n runner = CliRunner()\n result = runner.invoke(main, ['fw-version'])\n\n assert 'BimBumBam' in result.output\n assert result.exit_code == 0", "def test_version(self):\n result = check_output([b\"flocker-reportstate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))", "def print_version():\n print(\"1.0\")", "def test_cli__passes__with_version_command() -> None:\n\n runner = CliRunner()\n result = runner.invoke(\n cli,\n args=\"version\",\n )\n\n assert_that(result.exit_code).is_equal_to(0)\n assert_that(result.output).is_instance_of(str)\n assert_that(result.output).contains(f\"SlickML Version: {__version__}\")", "def test_cli_name(self):\n output = self.update_command('--version').strip()\n\n assert output == '{cli_name} {version}'.format(\n cli_name=self.CLI_name, version=_version.__version__\n )", "def cli_show_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n show_versions()\n\n ctx.exit()", "def test_version(self):\n result = check_output([b\"flocker-changestate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))", "def test_get_version(self):\n pass", "def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)", "def test_arg_version(run_nait) -> None: # type: ignore\n expected = nanaimo.version.__version__\n assert run_nait(['--version']).stdout.decode('utf-8').startswith(expected)", "def test_version():\n mock = MagicMock(\n return_value=(\n \" LVM version: 2.02.168(2) (2016-11-30)\\n\"\n \" Library version: 1.03.01 (2016-11-30)\\n\"\n \" Driver version: 4.35.0\\n\"\n )\n )\n with patch.dict(linux_lvm.__salt__, {\"cmd.run\": mock}):\n assert linux_lvm.version() == \"2.02.168(2) (2016-11-30)\"", "def test__get_program_version():\n version = util._get_program_version(\"midgard\")\n assert isinstance(version, str) and re.search(\"[0-9]\", version)", "def do_version(*args, **kwargs):\n print(Shell.__version__)", "def test_version() -> None:\n assertion.assert_(Version, 
nanoqm.__version__)", "def version() -> None:\n click.echo(__version__)", "def do_version(self, a):\n print(\"\\tversion: \" + (str(ise.getVersion())) +\n \".\" + (str(ise.getFirmware())))", "def print_version():\n parser = parsersetup()\n parser.print_version()", "def test_main_first_arg_version(capsys):\n with pytest.raises(SystemExit):\n uflash.main(argv=['--version'])\n\n stdout, stderr = capsys.readouterr()\n expected = uflash.get_version()\n # On python 2 --version prints to stderr. On python 3 to stdout.\n # https://bugs.python.org/issue18920\n assert (expected in stdout) or (expected in stderr)", "def _print_ver(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.secho(__version__, fg='yellow')\n ctx.exit()", "def test_cli__passes__with_version_option(args: str) -> None:\n\n runner = CliRunner()\n result = runner.invoke(\n cli,\n args=args,\n )\n\n assert_that(result.exit_code).is_equal_to(0)\n assert_that(result.output).is_instance_of(str)\n assert_that(result.output).is_equal_to(f\"cli, version {__version__}\\n\")", "def _print_version(ctx: click.Context, _, value: str):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(__version__)\n ctx.exit()", "def test_get_short_version(self):\n pass", "def version():\n click.echo(__version__)", "def test_get_iiqtools_version_ok(self, fake_get_distribution):\n fake_get_distribution.return_value = self.FakeDistVersion('1.2.3')\n\n v = versions.get_iiqtools_version()\n\n self.assertTrue(isinstance(v, versions.Version))" ]
[ "0.7923721", "0.771824", "0.7611274", "0.75258106", "0.749578", "0.74099976", "0.7334167", "0.7327753", "0.723506", "0.7200838", "0.71340364", "0.7133125", "0.7112922", "0.70674443", "0.7005836", "0.69929063", "0.6932062", "0.6920988", "0.6911848", "0.6906924", "0.68915796", "0.6888352", "0.68627423", "0.685866", "0.6828042", "0.68172395", "0.6781864", "0.67756534", "0.6773865", "0.6759254" ]
0.81843084
0
Wrapper around ProtobufReceiver.send_obj for logging
def send_obj(self, obj): ProtobufReceiver.send_obj(self, obj) self.factory.sent_message_log[obj.__class__.__name__] += obj.ByteSize()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, obj):\n try:\n obj = dic_pickle_dumps_and_b64(obj)\n self.bufout += json.dumps(obj) + \"\\n\\n\"\n except:\n # It should never happen! And not be reported to the client!\n logging.exception(\"Client.send(%s)\", self)", "def send(self, kwargs):\n self.logger.log_struct(**kwargs)", "def send(self, obj):\n b = json.dumps(obj)\n length = struct.pack(\"!L\", len(b))\n self.s.send(length + b.encode('utf-8'))", "def send_pickle(self, obj, flags=0, protocol=-1):\n pobj = pickle.dumps(obj, protocol)\n return self.send(pobj, flags=flags)", "def _stringify_proto(obj):\n if isinstance(obj, str): return obj\n elif isinstance(obj, Message): return obj.SerializeToString()\n else: raise TypeError('Object can not be serialized as a string.')", "def send(self, obj, tag=None):\n if hasattr(obj, 'send'):\n return obj.send(self, tag=tag)\n else:\n # treat object as bytes\n return self.stream(obj)", "def _stringify_proto(obj):\n return obj.SerializeToString()", "def write(self, proto):\n pass", "def _send_object(object: Any, src: int, dst: int, group: ProcessGroup) -> None:\n # then broadcast safely\n _broadcast_object_list([object], src, group)", "def _send(self, receiver_group, receiver_id, typ, msg):\n self._out.append((receiver_group, receiver_id, (typ, msg)))", "def _send_serialized(self, socket, msg):\n socket.send(pickle.dumps(msg))", "def callback_client_send(self, connection_object, data, compression=None):\n # define override here\n data = JSONSerializer.serialize(data)\n logger.debug(f\"Sending message to client at {connection_object.address} : {data['class']}\")\n # print(f\"Sending message to client at {connection_object.address} : {data['class']}\")\n return super(MastermindServerUDP, self).callback_client_send(connection_object, data, compression)", "def send(self, buf):\n if self._verbose:\n self._prettyprint(buf, True)\n self._s.sendall(buf)", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send_msg_to_server(self, msg_obj=None):\n serialized_msg = pickle.dumps(msg_obj)\n msg_header = general_message_header(len(serialized_msg),BUFFER_LENGTH)\n full_msg = bytes(msg_header.encode(\"utf-8\")) + serialized_msg\n\n # send msg to client\n self.server_socket.send(full_msg)", "def send(self, msg, receiver):\n raise NotImplementedError", "def send(self, data):", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "def send_message(self, msg):\n self.logger.debug(msg)\n self.writer.send(json.dumps(msg))", "def send_jsonified(self, msg, stats=True):\n raise NotImplemented()", "def send_pickle(self, object, header, destination):\n\t\tprint(\"Sending pickled object with header\",header)\n\t\tmessage=pickle.dumps((\"START\",header,object,'END'))\n\t\tdata,addr=[],[]\n\t\tcomplete=False\n\t\tn=0\n\t\tfor i in range(0,len(message),420):\n\t\t\tdata.append(message[i:i+420])\t\t\n\t\twhile not complete:\n\t\t\tself.listener.sendto(data[n],destination)\n\t\t\tprint(\"Server sent datagram number\",n)\n\t\t\twhile addr!=destination:\n\t\t\t\tmsg,addr=self.listener.recvfrom(128)\n\t\t\tif msg.decode(enc)[8:]=='next_datagram':\n\t\t\t\tprint(\"Server received confirmation of datagram\",i,\"receipt\")\n\t\t\t\tn+=1\n\t\t\t\taddr,msg='',''\n\t\t\t\tcontinue\n\t\t\telif msg.decode(enc)[8:]=='complete':\n\t\t\t\tprint(\"Server received confirmation whole message receipt\")\n\t\t\t\tcomplete=True\n\t\t\t\tbreak\n\t\tprint(\"Successfully Sent pickled object\")", "def send_msg(self, type, data):\n data = json.dumps(\n {\n \"job\": 
self._job_id,\n \"idx\": self._job_idx,\n \"tool\": self._tool,\n \"type\": type,\n \"data\": data\n },\n\n # use this so that users don't run into errors with ObjectIds not being\n # able to be encodable. If using bson.json_util.dumps was strictly used\n # everywhere, could just use that dumps method, but it's not, and I'd rather\n # keep it simple for now\n cls=FriendlyJSONEncoder\n )\n\n self._connected.wait(2 ** 31)\n\n data_len = struct.pack(\">L\", len(data))\n if not self._dev:\n try:\n with self._send_recv_lock:\n self._sock.send(data_len + data)\n except:\n # yes, just silently fail I think???\n pass", "def send_sensor_proto(self, sensor_proto):\n self.sensor_proto_sender.send(sensor_proto)", "def send(self, msg_dict):\n message = ejson.dumps(msg_dict)\n super(DDPSocket, self).send(message)\n self._debug_log('<<<{}'.format(message))", "def inspect(obj:Any) -> None:\n\t\tLogging._log(Logging.logLevel, obj)", "def send_message(self, proto_buf):\n # print 'sending....'\n #s = message.SerializeToString()\n # packed_len = struct.pack(self.packformat, len(message))\n message = proto_buf.SerializeToString()\n packed_len = str(len(message) + 100000000)\n server_log.debug(\"Sending msg of length: {0}\".format(packed_len))\n self.sock.sendall(packed_len + message)", "async def on_socket_send(self, msg: \"Msg | MsgProto\") -> None:", "def send(self, obj):\n\t\tif not isinstance(obj, NotificationMessage):\n\t\t\traise ValueError, u\"You can only send NotificationMessage objects.\"\n\t\tself._send_queue.put(obj)" ]
[ "0.6729722", "0.65537256", "0.62075424", "0.6126205", "0.6092147", "0.6076926", "0.60404396", "0.6032093", "0.59861624", "0.59832275", "0.59643966", "0.59317803", "0.5841705", "0.5838265", "0.5838265", "0.5838265", "0.57502455", "0.57261884", "0.5718602", "0.56995785", "0.56558406", "0.5648886", "0.5602186", "0.55792", "0.5573707", "0.55046743", "0.5494863", "0.54931563", "0.54806215", "0.548033" ]
0.7474465
0
Broadcast a message to all nodes in self.peers, the list should include myself
def bcast(self, msg): for k, v in self.peers.iteritems(): proto = v[2] proto.send_obj(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)", "def broadcast(self, message):\n for s in self.connections:\n s.send(bytes(message, encoding='utf-8'))", "def broadcast(self, msg):\n for client in self.clients.values():\n send_data(client.socket, msg)", "def broadcast(self, msg, mtype = 'message', back = True):\n for p in DixitConnection.participants:\n if back or (DixitConnection.participants[p] != self):\n DixitConnection.participants[p].emit(mtype, msg)", "def Broadcast(self, method, *args, **kwargs):\n for peer_id, (host, port, peer) in self.peers.iteritems():\n logging.debug('Calling method %r on peer %r.' % (method, peer_id))\n m = getattr(peer, method)\n m(self.peer_id, *args, **kwargs)", "def broadcast(msg):\n\n for sock in clients:\n sock.send(bytes(msg, \"utf-8\"))", "def broadcast_message(msg: str):\r\n\tfor ip in _clients.keys():\r\n\t\tsend_message(ip, msg)", "def broadcast(self, tx):\n\n for neighbor_id in self.adjacencies:\n self.sendMsg(neighbor_id, Message(self.id, Type.BLOCK, tx))", "def broadcast(self, addr, message):\n for addr in set(six.iterkeys(self.addr_to_conn_struct_map)) - {addr}:\n try:\n self.addr_to_conn_struct_map[addr].conn.send(message)\n except:\n # if we have any error sending, close the client connection, then remove it from our list\n self.clean(addr)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\")+msg)", "def broadcast(self, clients, msg):\n self.server.broadcast(clients, msg)", "def broadcast(self, message, exclude=()):\r\n\t\tfor player in self.players:\r\n\t\t\tif player not in exclude:\r\n\t\t\t\tplayer.send(message)", "def broadcast(client, msg):\n for client_target in CLIENT_LIST:\n if client_target != client:\n client_target.send(msg)", "def broadcast(message):\n for client in CLIENTS:\n client.send(message)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\") + msg)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix, \"utf8\")+msg)", "def broadcast(self, new_par):\n for client in self.clients:\n client.recv(new_par.copy())", "def broadcast(self, message):\r\n for c in self.characters:\r\n c.notify(message)", "def send_to_all(self, message: Message):\n\t\tto_send = self.registry.get_user(message.sender) + \": \" + message.body\n\n\t\tfor ip in self.registry.ip():\n\t\t\tself.send(to_send, ip)", "def broadcast(self, writer, message):\r\n for user in self.connection_pool:\r\n if user != writer:\r\n # We don't need to also broadcast to the user sending the message\r\n user.write(f\"{message}\\n\".encode())", "def broadcast(self, message, *args):\n\t\tComponent.broadcast(self, message, *args)\n\t\tfor comp in self._contents:\n\t\t\tcomp.broadcast(message, *args)", "def start_peers(self):\n for i in self.nodes:\n i.start()", "def broadcast(mensagem, prefixo = \"\"):\n for sock in clients:\n sock.send(bytes(prefixo, \"utf8\") + mensagem)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix) + msg)", "def broadcast(self,message_type,message):\n for socket in self.connections:\n if socket != self.server_socket:\n self.sendToSocket(socket,message_type,message)", "def _broadcast_message_to_users(self, message):\n self.logger.info(f\"Broadcasting message `{message}`\")\n for id, name in self.users.items():\n 
time.sleep(.1) # Telegram servers does not let you send more than 30 messages per second\n try:\n self.updater.bot.sendMessage(int(id), message)\n\n except BaseException as e:\n traceback.print_exc()\n self.logger.info(f'Failed to broadcast message to {name} due to {e}')", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\tfor sock in clients:\n\t\tsock.send(bytes(prefix, \"utf8\")+msg)", "def send_broadcast_packet(self, broadcast_packet: Packet) -> None:\n for neighbor_address in [*self.children_addresses, self.parent_address]:\n if neighbor_address:\n self.stream.add_message_to_out_buff(neighbor_address, broadcast_packet)\n log(f'Message packet added to out buff of Node({neighbor_address}).')", "def broadcast(data):\n for client in CLIENTS:\n client.write_message(data)", "def broadcast(self, txt):\n\n for i in self.bots:\n i.broadcast(txt)" ]
[ "0.7086546", "0.69735074", "0.69041973", "0.67947894", "0.67869544", "0.6753279", "0.64555633", "0.64517486", "0.6389841", "0.6380765", "0.6365368", "0.634614", "0.6294606", "0.6270231", "0.62304324", "0.62302434", "0.62242216", "0.62085104", "0.6192714", "0.6187464", "0.61657107", "0.6162834", "0.6160943", "0.61592615", "0.6156672", "0.61473835", "0.6131311", "0.6098637", "0.60880125", "0.60536116" ]
0.699885
1
sets all peers to promoters, only use this method for testing
def overwrite_promoters(self): logging.debug("NODE: overwriting promoters {}".format(len(self.peers))) self.promoters = self.peers.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_peers(self):\n for i in self.nodes:\n i.start()", "def honeypot_peer(self,honeypotids,ip,port):\n req = {\"type\":\"set_peer\",\n \"from\":self.network.mc_id,\n \"to\":honeypotids,\n \"ip\":ip,\n \"port\":port}\n expect_dict = {\"type\":\"peer_set\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = [msg[\"ip\"],msg[\"port\"]]\n return answer", "def setup_targets(self):\n for i in range(self.min_peers):\n self.targets.append(dict(address=0, tolerance=0, connected=False))\n # NOT IMPLEMENTED HERE", "def peer_addresses(self, peer_addresses):\n\n self._peer_addresses = peer_addresses", "def get_peers(self):\n self.peers = []\n retriever_methods = [\n m\n for m in rtorrent9.peer.methods\n if m.is_retriever() and m.is_available(self._rt_obj)\n ]\n # need to leave 2nd arg empty (dunno why)\n m = rtorrent9.rpc.Multicall(self)\n m.add(\n \"p.multicall\",\n self.info_hash,\n \"\",\n *[method.rpc_call + \"=\" for method in retriever_methods]\n )\n\n results = m.call()[0] # only sent one call, only need first result\n\n for result in results:\n results_dict = {}\n # build results_dict\n for m, r in zip(retriever_methods, result):\n results_dict[m.varname] = rtorrent9.rpc.process_result(m, r)\n\n self.peers.append(Peer(self._rt_obj, self.info_hash, **results_dict))\n\n return self.peers", "def setPeerToPeerNetwork(self, peerToPeerNetwork):\r\n raise NotImplementedError()", "def link_promoter(self, promoter: Promoter):\n if promoter.id not in [prom.id for prom in self.promoters]:\n self.promoters.append(promoter)", "def discover_peers():\n # TODO: Disable this function if peer discoverability is disabled in config\n\n peer_manager = load_plugin(\"chain.plugins.peers\")\n peers = peer_manager.peers()\n # Shuffle peers so we always get the peers from the different peers at the start\n random.shuffle(peers)\n for index, peer in enumerate(peers):\n his_peers = peer.fetch_peers()\n for his_peer in his_peers:\n add_peer(\n ip=his_peer.ip,\n port=his_peer.port,\n chain_version=his_peer.chain_version,\n nethash=his_peer.nethash,\n os=his_peer.os,\n )\n\n # Always get peers from at least 4 sources. 
As add_peer is async,\n # `has_minimum_peers` might actually return wrong result, but that will only\n # increase the number of peers we have.\n if index >= 4 and peer_manager.has_minimum_peers():\n break\n\n reverify_all_peers()", "def setPeer (self, peer):\n\t\tself.peer = peer", "def _try_peers(self, peers):\n for peer_entry in peers:\n if peer_entry['id'] == self.peer_id:\n continue\n\n print('Trying peer: {}'.format(peer_entry))\n peer = Peer(peer_entry['id'], peer_entry['ip'], peer_entry['port'], self._torrent)\n try:\n peer.connect(self.peer_id)\n except PeerConnectionError:\n continue\n else:\n self._peers.append(peer)\n peer.subscribe_for_messages_to_client(self.peer_message_receiver(peer))", "def bcast(self, msg):\n for k, v in self.peers.iteritems():\n proto = v[2]\n proto.send_obj(msg)", "def test_peers_get(self):\n pass", "def peer_list_all(self):\n return self.client.call('GET', self.name + 'peer-list/all')", "def set_mpi_procs(self, mpi_procs):\n self._mpi_procs = mpi_procs", "def set_mpi_procs(self, mpi_procs):\n QueueAdapter.set_mpi_procs(self, mpi_procs)\n\n num_nodes, rest_cores = self.hw.divmod_node(mpi_procs, omp_threads=1)\n if num_nodes == 0:\n self.qparams[\"nodes\"] = 1\n self.qparams[\"ppn\"] = mpi_procs\n else:\n if rest_cores != 0:\n # Pack cores as much as possible.\n num_nodes += 1\n self.qparams[\"nodes\"] = num_nodes\n self.qparams[\"ppn\"] = self.hw.cores_per_node", "def reset_remotes(self):\n for remote in [self._tester, self._sut]:\n remote.stop_all()\n remote.reset_stats()\n remote.set_pkt_size(self._cores, 64)\n remote.set_speed(self._cores, 100)\n remote.set_count(0, self._cores)", "def test_multiple_peers(self):\n\n\t\tself.n = tracker.make_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, [{'ip': '100.100.100.100', \\\n\t\t\t'peer id': 'test1', 'port': 1000}, \\\n\t\t\t\t{'ip': '100.100.100.100', \\\n\t\t\t\t\t'peer id': 'test2', 'port': 1000}])", "def move_peers(self, data):\n for peer_id, row, col in data:\n if peer_id in self.peers:\n self.peers[peer_id].move(row, col)\n return", "def _wake_players(self):\n self.__current_player = None\n players = self.get_players(self.get_state_info(\"wake_all\"))\n self.__player_wakeup_iter = iter(players.copy())\n self.player_done(None)", "def setPUsers(self, users):\n model = self.tvPUsers.get_model()\n model.clear()\n for user in users:\n model.append((user,))\n\n self.on_entPUser_changed(self.entPUser)\n self.on_tvPUsers_cursor_changed(self.tvPUsers)", "def Setup(self):\n self.Peers = [] # active nodes that we're connected to\n self.KNOWN_ADDRS = [] # node addresses that we've learned about from other nodes\n self.DEAD_ADDRS = [] # addresses that were performing poorly or we could not establish a connection to\n self.MissionsGlobal = []\n self.NodeId = random.randint(1294967200, 4294967200)", "async def peers() -> dict:\n ips = [peer.ip for peer in chain.peers]\n return {\"peers\": ips}", "def network_initialize(self, payments):\n for pt in payments:\n if pt.id1 not in self.users.keys():\n self.users[pt.id1] = set()\n if pt.id2 not in self.users.keys():\n self.users[pt.id2] = set()\n self.users[pt.id1].add(pt.id2)\n self.users[pt.id2].add(pt.id1)", "async def store_peers(self, peer: Peer):\n await self.peers.store(peer)", "def reset_players(self):\n self.dealer.reset()\n for player in self.players:\n player.reset()\n if player.bank <= 500:\n player.set_bank(1000)", "def 
test_multiple_peers(self):\n\n\t\tself.n = tracker.make_compact_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, \"dddd\\x03\\xe8dddd\\x03\\xe8\")", "def apply_peers(\n peers: Iterable[Peer],\n name: str,\n namespace: Union[None, str],\n legacy: bool = False,\n):\n patch = {'status': {peer.id: None if peer.is_dead else peer.as_dict() for peer in peers}}\n resource = (LEGACY_PEERING_RESOURCE if legacy else\n CLUSTER_PEERING_RESOURCE if namespace is None else\n NAMESPACED_PEERING_RESOURCE)\n patching.patch_obj(resource=resource, namespace=namespace, name=name, patch=patch)", "def setUp(self):\n self.pcp = PeerConnectionPool(None, None, 4321, schema)", "def setup_players(self, players):\n\t\tself.players.clear()\n\t\tids = set([p.get_player_id() for p in players])\n\t\tfor p in self.state.get_players():\n\t\t\tif p not in ids:\n\t\t\t\traise PlayerException(p)\n\t\tfor p in players:\n\t\t\tself.players[p.get_player_id()] = p", "def set_mpi_procs(self, mpi_procs):\n super(MOABAdapter, self).set_mpi_procs(mpi_procs)\n self.qparams[\"procs\"] = mpi_procs" ]
[ "0.65473676", "0.5898622", "0.58977187", "0.58709633", "0.5758995", "0.5739834", "0.55967075", "0.5580255", "0.5573473", "0.55024105", "0.54508686", "0.5439354", "0.54012555", "0.5398449", "0.5394799", "0.53926075", "0.53751516", "0.5373392", "0.53349924", "0.5330222", "0.53198296", "0.53065175", "0.53053623", "0.53043264", "0.5287089", "0.527519", "0.5262504", "0.5260026", "0.5244477", "0.5242318" ]
0.80836135
0
Release the play_lock, allowing this player to take action.
def release_play_lock(self) : self.play_lock = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_play_lock(self) : \n self.play_lock = True", "def release_lock(self):\n if self.lock:\n self.lock.release()", "def _unload_player(self):\n if self.player is not None:\n Logger.info(\"VLCPlayer: Unloading player\")\n self.player.event_manager().event_detach(\n EventType.MediaPlayerEndReached)\n if self.player.is_playing():\n self.player.stop()\n self.player.release()\n SoundVLCPlayer.player = None", "def stop(self):\n self.set_state_null()\n self.player = None", "def release_lock(self):\n self._multistore._unlock()", "def unload(self):\n self._unload_player()", "def release(self):\n self.is_locked = False\n os.unlink(self.lockfile)", "def release(self):\n fcntl.flock(self.lock_file, fcntl.LOCK_UN)", "def release_lock():\r\n get_lock.n_lock -= 1\r\n assert get_lock.n_lock >= 0\r\n # Only really release lock once all lock requests have ended.\r\n if get_lock.lock_is_enabled and get_lock.n_lock == 0:\r\n get_lock.start_time = None\r\n get_lock.unlocker.unlock()", "def api_release(self):\n\n self._api_release_lock_with_timer()", "def _release(self):\n try:\n os.unlink(self.lockfile)\n\n # Log success.\n logging.info(\"Released lock at \" + self.lockfile + \"...\")\n except:\n # Ignore all errors.\n pass", "def unlock(lock):\n lock.release()", "def release_lock (self):\n\n self.connection.commit ()\n self.locked = False", "def release(self, o):\n if not self.available(o):\n raise ValueError('you do not own this lock')\n self._owner = None", "def unlock(self):\n\n self.wait = False", "def unset_player(self):\n self.server.object_manager.remove_player(self.player.guid)\n self.player = None", "def un_lock(self):\n self._un_lock()", "def unlock(self):\n self.mainloop().unlock()", "def release(self):\n self.filelock.set()\n self.locked = False\n self.exclusive = False", "def release(self):\n self.filelock.set()\n self.locked = False\n self.exclusive = False", "def release(self):\r\n if self.is_locked:\r\n os.close(self.fd)\r\n os.unlink(self.lockfile)\r\n self.is_locked = False", "def release(self):\n if self.is_locked:\n os.close(self.fd)\n os.unlink(self.lockfile)\n self.is_locked = False", "def release(self):\n self.acquired = False", "def stop(self):\n if self.player and self.player.is_playing():\n self.player.pause()\n super().stop()", "def _unlock(self):\n self._lockFile.close()\n os.unlink(self._lockFilename)", "def unlock(self):\n assert self._pa_threaded_mainloop is not None\n # TODO: This is not completely safe. Unlock might be called without lock.\n assert self._lock_count > 0\n self._lock_count -= 1\n pa.pa_threaded_mainloop_unlock(self._pa_threaded_mainloop)", "def unlock (self):\n fcntl.flock(self._lockHandle, fcntl.LOCK_UN)\n self._lockHandle.close()", "def _release_lock(self, job_info):\n os.remove(self.lock_file)\n self.logger.debug(\"lock release for '%s'\" % job_info)", "def _unlock(self):\n if self.is_locked():\n self._unlink(self.lockfile)\n self._remove_unique_file()\n self._p(\"Lock removed.\")\n else:\n self._remove_unique_file()", "def __del__(self):\n if self.is_locked:\n self.release()" ]
[ "0.80757964", "0.681601", "0.6544812", "0.6506472", "0.6494424", "0.64629257", "0.6462893", "0.64540225", "0.6352633", "0.6288496", "0.62782377", "0.62748384", "0.62733674", "0.6267041", "0.62507015", "0.6235214", "0.6166721", "0.6162678", "0.6134564", "0.6134564", "0.611125", "0.6091232", "0.60526425", "0.60497385", "0.60330725", "0.6030467", "0.5988395", "0.5987305", "0.5963864", "0.59319097" ]
0.89283556
0
Prevent the player from doing anything by closing his play_lock.
def close_play_lock(self) : self.play_lock = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_play_lock(self) :\n self.play_lock = False", "def stop(self):\n self.set_state_null()\n self.player = None", "def stop(self):\n if self.player and self.player.is_playing():\n self.player.pause()\n super().stop()", "def _control_stop(self):\n self.player.stop()", "def nothing_playing(self):\n self.state.set_active_player(None)", "def unlock(self):\n self.mainloop().unlock()", "def stop(self):\n self.stopped = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?", "def unload(self):\n self._unload_player()", "def cancel(self):\n GameLoop.getInstance()._cancelation_token = True", "def _unload_player(self):\n if self.player is not None:\n Logger.info(\"VLCPlayer: Unloading player\")\n self.player.event_manager().event_detach(\n EventType.MediaPlayerEndReached)\n if self.player.is_playing():\n self.player.stop()\n self.player.release()\n SoundVLCPlayer.player = None", "def reject(self):\n\n # Send a completion message.\n if self.parent_gui is not None:\n self.signal_finished.emit()\n\n # Close the Window.\n self.player_thread.quit()\n self.close()", "def pushbutton_stop_clicked(self):\n\n if self.frame_player.run_player:\n self.frame_player.run_player = False", "async def skip(self):\n await self.play()", "def stop(self):\n if logging.getLogger().getEffectiveLevel() != 10:\n try:\n self._player.terminate()\n except AttributeError as e: # Make things a bit more user friendly and allow a stop command even if not playing\n if str(e) == \"'Player' object has no attribute '_player'\":\n return\n else:\n raise AttributeError(str(e)) # Only catch the known error and raise any others to pass them through\n logging.debug(\"Stopping Playback\")", "def force_stop(self):\n self.timer.stop()\n QMetaObject.invokeMethod(self.video_player, \"stop\", Qt.QueuedConnection)\n self.video_playing = False\n self.stopped = True", "def unlock(self):\n\n self.wait = False", "def stopGame(event):\n if event.action == sense_hat.ACTION_RELEASED:\n global playAgain, alive\n playAgain = False\n alive = False", "def slot_stop(self):\n\n self.thread.working = False", "async def cancel_game(self) -> None:\r\n # Checks if the client is already authenticated\r\n if self.is_auth is True and self.is_waiting is True and self.is_in_game is False:\r\n packaged_leave_game_queue_document = self.pkg_doc_manager(\"[CANCEL GAME]\", self.user_data[0])\r\n self.send(packaged_leave_game_queue_document)", "def _close(self):\n # TODO\n self.holding = False", "def stop_music(self):\n self.load_music(None)", "def kill(self):\n if self.living == True:\n self.living = False\n self.arrow_enter_callback(self)", "def stop(self):\n if self.is_playing:\n self.is_playing = False\n self.tstamp_play = None\n self.thread.stop()", "def __stop_game(self) -> None:\n # delete objects\n self.pipes = []\n self.player = []\n \n # stop timer\n pygame.time.set_timer(PIPE_SPAWN, 0)", "def quit(self):\n self.running = False\n pygame.quit()", "def kill(self):\n # stuff\n pygame.sprite.Sprite.kill(self)", "def kill(self):\n # stuff\n pygame.sprite.Sprite.kill(self)", "def unload(self):\n spotify.Error.maybe_raise(\n lib.sp_session_player_unload(self._session._sp_session))", "def end_game(self):\n self.game.stop_running()", "def resign_game(self):\n if self._current_player == \"BLACK\":\n self._game_state = \"WHITE_WON\"\n\n else:\n self._game_state = \"BLACK_WON\"" ]
[ "0.80847925", "0.74121135", "0.704765", "0.6801468", "0.67960155", "0.6727953", "0.67137367", "0.6661506", "0.66500396", "0.662484", "0.6569142", "0.6555061", "0.651804", "0.6496678", "0.64666593", "0.6449531", "0.6446811", "0.6435666", "0.640964", "0.64022917", "0.6401895", "0.64012074", "0.639961", "0.63648754", "0.6361305", "0.6344288", "0.6344288", "0.6341939", "0.6324274", "0.63138735" ]
0.86099297
0
Play the combination of cards defined by the given indices. ======= =============================================================== indices list of int; the indices of the cards to play as they appear in self.cards ======= ===============================================================
def play(self, indices) : cards = [self.cards[i] for i in indices] combination = Combination(cards) return combination
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play_card_by_index(self, index, **kwargs):\n target = kwargs.get('target', None)\n self.hand[index].play(target=target)", "def play_minion(num_cards_in_hand, card_idx):\n click_on_card(num_cards_in_hand, card_idx)\n mouseclick(510, 472)", "def play_card(self, index):\n if index < self._hand.size():\n card = self._hand.pop(index)\n self._active.push(card)\n self._money = self._money + card.money\n self._attack = self._attack + card.attack\n print '\\nCard played:\\n%s' % card\n else:\n print \"\\nInvalid index number! Please type a valid number!\"", "def play_card(self, player_index, card_index):\n card = self.hands[player_index][card_index]\n color_index = COLOR.index(card[0])\n if self.is_card_playable(card):\n # the color and the number match, add the card\n self.firework[color_index].append(card)\n # if we complete the firework for a color, we get an extra\n # blue stone\n if len(self.firework[color_index]) == 5:\n self.nb_blue_stone = min(self.nb_blue_stone + 1,\n MAX_BLUE_STONE)\n else:\n # error, the card cannot be played, remove a red_stone\n if self.nb_red_stone == 0:\n raise GameOverError(\"The card \" + card + \" cannot be\\\n played and there is no red stone anymore\")\n self.nb_red_stone = self.nb_red_stone - 1\n self.hands[player_index][card_index] = self.draw_card()\n return self.hands[player_index][card_index]", "def hit(\n self,\n card: List[Tuple[int, str, str]],\n card_index: int = 0\n ) -> None:\n self._cards[card_index].extend(card)", "def deal(list_of_Player, Multideck,dealer_index):\n for i in range(9):\n [player.hand.cards.append(Multideck.draw_top()) for player in list_of_Player]\n dealer_index = (dealer_index+1) % len(list_of_Player)\n return dealer_index", "def play(self):\n # log.debug(\"{0} is playing...\".format(self.label))\n legal_cards = []\n for c in self.hand:\n if self.is_legal_play(c):\n legal_cards.append(c)\n chosen_card_pos = random.randint(0, len(legal_cards)-1)\n # log.debug(str(legal_cards))\n chosen_card = legal_cards[chosen_card_pos]\n self.send_play(chosen_card)", "def play_card(self, card_index):\n if self.game.play_card(self.index, card_index) == NO_CARD:\n self.know[card_index] = NO_CARD\n else:\n self.know[card_index] = \"??\"", "def click_on_card(num_in_hand, card_idx):\n card_idx -= 1\n logger.debug(\"Clicking on hand card index {} with {} cards in hand\".\\\n format(card_idx, num_in_hand))\n coords = card_coords[num_in_hand]\n game_click(coords[card_idx])", "def next_play(self):\n\t\tfor card in self.hand:\n\t\t\tif is_valid(card):\n\t\t\t\tself.play_card(card)\n\t\t\t\treturn card\n\t\tglobal forced_rank\n\t\tif forced_rank == \"2\":\n\t\t\tglobal two_multiplier\n\t\t\tself.draw(two_multiplier)\n\t\t\tprint(f\"{self.name} draws {str(two_multiplier)} cards.\")\n\t\t\ttwo_multiplier = 0\n\t\t\tforced_rank = False\n\t\t\treturn None\n\t\tcard = self.draw(1)[0]\n\t\tprint(self.name + \" draws a card.\")\n\t\tif is_valid(card):\n\t\t\tself.play_card(card)\n\t\t\treturn card\n\t\tprint(self.name + \" passes the turn.\")", "def pair(handIn, indx = 0):", "def play(self):\r\n self.num_players = int(input(\"Welcome to card drawing game, Please enter number of players:\"))\r\n #contains all the draws from different players as list of draws per player.\r\n #with player number as key\r\n print(f\"Num players:{self.num_players}\")\r\n \r\n #initialize player points and draws\r\n self._initialize_player_stats()\r\n \r\n for y in range(DrawCardsGame.num_turns):\r\n for x in range(self.num_players):\r\n input(f\"Press enter for turn no {y+1} to 
draw for player {x+1}:\")\r\n card_drawn = self.cards.get_top_card()\r\n self.player_draws[f'{x}'].append(card_drawn)\r\n print(f\"card_drawn {card_drawn}\")\r\n self.player_points[f'{x}']+= DrawCardsGame.shades_points_dict[card_drawn[0]] * card_drawn[1]\r\n print(f\"player_points {self.player_points}\")\r\n \r\n print(repr(self.player_draws)) \r\n print(repr(self.player_points)) \r\n self.determine_winner(self.player_draws['0'],self.player_draws['1'])\r\n self.determine_winner1()", "def draw(self, canvas, yloc):\n \n for card in self.hand:\n card.draw(canvas, (xloc+(self.hand.index(card)*CARD_SIZE[0]), yloc))", "def print_card(self, index=0):\n print self._cards[index]", "def play(self):\n\n start_player = random.choice(self.names)\n turn_order = self.player_order(start=start_player)\n\n \"\"\"Play card from player hand when it is empty\"\"\"\n while self.hands[start_player].hand.cards:\n for name in turn_order:\n self.hands[name].player_card()\n print()", "def play_set(pl1, pl2, start):\n for plyr in pl1, pl2:\n print \"Cards of \", plyr.name, \" are :\"\n for this_card in plyr.hand:\n print this_card.num, this_card.suit \n \n pl1.score += 1", "def play_all(self):\n for _ in range(self._hand.size()):\n card = self._hand.pop()\n self._active.push(card)\n self._money = self._money + card.money\n self._attack = self._attack + card.attack\n print '\\nPlayed all cards!'", "def yatzy_card(players):\n return [[0 for x in range(0, 14)] for x in range(players)]", "def play(self, index):\n if index < 0 or index >= 9:\n raise IndexError(\"Invalid board index\")\n\n if self.board[index] != ' ':\n raise ValueError(\"Square already played\")\n\n # One downside of storing the board state as a string\n # is that you can't mutate it in place.\n board = list(self.board)\n board[index] = self.next_player\n self.board = u''.join(board)", "def quick_play(self, index=0):\n self.play(self.download(self.results[index]))", "def draw_a_card(cards):\n import random\n card_drawn = random.choices(card_deck)\n cards.append(card_drawn[0])\n return", "def draw(self, canvas, pos):\r\n # Draw every card in Hand\r\n for card in self.hand:\r\n card.draw(canvas, pos)\r\n # Hide the dealer's first card until the round ends\r\n if (self.tag.upper() == \"DEALER\" and\r\n self.hand.index(card) == 0 and in_play):\r\n canvas.draw_image(\r\n card_back,\r\n CARD_BACK_CENTER, CARD_BACK_SIZE,\r\n pos, CARD_SIZE)\r\n # Update the cards' coordinates to stack them appropriately\r\n pos[0] -= 15 * self.side", "def play_cards(player, boards_before, boards_after):\n\n new_species_index = len(player.boards)\n sorted_card_indices = sort_indices(player.cards)\n\n actions = {}\n actions.update({\n AddToWateringHole: AddToWateringHole(sorted_card_indices[0]),\n AddSpecies: [\n AddSpecies(sorted_card_indices[1], [sorted_card_indices[2]])\n ],\n AddPopulation: [\n AddPopulation(new_species_index, sorted_card_indices[3])\n ]\n })\n if len(player.cards) > 4:\n actions.update({\n AddBody: [\n AddBody(new_species_index, sorted_card_indices[4])\n ]\n })\n if len(player.cards) > 5:\n actions.update({\n ReplaceTrait: [\n ReplaceTrait(new_species_index, 0, sorted_card_indices[5])\n ]\n })\n\n return actions", "def deal_cards(self, players):\n hand_num = (len(self.deck)) // len(players)\n for index, player in enumerate(players):\n current_hand = self.deck[index * hand_num:(index + 1) * hand_num]\n current_hand.sort(key=functools.cmp_to_key(self.round.sort_card))\n player.set_current_hand(current_hand)\n player.initial_hand = 
cards2str(player.current_hand)", "def move_cards(self, hand, num):\n for i in range(num):\n hand.add_card(self.pop_card())", "def hit(self, hand_index: int) -> None:\n\n self._hands[hand_index].deal_card()", "def deal_opening_cards(self) -> None:\r\n for i in range(self.num_of_players):\r\n self.dealer.deal_cards_to(self.players[i].cards_stack, PokerRules.CARDS_PER_PLAYER)", "def print_collection(self, indexes=False):\n if indexes:\n for i in range(self.size()):\n print \"[%s] %s\" % (i, self._cards[i])\n else:\n for i in range(self.size()):\n print self._cards[i]", "def __init__(self, numberOfPairs):\n self._cards = []\n self._suitList=['s','h','d','c']\n for rank in range(1, numberOfPairs + 1): #start by 1 (not 0) as first rank\n suit=random.choice(self._suitList)\n c1 = Card(rank,suit)\n self._cards.append(c1)\n c2 = Card(rank,suit)\n self._cards.append(c2)", "def startGame(d_hand, p_hand, deck1):\n NUM_CARDS = 2\n\n for i in range(NUM_CARDS):\n d_hand.getCard(deck1.drawCard())\n p_hand.getCard(deck1.drawCard())" ]
[ "0.69412464", "0.6330761", "0.6193321", "0.5735996", "0.57159185", "0.57118446", "0.5599808", "0.55448586", "0.553087", "0.55256015", "0.5468829", "0.5448354", "0.54388344", "0.54294634", "0.540727", "0.54032826", "0.5387532", "0.5382046", "0.53635436", "0.53505224", "0.53424853", "0.53364646", "0.5317767", "0.5297387", "0.5287526", "0.52774477", "0.52720577", "0.52585673", "0.5243393", "0.52260673" ]
0.87667453
0
Initializes DINT model with the specified feature configuration and resampling strategy. The model gets created at schema_matcher server.
def __init__(self, schema_matcher, feature_config, resampling_strategy, description): logging.info("Initializing DINT model.") if not(type(schema_matcher) is SchemaMatcher): logging.error("DINTModel init: SchemaMatcher instance required.") raise InternalError("DINTModel init", "SchemaMatcher instance required") super().__init__("DINTModel", description=description) self.server = schema_matcher self.feature_config = feature_config self.resampling_strategy = resampling_strategy self.classifier = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n 
print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())", "def __init__(self, data_set, model, config):\n\n self.config = config\n self.data_set = data_set\n # Normalize or standardize the features, to have them ready to use as model input\n self.data_set.shift_and_scale(self.config[\"shift\"], self.config[\"scaling\"])\n self.model = model\n self.model.eval()\n self.device = torch.device(\"cpu\") if not self.config[\"use_gpu\"] \\\n else torch.device(\"cuda:\" + str(self.config[\"gpu_no\"]))", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def __init__(self, config):\n self.model = None\n self.config = config\n self.batch_size = config.get('batch_size')\n self.epochs = config.get('epochs')\n self.steps_per_epoch = config.get('steps_per_epoch')\n self.validation_steps = config.get('validation_steps')\n self.distributed = config.get('distributed', False)\n \n # init model\n self.init()", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def init(self, start_sample, fhat, budget):\n self.proposed_points = start_sample\n self.fhat = fhat\n self.n0 = start_sample.shape[0]\n for i in range(self.nstrats):\n self.sampling_strategies[i].init(self.proposed_points, fhat, budget)", "def initialize_model(config, d_out, is_featurizer=False):\n # If load_featurizer_only is True,\n # then split into (featurizer, classifier) for the purposes of loading only the featurizer,\n # before recombining them at the end\n featurize = is_featurizer or config.load_featurizer_only\n\n if config.model in ('resnet18', 'resnet34', 'resnet50', 'resnet101', 'wideresnet50', 'densenet121'):\n if featurize:\n featurizer = initialize_torchvision_model(\n name=config.model,\n d_out=None,\n **config.model_kwargs)\n classifier = nn.Linear(featurizer.d_out, d_out)\n model = (featurizer, classifier)\n else:\n model = initialize_torchvision_model(\n name=config.model,\n d_out=d_out,\n **config.model_kwargs)\n\n elif 'bert' in config.model:\n if featurize:\n featurizer = initialize_bert_based_model(config, d_out, featurize)\n classifier = nn.Linear(featurizer.d_out, d_out)\n model = (featurizer, classifier)\n else:\n model = initialize_bert_based_model(config, d_out)\n\n elif config.model == 'resnet18_ms': # multispectral resnet 18\n from models.resnet_multispectral import ResNet18\n if featurize:\n featurizer = ResNet18(num_classes=None, **config.model_kwargs)\n classifier = nn.Linear(featurizer.d_out, d_out)\n model = (featurizer, classifier)\n else:\n model = ResNet18(num_classes=d_out, **config.model_kwargs)\n\n elif config.model == 'gin-virtual':\n from models.gnn import GINVirtual\n if featurize:\n featurizer = GINVirtual(num_tasks=None, **config.model_kwargs)\n classifier = nn.Linear(featurizer.d_out, d_out)\n model = (featurizer, classifier)\n else:\n model = GINVirtual(num_tasks=d_out, **config.model_kwargs)\n\n elif config.model == 'code-gpt-py':\n from models.code_gpt import GPT2LMHeadLogit, GPT2FeaturizerLMHeadLogit\n from transformers import GPT2Tokenizer\n name = 
'microsoft/CodeGPT-small-py'\n tokenizer = GPT2Tokenizer.from_pretrained(name)\n if featurize:\n model = GPT2FeaturizerLMHeadLogit.from_pretrained(name)\n model.resize_token_embeddings(len(tokenizer))\n featurizer = model.transformer\n classifier = model.lm_head\n model = (featurizer, classifier)\n else:\n model = GPT2LMHeadLogit.from_pretrained(name)\n model.resize_token_embeddings(len(tokenizer))\n\n elif config.model == 'logistic_regression':\n assert not featurize, \"Featurizer not supported for logistic regression\"\n model = nn.Linear(in_features=config.in_features, out_features=d_out, **config.model_kwargs)\n elif config.model == 'unet-seq':\n from models.CNN_genome import UNet\n if featurize:\n featurizer = UNet(num_tasks=None, **config.model_kwargs)\n classifier = nn.Linear(featurizer.d_out, d_out)\n model = (featurizer, classifier)\n else:\n model = UNet(num_tasks=d_out, **config.model_kwargs)\n\n elif config.model == 'fasterrcnn':\n if featurize:\n raise NotImplementedError('Featurizer not implemented for detection yet')\n else:\n model = initialize_fasterrcnn_model(config, d_out)\n model.needs_y = True\n\n else:\n raise ValueError(f'Model: {config.model} not recognized.')\n\n # Load pretrained weights from disk using our utils.load function\n if config.pretrained_model_path is not None:\n if config.model in ('code-gpt-py', 'logistic_regression', 'unet-seq'):\n # This has only been tested on some models (mostly vision), so run this code iff we're sure it works\n raise NotImplementedError(f\"Model loading not yet tested for {config.model}.\")\n\n if 'bert' not in config.model: # We've already loaded pretrained weights for bert-based models using the transformers library\n try:\n if featurize:\n if config.load_featurizer_only:\n model_to_load = model[0]\n else:\n model_to_load = nn.Sequential(*model)\n else:\n model_to_load = model\n\n prev_epoch, best_val_metric = load(\n model_to_load,\n config.pretrained_model_path,\n device=config.device)\n\n print(\n (f'Initialized model with pretrained weights from {config.pretrained_model_path} ')\n + (f'previously trained for {prev_epoch} epochs ' if prev_epoch else '')\n + (f'with previous val metric {best_val_metric} ' if best_val_metric else '')\n )\n except Exception as e:\n print('Something went wrong loading the pretrained model:')\n traceback.print_exc()\n raise\n\n # Recombine model if we originally split it up just for loading\n if featurize and not is_featurizer:\n model = nn.Sequential(*model)\n\n # The `needs_y` attribute specifies whether the model's forward function\n # needs to take in both (x, y).\n # If False, Algorithm.process_batch will call model(x).\n # If True, Algorithm.process_batch() will call model(x, y) during training,\n # and model(x, None) during eval.\n if not hasattr(model, 'needs_y'):\n # Sometimes model is a tuple of (featurizer, classifier)\n if is_featurizer:\n for submodel in model:\n submodel.needs_y = False\n else:\n model.needs_y = False\n\n return model", "def _setup(self, config: dict):\n\n #This is important! 
When this is declared outside, the tune can give errors.\n import tensorflow as tf\n\n ## CONFIG\n batch_size = 30\n\n self.train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)\n self.val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(batch_size)\n\n self.model = Model1DCNN(num_classes=num_classes, dilations=config[\"num_dilations\"], filter_size=config[\"filter_size\"])\n self.optimizer = tf.keras.optimizers.Adam(lr=config[\"lr\"])\n self.train_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n self.val_loss = tf.keras.metrics.Mean(name=\"val_loss\")\n self.train_acc = tf.keras.metrics.SparseCategoricalAccuracy()\n self.val_acc = tf.keras.metrics.SparseCategoricalAccuracy()\n self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)\n\n @tf.function\n def train_step(x, y):\n \"\"\"\n does a single training step with the provided batch and updates the weights corresponding to the\n loss defined in the self.loss_object function\n\n :param batch:\n \"\"\"\n with tf.GradientTape() as tape:\n predictions = self.model(x)\n loss = self.loss_object(y, predictions)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n\n self.train_loss(loss)\n self.train_acc(y, predictions)\n\n @tf.function\n def val_step(x, y):\n \"\"\"\n does a single validation step with the provided batch\n\n :param batch:\n \"\"\"\n predictions = self.model(x)\n loss = self.loss_object(y, predictions)\n\n self.val_loss(loss)\n self.val_acc(y, predictions)\n\n self.tf_train_step = train_step\n self.tf_val_step = val_step", "def __init__(self, config):\n super(TFSlimFeaturizer, self).__init__()\n self.config = config\n self.validate(self.config)\n self._classifier = None", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def __init__(self):\n self.model_description: Dict[str, Any] = get_model_description()\n self.model_name: str = self.model_description['name']\n self.model_version: str = self.model_description['version']\n\n # Make sure we do not have a trailing slash to muck up processing later.\n self.event_dir: Optional[str] = None\n self.zone_name: Optional[str] = None\n self.fault_time: Optional[str] = None\n\n self.example: Example = None\n self.validator: ExampleValidator = ExampleValidator()\n self.common_features_df: pd.DataFrame = None\n\n self.cavity_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'cavity_model.onnx'))\n self.fault_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'fault_model.onnx'))", "def __init__(self):\n Sampler.__init__(self)\n self._registeredIdentifiers = set() # tracks job identifiers used for this adaptive sampler and its inheritors\n self._prefixToIdentifiers = {} # tracks the mapping of run prefixes to particular identifiers\n self._inputIdentifiers = {} # identifiers for a single realization\n self._targetEvaluation = None # data object with feedback from sample realizations\n self._solutionExport = None # data object for solution printing\n self._requireSolnExport = False # if this object requires a solution export\n # NOTE 
TargetEvaluations consider all the Step <Output> DataObjects as candidates, so requiring\n # exactly one TargetEvaluation forces only having one <Output> DataObject in AdaptiveSampling\n # MultiRun Steps. For now, we leave it as \"n\".\n self.addAssemblerObject('TargetEvaluation', InputData.Quantity.one_to_infinity) # Place where realization evaluations go", "def initialise_sampler(self):\n raise NotImplementedError", "def __init__(self, config, xtdim, batch_size):\n self.float_type = 'float32' # This should be the default\n self.config = config\n self.dt = self.config['dt']\n\n self.n_input = self.config['n_input']\n self.n_output = self.config['n_output']\n\n self.batch_size = batch_size\n self.xtdim = xtdim\n\n # time major\n self.x = np.zeros((xtdim, batch_size, self.n_input), dtype=self.float_type)\n self.y = np.zeros((xtdim, batch_size, self.n_output), dtype=self.float_type)\n self.cost_mask = np.zeros((xtdim, batch_size, self.n_output), dtype=self.float_type)\n # strength of input noise\n self._sigma_x = config['sigma_x'] * math.sqrt(2./self.config['alpha'])\n\n if config['rule_name'] == 'timed_spatial_reproduction_broad_tuning' \\\n or config['rule_name'] == 'spatial_reproduction_broad_tuning' \\\n or config['rule_name'] == 'spatial_comparison_broad_tuning' \\\n or config['rule_name'] == 'spatial_change_detection_broad_tuning':\n self.n_guassianline = 32 + 12\n self.sd_gaussianline = 4.\n else:\n self.n_guassianline = 32\n self.sd_gaussianline = 2.\n\n self.pref_line_gaussian = np.arange(0, self.n_guassianline)", "def init_model(self):\n pass", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def __init__(self, config, xtdim, batch_size):\n self.float_type = 'float32' # This should be the default\n self.config = config\n self.dt = self.config['dt']\n\n self.n_input = self.config['n_input']\n self.n_output = self.config['n_output']\n\n self.batch_size = batch_size\n self.xtdim = xtdim\n\n # time major\n self.x = np.zeros((xtdim, batch_size, self.n_input), dtype=self.float_type)\n self.y = np.zeros((xtdim, batch_size, self.n_output), dtype=self.float_type)\n self.cost_mask = np.zeros((xtdim, batch_size, self.n_output), dtype=self.float_type)\n # strength of input noise\n self._sigma_x = config['sigma_x'] * math.sqrt(2./self.config['alpha'])\n\n if config['rule_name'] == 'timed_spatial_reproduction_broad_tuning' \\\n or config['rule_name'] == 'spatial_reproduction_broad_tuning':\n self.n_guassianline = 32 + 12\n self.sd_gaussianline = 4.\n else:\n self.n_guassianline = 32\n self.sd_gaussianline = 2.\n\n self.pref_line_gaussian = np.arange(0, self.n_guassianline)", "def __init__(self, env, sampling_function=None, instances=None, reset_interval=0):\n super(InstanceSamplingWrapper, self).__init__(env)\n if sampling_function:\n self.sampling_function = sampling_function\n elif instances:\n self.sampling_function = self.fit_dist(instances)\n else:\n raise Exception(\"No distribution to sample from given\")\n self.reset_interval = reset_interval\n self.reset_tracker = 0", "def __init__(self, method=\"RandomForest\", n_random_feature_ratio=5, problem_type=\"infer\", rows_to_scan=\"all\"):\n self.feature_importances_ = None\n self.method = method\n self.problem_type = problem_type \n self.rows_to_scan = rows_to_scan \n self.n_random_feature_ratio = n_random_feature_ratio", "def __init__(\n self,\n dataset_root: str = \"./dataset\",\n intersection_file: str = None,\n lr: float = 1e-2,\n model: 
dict = None,\n analytic_sender_id: str = \"analytic_sender\",\n fp16: bool = True,\n val_freq: int = 1000,\n ):\n super().__init__()\n self.dataset_root = dataset_root\n self.intersection_file = intersection_file\n self.lr = lr\n self.model = model\n self.analytic_sender_id = analytic_sender_id\n self.fp16 = fp16\n self.val_freq = val_freq\n\n self.target_names = None\n self.app_root = None\n self.current_round = None\n self.num_rounds = None\n self.batch_size = None\n self.writer = None\n self.client_name = None\n self.other_client = None\n self.device = None\n self.optimizer = None\n self.criterion = None\n self.transform_train = None\n self.transform_valid = None\n self.train_dataset = None\n self.valid_dataset = None\n self.split_id = None\n self.train_activations = None\n self.train_batch_indices = None\n self.train_size = 0\n self.val_loss = []\n self.val_labels = []\n self.val_pred_labels = []\n self.compute_stats_pool = None\n\n # use FOBS serializing/deserializing PyTorch tensors\n fobs.register(TensorDecomposer)", "def init(param):\n MODULE_HELPER.check_parameter(param, key='featureCount_exec', dtype=str)\n MODULE_HELPER.check_parameter(param, key='featureCount_t', dtype=str)\n MODULE_HELPER.check_parameter(param, key='featureCount_id', dtype=str)\n MODULE_HELPER.check_parameter(param, key='featureCount_by_meta', dtype=bool)\n MODULE_HELPER.check_parameter(param, key='Rscript_exec', dtype=str)\n\n #deriving the stranded parameter\n if param['stranded'] == 'reverse':\n param['featureCount_s'] = '2'\n elif param['stranded'] == 'yes':\n param['featureCount_s'] = '1'\n else:\n param['featureCount_s'] = '0'", "def __init__(self, config_dictionary): #completed\n self.mode = 'test'\n super(RNNLM_Tester,self).__init__(config_dictionary)\n self.check_keys(config_dictionary)\n \n self.weight_matrix_name = self.default_variable_define(config_dictionary, 'weight_matrix_name', arg_type='string')\n self.model.open_weights(self.weight_matrix_name)\n self.label_file_name = self.default_variable_define(config_dictionary, 'label_file_name', arg_type='string',error_string=\"No label_file_name defined, just running forward pass\",exit_if_no_default=False)\n if self.label_file_name != None:\n self.labels = self.read_label_file()\n# self.labels, self.labels_sent_id = self.read_label_file()\n self.check_labels()\n else:\n del self.label_file_name\n self.dump_config_vals()\n self.classify()\n self.write_posterior_prob_file()", "def __init__(self):\n super().__init__()\n self.indices_dir = ''\n self.split_file = ''\n\n self.model = '' # string identifying the model\n self.experiment = '' # string to describe experiment\n self.maps = [data.ID_MAP_T1H2O, data.ID_MAP_FF, data.ID_MAP_B1] # the used maps\n self.patch_size = [1, 32, 32]\n\n # training configuration\n self.loss = 'mse' # string identifying the loss function (huber, mse or mae)\n self.learning_rate = 0.01 # the learning rate\n self.dropout_p = 0.2\n self.norm = 'bn' # none, bn\n\n # we use the mean absolute error as best model score\n self.best_model_score_is_positive = True\n self.best_model_score_name = 'mae'", "def _init_feedback(self):\n self._feedback_layer = DNN(\n num_units=self._params['generator']['feedback'],\n activation=tf.nn.sigmoid,\n kernel_initializer=tf.contrib.layers.xavier_initializer()\n )", "def __init__(self, config=None, class_min=0):\n self.config = self._resolve_config(config)\n self.class_min = self._resolve_class_min(class_min)\n self.model = LogReg(**self.config)\n self.scaler = StandardScaler()", "def 
initialize_model(self):\n pass", "def _initialize(self) -> None:\n p = self.params\n # We make self.input public so that users can access its methods like\n # IdsToStrings if needed.\n with py_utils.infeed_context_scope(\n infeed_host_index=p.infeed_host_index,\n num_infeed_hosts=p.num_infeed_hosts):\n self.input = p.input.Instantiate()\n\n if hasattr(self.input, 'datasource') and isinstance(\n self.input.datasource, datasource.TFDatasetSource):\n # For the special case when the input is implemented by a tf.data.Dataset,\n # use it directly. Otherwise roundtrip adaptions may result in returning\n # duplciate batches.\n self._get_next_fn = self.input.datasource.GetNext\n else:\n self._get_next_fn = tf.function(self._get_batch)\n self._num_batches_produced = 0", "def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()", "def __init__(self, in_features, out_features):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n self.in_features = in_features\n self.out_features = out_features\n\n self.__MEAN = 0\n self.__STD = 0.0001\n\n self.params = {\n 'weight': np.random.normal(loc=self.__MEAN, scale=self.__STD, size=(out_features, in_features)), \n 'bias': np.zeros(out_features),\n }\n self.grads = {\n 'weight': None, \n 'bias': None,\n }\n\n self.input_cache = None\n ########################\n # END OF YOUR CODE #\n #######################", "def _init_feature_processer(self):\n try:\n model_config = self._conf.get(PredictConstance.BASE_CONFIG,\n PredictConstance.FEATURE_ENGINEERING_CONFIG)\n conf = configparser.ConfigParser()\n conf.read(model_config)\n self._feature_processor = data_processor.DataProcessor(conf=conf,log_path = self.xeasy_log_path)\n if self._feature_processor.init() == runstatus.RunStatus.SUCC:\n return True\n else:\n return False\n except Exception as err:\n self.managerlogger.logger.error(\"init model error: %s\" % err)\n self.errorlogger.logger.error(\"init model error:\\n %s\" % traceback.format_exc())\n return False" ]
[ "0.6067533", "0.59074247", "0.5883574", "0.57862705", "0.5761598", "0.57383394", "0.5631568", "0.5629441", "0.55597234", "0.5543431", "0.5540538", "0.5529641", "0.5491851", "0.5476369", "0.54582095", "0.54497975", "0.54382914", "0.5434254", "0.5421252", "0.54178", "0.5411861", "0.54108876", "0.5380848", "0.5374989", "0.5356535", "0.53509843", "0.5345652", "0.5340336", "0.5332955", "0.5329797" ]
0.8336873
0
Train DINTModel and return True.
def train(self): logging.info("Training DINTModel.") start = time.time() tr = self.classifier.train() return time.time() - start
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train():\n pass", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n converged = False\n while not converged:\n failures = 0\n for item, classification in dataset.iterate_once(1):\n prediction = self.get_prediction(item)\n if prediction != nn.as_scalar(classification):\n failures += 1\n self.w.update(item, nn.as_scalar(classification))\n if failures == 0:\n converged = True", "def trainNet():", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def trainModel( self, featureTrain, classTrain):", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n 
torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def train(self):\n self.training = True", "def train_digits(self):\n try:\n # TODO: Make decision taking validation into account validation\n metrics_result = self.model.train()\n logging.info(\"model performance is {}\".format(metrics_result))\n return metrics_result is not None\n # TODO: Apply specific exceptions and log,\n except:\n logging.error(\"Prediction Error:\", sys.exc_info()[0])\n raise ValueError()", "def train(self, train_dataset):\n\n # check fine_tuning option\n model_path = os.path.join(self.check_point, 'model.pt')\n if self.fine_tune and not os.path.exists(model_path):\n raise Exception('Cannot find %s.' %model_path)\n elif self.fine_tune and os.path.exists(model_path):\n if self.verbose:\n pass\n self.model = torch.load(model_path)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n \n # capture best model\n best_val_psnr = -1\n best_model_state = self.model.state_dict()\n\n # Train the model\n for epoch in range(self.num_epochs):\n self._epoch_step(train_dataset, epoch)\n self.scheduler.step()\n\n\n\n # capture running PSNR on train and val dataset\n train_psnr, train_ssim, _, _ = self._check_PSNR(train_dataset)\n self.hist_train_psnr.append(train_psnr)\n \n\n \n # write the model to hard-disk for testing\n if not os.path.exists(self.check_point):\n os.makedirs(self.check_point)\n model_path = os.path.join(self.check_point, 'model.pt')\n torch.save(self.model, model_path)", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train():\n \n ## check for request data\n if not request.json:\n print(\"ERROR: API (train): did not receive request data\")\n return jsonify(False)\n\n ## set the test flag\n test = False\n if 'mode' in request.json and request.json['mode'] == 'test':\n test = True\n\n print(\"... training model\")\n model = model_train(test=test)\n print(\"... 
training complete\")\n\n return(jsonify(True))", "def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset = dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n deep_neural_network_model.save('deep_neural_network_model')", "def train(self, dataset):\n if not self.model:\n self._build()\n\n samples_per_batch = self.dataset.number_of_examples_train() // self.config.batch_size\n\n # Train over multiple epochs\n with tf.Session() as sess:\n best_loss = float('inf')\n best_val_epoch = 0\n sess.run(self.init)\n\n # train until we reach the maximum number of epochs\n for epoch in range(self.config.max_epochs):\n total_training_loss = 0\n num_correct = 0\n prev_prediction = 0\n\n print(\" \")\n print('Epoch {}'.format(epoch))\n# start = time.time()\n\n for i in range(samples_per_batch):\n tr_elems, answers, i_seq_len, q_seq_len, imask = self.dataset.next_batch(self.config.batch_size)\n tr_elems, answers, imask = self.preprocess_batch(tr_elems[0], tr_elems[1], answers, imask)\n ans = np.zeros((self.config.batch_size, self.dataset.vocab_size))\n for i in np.arange(self.config.batch_size):\n ans[i][answers[i]] = 1.\n # ans[np.arange(self.config.batch_size), answers] = 1.0\n print(\"ans\", ans)\n print(\"answers\", answers)\n print(\"ans shape\", ans.shape)\n\n # For debugging:\n # Input module: _input_tensor - self.input_only_for_testing\n # Question module: _question_representation - self.question_representation\n # Episode module: _e_i - self.e_i / _e_m_s - self.episodic_memory_state\n loss, _, pred_prob, _projections = sess.run(\n [self.cost, self.optimizer, self.prediction, self.projections],\n feed_dict={self.input_placeholder: tr_elems[0],\n self.input_length_placeholder: i_seq_len,\n self.end_of_sentences_placeholder: imask,\n self.question_placeholder: tr_elems[1],\n self.question_length_placeholder: q_seq_len,\n self.labels_placeholder: ans,\n #self.gate_placeholder: [float(self.train_gate[i])]\n })\n\n total_training_loss += loss\n\n if np.argmax(pred_prob) == np.argmax(ans):\n num_correct += 1\n\n if i % self.config.update_length == 0:\n print \"Current average training loss: {}\".format(total_training_loss / (i + 1))\n print \"Current training accuracy: {}\".format(float(num_correct) / (i + 1))\n print(\"Ans: \" + 
str(self.dataset.ivocab[np.argmax(ans)]))\n print(\"Pred: \" + str(self.dataset.ivocab[np.argmax(pred_prob)]))", "def train(self, training_data):\n pass", "def train(self, batch):\n pass", "def train(self):\n raise NotImplementedError", "def train(self, trainData):\n pass", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 0 is loss.", "def train(args: Dict):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print('use device: %s' % device)\n\n train_data_src = read_corpus(args['--train-src'], source='src')\n train_data_tgt = read_corpus(args['--train-tgt'], source='tgt')\n\n dev_data_src = read_corpus(args['--dev-src'], source='src')\n dev_data_tgt = read_corpus(args['--dev-tgt'], source='tgt')\n\n train_data = list(zip(train_data_src, train_data_tgt))\n dev_data = list(zip(dev_data_src, dev_data_tgt))\n\n train_batch_size = int(args['--batch-size'])\n N = int(args['--N'])\n d_model = int(args['--d_model'])\n d_ff = int(args['--d_ff'])\n h = int(args['--h'])\n dropout = float(args['--dropout'])\n\n valid_niter = int(args['--valid-niter'])\n log_every = int(args['--log-every'])\n model_save_path = args['--save-to']\n lr=float(args['--lr'])\n\n vocab = Vocab.load(args['--vocab'])\n vocab_mask = torch.ones(len(vocab.tgt))\n vocab_mask[vocab.tgt['<pad>']] = 0\n\n model = make_model(len(vocab.src), len(vocab.tgt), N, d_model, d_ff, h, dropout)\n model = model.to(device)\n\n optimizer = NoamOpt(model.src_embed[0].d_model, 1, 400,\n torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.98), eps=1e-9))\n\n num_trial = 0\n train_iter = patience = cum_loss = report_loss = cum_tgt_words = report_tgt_words = 0\n cum_exmaples = report_examples = epoch = valid_num = 0\n hist_valid_scores = []\n train_time = begin_time = time.time()\n print('begin Maximum Likelihood Training')\n\n while True:\n epoch += 1\n for src_sents, tgt_sents in batch_iter(train_data, batch_size=train_batch_size, shuffle=True):\n train_iter += 1\n optimizer.zero_grad()\n batch_size = len(src_sents)\n\n example_losses = - model(src_sents, tgt_sents) #(batch_size,)\n batch_loss = example_losses.sum()", "def train(args, model, train_data_loader, dev_data_loader, device):\n\n\tmodel.train()\n\toptimizer = torch.optim.Adam(model.parameters())\n\tprint_loss_total = 0\n\tepoch_loss_total = 0\n\tstart = time.time()\n\n\t#### modify the following code to complete the training funtion\n\n\tbest_train_acc, best_dev_acc = 0.0, 0.0\n\n\tfor idx, batch in enumerate(train_data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t#### Your code here ----\n\n\t\t# zero out\n\t\toptimizer.zero_grad()\n\n\t\t# get output from model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# use loss_fn defined above to calculate loss\n\t\tloss = loss_fn(logits, labels)\n\n\t\t# use accuracy_fn defined above to calculate 'error' and number of examples ('num_examples') used to\n\t\t# calculate accuracy 
below.\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# backprop\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\t###Your code ends ---\n\t\taccuracy = 1 - error / num_examples\n\t\tclip_grad_norm_(model.parameters(), 5)\n\t\tprint_loss_total += loss.data.numpy()\n\t\tepoch_loss_total += loss.data.numpy()\n\n\t\tif (idx + 1) % args.checkpoint == 0 and idx > 0:\n\t\t\tprint_loss_avg = print_loss_total / args.checkpoint\n\n\t\t\tdev_acc = evaluate(dev_data_loader, model, device)\n\n\t\t\tprint('number of steps: %d, train loss: %.5f, train acc: %.3f, dev acc: %.3f, time: %.5f'\n\t\t\t % (idx + 1, print_loss_avg, accuracy, dev_acc, time.time() - start))\n\t\t\tprint_loss_total = 0\n\t\t\tif accuracy > best_train_acc:\n\t\t\t\tbest_train_acc = accuracy\n\t\t\tif dev_acc > best_dev_acc:\n\t\t\t\tbest_dev_acc = dev_acc\n\n\treturn best_train_acc, best_dev_acc", "def train(self):\n\t\traise NotImplementedError" ]
[ "0.6943487", "0.69396836", "0.6853591", "0.6760111", "0.6710526", "0.6703681", "0.669696", "0.6665524", "0.6639153", "0.6626513", "0.66127175", "0.658711", "0.6572034", "0.6572034", "0.6572034", "0.6572034", "0.6572034", "0.6546802", "0.65428543", "0.65414816", "0.6514876", "0.6511346", "0.6490602", "0.6485377", "0.6464131", "0.646232", "0.64466935", "0.64183605", "0.6416534", "0.6415858" ]
0.7758692
0
Prediction with DINTModel for the source.
def predict(self, source): # TODO: track run time logging.info("Predicting with DINTModel for source {}".format(source)) if source not in self.allowed_sources: logging.warning("Source '{}' not in allowed_sources. Skipping it.".format(source)) return None # upload source to the schema matcher server matcher_dataset = self.server.create_dataset(file_path=os.path.join("data", "sources", source + ".csv"), description="testdata", type_map={}) start = time.time() predict_df = self.classifier.predict(matcher_dataset).copy() predict_df["running_time"] = time.time() - start column_map = dict([(col.id, col.name) for col in matcher_dataset.columns]) predict_df["column_name"] = predict_df["column_id"].apply(lambda x: column_map[x]) predict_df["source_name"] = source predict_df["model"] = self.model_type predict_df["model_description"] = self.description label_dict = self._construct_labelData(matcher_dataset, filepath=os.path.join("data", "labels", source + ".columnmap.txt"), header_column="column_name", header_label="semantic_type") predict_df["user_label"] = predict_df["column_id"].apply( lambda x: label_dict[x] if x in label_dict else 'unknown') return predict_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, model, x_test):\n pass", "def predict(self, src): # real signature unknown; restored from __doc__\n pass", "def predict_step(self, x):\n\n input_x = self.session.graph.get_operation_by_name(\"input_x\").outputs[0]\n predictions_op = self.session.graph.get_operation_by_name(\"output/predictions\").outputs[0] \n\n d_ = {\n input_x: x\n }\n\n self.init_dataset(d_)\n\n return self.session.run([predictions_op])", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, predPoints=None):", "def predict(self, model, context, data):\n pass", "def target_predict(self, inp):\n return self.target_model.predict(inp)", "def predict(self, x, **kwargs):\n return self.tuner.get_best_models(1)[0].predict(x, **kwargs)", "def predict(self, X, pred_batch_size=None):", "def predict(self, X):", "def predict(self, X):", "def predict(self, obs):\n pass", "def predict(self, ex):\r\n # Eval mode\r\n self.network.eval()\r\n\r\n source_ids = ex['source_ids']\r\n source_pos_ids = ex['source_pos_ids']\r\n source_type_ids = ex['source_type_ids']\r\n source_mask = ex['source_mask']\r\n label = ex['label']\r\n\r\n if self.use_cuda:\r\n label = label.cuda(non_blocking=True)\r\n source_ids = source_ids.cuda(non_blocking=True)\r\n source_pos_ids = source_pos_ids.cuda(non_blocking=True) \\\r\n if source_pos_ids is not None else None\r\n source_type_ids = source_type_ids.cuda(non_blocking=True) \\\r\n if source_type_ids is not None else None\r\n source_mask = source_mask.cuda(non_blocking=True) \\\r\n if source_mask is not None else None\r\n\r\n score = self.network(source_ids=source_ids,\r\n source_pos_ids=source_pos_ids,\r\n source_type_ids=source_type_ids,\r\n source_mask=source_mask)\r\n\r\n loss = self.criterion(score, label)\r\n probs = f.softmax(score, 1).data.cpu().numpy().tolist()\r\n predictions = np.argmax(score.data.cpu().numpy(), axis=1).tolist()\r\n\r\n return {\r\n 'loss': loss,\r\n 'probs': probs,\r\n 'predictions': predictions,\r\n }", "def predict(self):\n raise NotImplementedError", "def predict(self, src):\n\n src = torch.as_tensor(src).float()\n\n self.eval()\n\n return self.forward(src)", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, data_in):\n pass", "def _predict(self, testX):\n pass", "def predict(self,x):\n return self._model.predict(x)", "def predict(self): \n return self.model.predict(self.test_x)", "def predict(self, X):\n raise NotImplementedError", "def predict_only(self):", "def prediction(self, x):\n t = self.model.predict(x)\n return t", "def predict(self, x):\n raise NotImplementedError(\"Please Implement this method\")", "def predict(self, obs):\n return self.model(obs)", "def predict(self, x):\n return self.model.predict(x)" ]
[ "0.6938811", "0.69291127", "0.68213713", "0.6790836", "0.6790836", "0.6790836", "0.67622983", "0.67259806", "0.67074686", "0.6683066", "0.66768557", "0.6623888", "0.6623888", "0.6618554", "0.6615593", "0.65755063", "0.6575005", "0.65682495", "0.65682495", "0.65682495", "0.6566281", "0.6547888", "0.65158045", "0.6494013", "0.6482079", "0.6481117", "0.6469458", "0.6466136", "0.64588773", "0.64439166" ]
0.73403883
0
Read columns from source, and return them as a list of Column objects (as defined in neural_nets.museum_data_reader)
def _read(self, source, label_source=None):
    filename = os.path.join("data", "sources", source+".csv")
    if label_source is None:
        label_filename = os.path.join("data", "labels", source + ".columnmap.txt")
    else:
        label_filename = os.path.join("data", "labels", label_source)
    df = pd.read_csv(filename, dtype=str)  # read the data source as a DataFrame
    # labels = pd.read_csv(label_filename)  # read the semantic labels of columns in df

    labels_frame = pd.read_csv(label_filename, na_values=[""], dtype={'column_name': 'str'})
    # dictionary (column_name, class_label)
    labels = labels_frame[['column_name', 'semantic_type']].dropna().set_index('column_name')['semantic_type'].to_dict()
    # logging.info("labels:{}".format(labels))

    source_cols = []
    for c in df.columns:
        # label = str(labels.get_value(labels[labels['column_name'] == c].index[0], 'semantic_type'))  # extract semantic label of column c
        if c in labels:
            label = labels[c]
        else:
            label = 'unknown'
        col = Column(filename=filename, colname=c, title=label, lines=list(df[c]))
        source_cols.append(col)

    return source_cols
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_columns(source):\n return _get_tuple(source)", "def _get_column_parts(source):\n return _get_tuple(source, delimiter=' ')", "def load_columns(self):\n pass", "def columns(self) -> java.util.Collection:\n ...", "def list_column(self,\n column_name: str,\n start: int = None,\n end: int = None) -> List:\n return [getattr(i, column_name) for i in self.data[start:end]]", "def _get_columns(self):\n columns = []\n for column in self.plate_meta['columns']:\n columns.append(column['name'])\n self.columns = columns", "def readindata(chosen_columns, chosen_url):\n return pd.read_csv(chosen_url, usecols=chosen_columns)", "def columns(self):\n return self.__column_list", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(\n row[\"date_readable\"], \"%d %b %Y\"\n ),\n \"trans_type\": row[\"type\"],\n \"amount\": int(row[\"euro\"]) + int(row[\"cents\"]) / 100,\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(row[\"timestamp\"], \"%b %d %Y\"),\n \"trans_type\": row[\"type\"],\n \"amount\": row[\"amount\"],\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def get_columns(self):\n columns = []\n for column in self.columns:\n columns.append(column.data.name)\n return columns", "def columns(self):\n recordset = win32com.client.Dispatch('ADODB.Recordset')\n recordset.Open(\n unicode('SELECT * FROM [%s]' % self.name), self.document.connection,\n 0, 1)\n try:\n return [self.encoding(field.Name) for field in recordset.Fields]\n finally:\n recordset.Close()\n del recordset", "def load_columns(self, csv_data):\n column_date = []\n column_time = []\n column_hold = []\n column_outcome = []\n for row in dataframe_to_rows(csv_data, index=False):\n cell_date = row[18]\n cell_date = cell_date.split(': ')[1]\n cell_time = row[23]\n cell_hold = row[24]\n cell_outcome = row[25]\n column_date.append(cell_date)\n column_time.append(cell_time)\n column_hold.append(cell_hold)\n column_outcome.append(cell_outcome)\n return column_date, column_time, column_hold, column_outcome", "def read_fields_sources_list():\n from pandas import read_csv\n try:\n sources = read_csv(FIELDS_SOURCES_LIST)\n names = read_csv(SOURCES_NAMES_LIST)\n except FileNotFoundError:\n create_fields_sources_list()\n sources = read_csv(FIELDS_SOURCES_LIST)\n names = read_csv(SOURCES_NAMES_LIST)\n return sources, names", "def cols(self) -> List[str]:\n if self._cols:\n cols = self._cols\n 
else:\n if os.path.isfile(self.path):\n cols = io.list_columns_in_parquet(self.path)\n else:\n cols = db.list_cols(fqtable=self.fqtable)\n self._cols = cols\n return cols", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(row[\"date\"], \"%d-%m-%Y\"),\n \"trans_type\": row[\"transaction\"],\n \"amount\": row[\"amounts\"],\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def columns(self):\n return list(self._scala.observationColumns())", "def read_col(self, y):\n colrange = self._col_range(y)\n col = []\n\n for i in colrange:\n col.append(self._read_bit(i))\n\n return col", "def columns(self):\n headers = self.headers()\n for h in headers:\n col = self.get_column(h)\n yield col", "def columns(self) -> List[List]:\n return list(map(list, zip(*self.rows)))", "def all_columns(self):\r\n try:\r\n csv_file = open(self.file_path,'rbU')\r\n csv_rows = csv.DictReader(csv_file)\r\n _all_columns = csv_rows.fieldnames\r\n csv_file.close()\r\n return _all_columns\r\n except:\r\n return []", "def load_n_col(file):\n df = pd.read_csv(file, delimiter=\" \", header=None)\n columns = [list(df[col]) for col in df]\n return columns", "def get_columns(names: list) -> list:\n\n csv = read_csv(Path(DATA_DIR, \"high_diamond_ranked_10min.csv\"))\n return [csv[i] for i in names]", "def get_specific_col_data( self, columns):\n headers = []\n for i in range(len(columns)):\n headers.append(self.header2col[columns[i]])\n return self.data[:,headers]", "def readData(fname):\n pd = pandas.read_csv(fname)\n return [numpy.array(pd[colname]) for colname in pd.columns[1:]]", "def _read_feather_columns(path, columns, num_splits): # pragma: no cover\n from pyarrow import feather\n\n df = feather.read_feather(path, columns=columns)\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index)]", "def getSampleColumnList( platformTarget):\n print(f'columnIndexDict: {columnIndexDict}')\n columnIndex = getColumnIndex(platformTarget, 'Sample')\n print(f' columnIndex:{columnIndex}')\n columnList = []\n for key, val in bltFieldsDict.items():\n columnList.append([key, val[columnIndex]])\n\n return columnList", "def determine_columns(self, data):\n columns = []\n for n, d in enumerate(data):\n name = 'col%d' % n\n if is_string(d):\n columns.append(StringCol(name))\n continue\n elif is_sequence(d):\n t = type(d[0])\n l = len(d)\n else:\n t = type(d)\n l = 1\n if isinstance(d, float):\n columns.append(FloatCol(name, length=l))\n elif is_sequence(d) and any(isinstance(c, float) for c in d):\n columns.append(FloatCol(name, length=l))\n else:\n columns.append(Column(name, type=t, length=l))\n return columns", "def GetColumnIterator(self):\n return self.columns.__iter__()", "def columns(self):\n result = self.execute(self.commands.table_columns(self.name))\n return [x[0] for x in result]" ]
[ "0.7480413", "0.659853", "0.6436579", "0.6131555", "0.60717994", "0.6018328", "0.59529024", "0.5887448", "0.5881336", "0.5843457", "0.58333224", "0.582966", "0.58264387", "0.580666", "0.5799173", "0.57792217", "0.57775426", "0.57338744", "0.57074404", "0.57035786", "0.57002944", "0.5699826", "0.56985176", "0.56942815", "0.5693251", "0.56898683", "0.56822705", "0.5664584", "0.5659079", "0.5644167" ]
0.7367568
1
Extract training columns from train_sources, and assign semantic labels to them. The result should be self.train_cols, a list of Column objects (defined in museum_data_reader) to pass to labeler in self.train()
def define_training_data(self, train_sources, train_labels=None):
    logging.info("Defining training data for NNetModel...")
    self.train_cols = []
    if train_labels is None:
        for source in train_sources:
            self.train_cols += self._read(source)
    else:
        for source, label in zip(train_sources, train_labels):
            self.train_cols += self._read(source, label)

    logging.info("NNetModel: Training data contains {} columns from {} sources".format(len(self.train_cols), len(train_sources)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read(self, source, label_source=None):\n filename = os.path.join(\"data\", \"sources\", source+\".csv\")\n if label_source is None:\n label_filename = os.path.join(\"data\", \"labels\", source + \".columnmap.txt\")\n else:\n label_filename = os.path.join(\"data\", \"labels\", label_source)\n df = pd.read_csv(filename, dtype=str) # read the data source as a DataFrame\n # labels = pd.read_csv(label_filename) # read the semantic labels of columns in df\n\n labels_frame = pd.read_csv(label_filename, na_values=[\"\"], dtype={'column_name': 'str'})\n # dictionary (column_name, class_label)\n labels = labels_frame[['column_name', 'semantic_type']].dropna().set_index('column_name')['semantic_type'].to_dict()\n # logging.info(\"labels:{}\".format(labels))\n\n source_cols = []\n for c in df.columns:\n # label = str(labels.get_value(labels[labels['column_name'] == c].index[0], 'semantic_type')) # extract semantic label of column c\n if c in labels:\n label = labels[c]\n else:\n label = 'unknown'\n col = Column(filename=filename, colname=c, title=label, lines=list(df[c]))\n source_cols.append(col)\n\n return source_cols", "def train(self):\n start = time.time()\n self.labeler = NN_Column_Labeler([self.classifier_type], self.train_cols, split_by=hp['split_by'], test_frac=0, add_headers=self.add_headers, p_header=self.p_header) # test_frac = 0 means no further splitting into train and test sets, i.e., use train_cols as all_cols\n # TODO: rewrite NN_Column_Labeler to be initialized with train_cols only, instead of all_cols followed by internal splitting of all_cols into train, valid, ant test sets of columns\n\n # Train self.labeler:\n self.labeler.train(evaluate_after_training=False)\n\n return time.time() - start", "def preprocess(self):\n df = pd.read_csv(self.input, index_col = 0)\n diseaseCodes = pd.read_csv(self.metadata, sep = self.separator, index_col = 0, quotechar = '\"')\n\n diseaseColumn = []\n\n if self.transposeMetadataMatrix:\n diseaseCodes = diseaseCodes.T\n\n #iterate through all sample IDs and select the corresponding disease/annotation from the metadata for it\n for sample in df.index:\n try:\n diseaseCode = diseaseCodes[sample][self.diseaseColumn]\n except:\n diseaseCode = \"NotAvailable\"\n benchutils.logWarning(\"WARNING: No classLabel code found for sample \" + str(sample) + \". Assign class NotAvailable.\")\n diseaseColumn.append(diseaseCode)\n\n df.insert(0, column=\"classLabel\", value=diseaseColumn)\n\n df_without_missings = df.dropna(subset=['classLabel'])\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . 
to receive filename without ending\n filename = self.output + filePrefix + \"_withClassLabels.csv\"\n df_without_missings.to_csv(filename)\n return filename", "def train_build(df):\n print(\"Constructing training set...\")\n recent_labels = pr.labels.get_last_keypresses() #List of strings\n labeled_df = pr.labels.apply_labels_all(df, recent_labels)\n X, y = pr.build_model.make_training_set(labeled_df)\n\n return X, y", "def pre_process_dataset(self):\n sentences = []\n idx = 1\n # Iterates of dataframe to collect sentences and labels\n for index, row in self.df.iterrows():\n # Normalizing and separate words of each sentence\n norm_sentence = self.norm_text(row['comment_text'])\n word_sentences = re.sub(\"[^\\w]\", \" \", norm_sentence).split()\n sentences.append(word_sentences)\n # Creating a word dictionary\n for word in word_sentences:\n if word not in self.word_2_idx:\n self.word_2_idx[word] = idx\n idx += 1\n # Getting all labels and creates a one-hot vector\n row_label = row[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].values\n self.labels.append(row_label)\n\n # Collect word indexes from prepared word dictionary\n for words_sentence in sentences:\n self.input_data.append([self.word_2_idx[w] for w in words_sentence])", "def predict(self, source):\n\n # First, we need to extract query Column objects from source:\n query_cols = []\n for s in source:\n query_cols += self._read(s)\n logging.info(\"NNetModel: Predicting for {} columns from {} sources\".format(len(query_cols), len(source)))\n\n true_labels = []\n for c in query_cols:\n true_labels.append(c.title)\n\n # Then, pass these query cols to self.labeler.predict as\n start = time.time()\n y_pred_proba = self.labeler.predict_proba(query_cols)\n\n # predictions = []\n predictions_proba = []\n for y_proba in y_pred_proba:\n predictions_proba.append(y_proba[self.classifier_type])\n\n time_elapsed = time.time() - start\n # Finally, convert predictions to the pd dataframe in the required format:\n predictions_proba_dict = []\n for i, c in enumerate(query_cols):\n row = {\"column_name\": c.colname,\n \"source_name\": source,\n \"user_label\": c.title,\n \"model\": self.model_type,\n \"model_description\": self.description\n }\n preds = predictions_proba[i] # numpy array of probabilities for the i-th column\n max = 0\n label = \"unknown\"\n for j, score in enumerate(preds):\n class_name = self.labeler.inverted_lookup[j]\n row[\"scores_\"+class_name] = score\n if score > max:\n max = score\n label = class_name\n row[\"label\"] = label\n row[\"confidence\"] = max\n row[\"running_time\"] = time_elapsed\n predictions_proba_dict.append(row)\n\n\n\n # Return the predictions df:\n return pd.DataFrame(predictions_proba_dict)", "def load_data_and_labels(filename, dataset_name,is_train):\n label_count={}\n parameter_file = \"./parameters.json\"\n params = json.loads(open(parameter_file).read())\n if dataset_name == 'ag_news' or dataset_name == 'dbpedia' or dataset_name == 'sogou_news' or dataset_name == 'amazon_review_full' or dataset_name == 'amazon_review_polarity' :\n df = pd.read_csv(filename, names=['label', 'title', 'text'], dtype={'title': object,'text': object})\n selected = ['label', 'title','text','too_short','to_drop']\n\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[2]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN)\n if is_train:\n df = 
df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df[selected[2]].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df[selected[2]].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n \n elif dataset_name == 'yelp_review_full' or dataset_name == 'yelp_review_polarity':\n df = pd.read_csv(filename, names=['label','text'], dtype={'text': object})\n selected = ['label','text','too_short','to_drop']\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[1]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['text'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['text'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n elif dataset_name == 'yahoo_answers':\n df = pd.read_csv(filename, names=['label', 'title', 'content','answer'], dtype={'title': object,'answer': object,'content': object})\n selected = ['label', 'title','content','answer','too_short','to_drop'] \n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['temp'] = df[['content','answer']].apply(lambda x: ' '.join(str(v) for v in x), axis=1)\n df['too_short']= df['temp'].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), 
len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['temp'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['temp'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n return x_raw, y_raw, df, labels", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)", "def classify_columns(df_preds, verbose=0):\r\n train = copy.deepcopy(df_preds)\r\n #### If there are 30 chars are more in a discrete_string_var, it is then considered an NLP variable\r\n max_nlp_char_size = 30\r\n max_cols_to_print = 30\r\n print('############## C L A S S I F Y I N G V A R I A B L E S ####################')\r\n print('Classifying variables in data set...')\r\n #### Cat_Limit defines the max number of categories a column can have to be called a categorical colum\r\n cat_limit = 35\r\n float_limit = 15 #### Make this limit low so that float variables below this limit become cat vars ###\r\n def add(a,b):\r\n return a+b\r\n sum_all_cols = dict()\r\n orig_cols_total = train.shape[1]\r\n #Types of columns\r\n cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1\r\n ) | (train[col].isnull().sum()/len(train) >= 0.90)]\r\n train = train[left_subtract(list(train),cols_delete)]\r\n var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(\r\n columns={0:'type_of_column'})\r\n sum_all_cols['cols_delete'] = cols_delete\r\n var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']\r\n and len(train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])\r\n sum_all_cols['string_bool_vars'] = string_bool_vars\r\n var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16','int32','int64',\r\n 'float16','float32','float64'] and len(\r\n train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])\r\n sum_all_cols['num_bool_vars'] = num_bool_vars\r\n ###### This is where we take all Object vars and split them into diff kinds ###\r\n discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[\r\n 'index'] not in string_bool_vars+cols_delete else 0,axis=1)\r\n ######### This is where we figure out whether a string var is nlp or discrete_string var ###\r\n var_df['nlp_strings'] = 0\r\n var_df['discrete_strings'] = 0\r\n var_df['cat'] = 0\r\n var_df['id_col'] = 0\r\n discrete_or_nlp_vars = 
var_df.loc[discrete_or_nlp==1]['index'].values.tolist()\r\n if len(var_df.loc[discrete_or_nlp==1]) != 0:\r\n for col in discrete_or_nlp_vars:\r\n #### first fill empty or missing vals since it will blowup ###\r\n train[col] = train[col].fillna(' ')\r\n if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(\r\n ) >= max_nlp_char_size and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'nlp_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'discrete_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) == len(train) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])\r\n sum_all_cols['nlp_vars'] = nlp_vars\r\n discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])\r\n sum_all_cols['discrete_string_vars'] = discrete_string_vars\r\n ###### This happens only if a string column happens to be an ID column #######\r\n #### DO NOT Add this to ID_VARS yet. It will be done later.. Dont change it easily...\r\n #### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. ###\r\n var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,\r\n axis=1)\r\n factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])\r\n sum_all_cols['factor_vars'] = factor_vars\r\n ########################################################################\r\n date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16',\r\n 'int32','int64'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ######### This is where we figure out whether a numeric col is date or id variable ###\r\n var_df['int'] = 0\r\n var_df['date_time'] = 0\r\n ### if a particular column is date-time type, now set it as a date time variable ##\r\n var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ### this is where we save them as date time variables ###\r\n if len(var_df.loc[date_or_id==1]) != 0:\r\n for col in var_df.loc[date_or_id==1]['index'].values.tolist():\r\n if len(train[col].value_counts()) == len(train):\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n pass\r\n int_vars = list(var_df[(var_df['int'] ==1)]['index'])\r\n date_vars = list(var_df[(var_df['date_time'] == 1)]['index'])\r\n id_vars = 
list(var_df[(var_df['id_col'] == 1)]['index'])\r\n sum_all_cols['int_vars'] = int_vars\r\n copy_date_vars = copy.deepcopy(date_vars)\r\n for date_var in copy_date_vars:\r\n #### This test is to make sure sure date vars are actually date vars\r\n try:\r\n pd.to_datetime(train[date_var],infer_datetime_format=True)\r\n except:\r\n ##### if not a date var, then just add it to delete it from processing\r\n cols_delete.append(date_var)\r\n date_vars.remove(date_var)\r\n sum_all_cols['date_vars'] = date_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n sum_all_cols['cols_delete'] = cols_delete\r\n ## This is an EXTREMELY complicated logic for cat vars. Don't change it unless you test it many times!\r\n var_df['numeric'] = 0\r\n float_or_cat = var_df.apply(lambda x: 1 if x['type_of_column'] in ['float16',\r\n 'float32','float64'] else 0,\r\n axis=1)\r\n if len(var_df.loc[float_or_cat == 1]) > 0:\r\n for col in var_df.loc[float_or_cat == 1]['index'].values.tolist():\r\n if len(train[col].value_counts()) > 2 and len(train[col].value_counts()\r\n ) <= float_limit and len(train[col].value_counts()) <= len(train):\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n else:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'numeric'] = 1\r\n cat_vars = list(var_df[(var_df['cat'] ==1)]['index'])\r\n continuous_vars = list(var_df[(var_df['numeric'] ==1)]['index'])\r\n ######## V E R Y I M P O R T A N T ###################################################\r\n ##### There are a couple of extra tests you need to do to remove abberations in cat_vars ###\r\n cat_vars_copy = copy.deepcopy(cat_vars)\r\n for cat in cat_vars_copy:\r\n if df_preds[cat].dtype==float:\r\n continuous_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'numeric'] = 1\r\n elif len(df_preds[cat].value_counts()) == df_preds.shape[0]:\r\n id_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'id_col'] = 1\r\n sum_all_cols['cat_vars'] = cat_vars\r\n sum_all_cols['continuous_vars'] = continuous_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n ###### This is where you consoldate the numbers ###########\r\n var_dict_sum = dict(zip(var_df.values[:,0], var_df.values[:,2:].sum(1)))\r\n for col, sumval in var_dict_sum.items():\r\n if sumval == 0:\r\n print('%s of type=%s is not classified' %(col,train[col].dtype))\r\n elif sumval > 1:\r\n print('%s of type=%s is classified into more then one type' %(col,train[col].dtype))\r\n else:\r\n pass\r\n ############### This is where you print all the types of variables ##############\r\n ####### Returns 8 vars in the following order: continuous_vars,int_vars,cat_vars,\r\n ### string_bool_vars,discrete_string_vars,nlp_vars,date_or_id_vars,cols_delete\r\n if verbose == 1:\r\n print(\" Number of Numeric Columns = \", len(continuous_vars))\r\n print(\" Number of Integer-Categorical Columns = \", len(int_vars))\r\n print(\" Number of String-Categorical Columns = \", len(cat_vars))\r\n print(\" Number of Factor-Categorical Columns = \", len(factor_vars))\r\n print(\" Number of String-Boolean Columns = \", len(string_bool_vars))\r\n print(\" Number of Numeric-Boolean Columns = \", len(num_bool_vars))\r\n print(\" Number of Discrete String Columns = \", len(discrete_string_vars))\r\n print(\" Number of NLP String Columns = \", len(nlp_vars))\r\n print(\" Number of Date Time Columns = \", len(date_vars))\r\n print(\" Number of ID Columns = \", len(id_vars))\r\n 
print(\" Number of Columns to Delete = \", len(cols_delete))\r\n if verbose == 2:\r\n marthas_columns(df_preds,verbose=1)\r\n print(\" Numeric Columns: %s\" %continuous_vars[:max_cols_to_print])\r\n print(\" Integer-Categorical Columns: %s\" %int_vars[:max_cols_to_print])\r\n print(\" String-Categorical Columns: %s\" %cat_vars[:max_cols_to_print])\r\n print(\" Factor-Categorical Columns: %s\" %factor_vars[:max_cols_to_print])\r\n print(\" String-Boolean Columns: %s\" %string_bool_vars[:max_cols_to_print])\r\n print(\" Numeric-Boolean Columns: %s\" %num_bool_vars[:max_cols_to_print])\r\n print(\" Discrete String Columns: %s\" %discrete_string_vars[:max_cols_to_print])\r\n print(\" NLP text Columns: %s\" %nlp_vars[:max_cols_to_print])\r\n print(\" Date Time Columns: %s\" %date_vars[:max_cols_to_print])\r\n print(\" ID Columns: %s\" %id_vars[:max_cols_to_print])\r\n print(\" Columns that will not be considered in modeling: %s\" %cols_delete[:max_cols_to_print])\r\n ##### now collect all the column types and column names into a single dictionary to return!\r\n len_sum_all_cols = reduce(add,[len(v) for v in sum_all_cols.values()])\r\n if len_sum_all_cols == orig_cols_total:\r\n print(' %d Predictors classified...' %orig_cols_total)\r\n #print(' This does not include the Target column(s)')\r\n else:\r\n print('No of columns classified %d does not match %d total cols. Continuing...' %(\r\n len_sum_all_cols, orig_cols_total))\r\n ls = sum_all_cols.values()\r\n flat_list = [item for sublist in ls for item in sublist]\r\n if len(left_subtract(list(train),flat_list)) == 0:\r\n print(' Missing columns = None')\r\n else:\r\n print(' Missing columns = %s' %left_subtract(list(train),flat_list))\r\n return sum_all_cols", "def make_training_data(feature_funcs, annotations):\n extractor = FeatureExtractor(feature_funcs)\n \n training_instances = []\n \n for sent_str, anns in annotations:\n tree = parser.raw_parse(sent_str).next()\n tree = convert_brackets(tree)\n # print tree\n # some preprocessing, align the positions and \n # also use the sentence string given the parse tree\n anns = align_annotation_with_sentence(sent_str, ' '.join(tree.leaves()), anns)\n sent_str = ' '.join(tree.leaves())\n for ann in anns:\n frame_name = ann.frame_name\n start, end = ann.target.start, ann.target.end\n frame = Frame(start, end, frame_name)\n frame_node = find_node_by_positions(tree, start, end)\n\n # TODO: bug here\n if frame_node is None: \n sys.stderr.write(\"Warning: %r does not correspond to any tree node in sentence \\\"%s\\\"\\nSkip it\\n \" %(frame, sent_str))\n continue\n \n for node, (node_start_pos, node_end_pos) in collect_nodes(tree):\n node_pos = NodePosition(node_start_pos, node_end_pos)\n context = Context(sent_str, tree, frame, node_pos)\n\n feature_values = extractor.extract(node, context)\n \n # try to see the it has some semantic role\n found_matching_node = False\n for fe in ann.FE:\n other_node = find_node_by_positions(tree, fe.start, fe.end)\n if node == other_node:\n training_instances.append((feature_values, fe.name))\n found_matching_node = True\n break\n\n # semantic role => NULL\n if not found_matching_node:\n training_instances.append((feature_values, 'NULL'))\n\n return training_instances", "def preprocess(self):\n\n self._build_labels_dict(['one', 'two', 'three', 'four', 'five'])\n\n with open(self.data_path + self.file_name, 'rb') as csvfile:\n\n reader = csv.reader(csvfile, delimiter=\",\")\n for row in reader:\n self.texts.append(row[1])\n 
self.labels.append(self.labels_index[row[0]])\n\n print('Found %s texts.' % len(self.texts))", "def load_data_and_labels(data_source, remove_stopword=False, run_with_keras=False):\n # Read the CSV file and get its contents\n with open(data_source, 'r', encoding='utf-8', errors='ignore') as f:\n csv_reader = csv.reader(f)\n # get the header\n header = next(csv_reader)\n label_idx = header.index('label')\n content_idx = header.index('content')\n print(f'The label index is : {label_idx} and the content index is : {content_idx}')\n\n y_text = list()\n x_text = list()\n\n for line in csv_reader:\n # get the sentence from the line\n sentence = line[content_idx].strip()\n x_text.append(sentence)\n y_text.append(int(line[label_idx]))\n\n # preprocess input text\n if run_with_keras:\n x_text = [clean_str(sent, remove_stopword) for sent in x_text]\n else:\n x_text = [clean_str(sent, remove_stopword).split(' ') for sent in x_text]\n\n # get the lengths for every line\n lengths = np.array(list(map(len, [sent for sent in x_text])))\n\n return [x_text, y_text, lengths]", "def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def train(\n train_source_strings: List[str],\n train_target_strings: List[str]) -> Any:\n\n # ############################ REPLACE THIS WITH YOUR CODE #############################\n ngram_lvl = 3\n def obtain_train_dicts(train_source_strings, train_target_strings,\n ngram_lvl):\n ngrams_dict = col.defaultdict(lambda: col.defaultdict(int))\n for src_str,dst_str in zip(train_source_strings,\n train_target_strings):\n try:\n src_ngrams = [src_str[i:i+ngram_lvl] for i in\n range(len(src_str)-ngram_lvl+1)]\n dst_ngrams = [dst_str[i:i+ngram_lvl] for i in\n range(len(dst_str)-ngram_lvl+1)]\n except TypeError as e:\n print(src_ngrams, dst_ngrams)\n print(e)\n raise StopIteration\n for src_ngram in src_ngrams:\n for dst_ngram in dst_ngrams:\n ngrams_dict[src_ngram][dst_ngram] += 1\n return ngrams_dict\n \n ngrams_dict = col.defaultdict(lambda: col.defaultdict(int))\n for nl in range(1, ngram_lvl+1):\n ngrams_dict.update(\n obtain_train_dicts(train_source_strings,\n train_target_strings, nl))\n return ngrams_dict \n # ############################ REPLACE THIS 
WITH YOUR CODE #############################", "def doCols(col):\n p = []\n for clf in clfs:\n # print 'trainPreprocessed:', trainPreprocessed, trainPreprocessed.shape\n # print 'labels_train[:, col]', labels_train[:, col], labels_train[:, col].shape\n clf.fit(trainPreprocessed, labels_train[:, col])\n p.append(clf.predict_proba(testPreprocessed)[:, 1])\n return p", "def train(self, features, labels):\n pass", "def _make_traffic_source_preprocessing(self, df):\n # Get the trafficSource.medium,trafficSource.referralPath, trafficSource.source.\n train_df = df.copy(deep=False)\n le = preprocessing.LabelEncoder()\n to_encode = ['medium', 'referralPath', 'source']\n for item in to_encode:\n item_key = 'trafficSource.' + item\n encoding_key = 'encoding_' + item\n train_df[item_key] = train_df[item_key].fillna(\"missing\")\n fitting_label = train_df[item_key].unique()\n le.fit(fitting_label)\n train_df[encoding_key] = le.transform(train_df[item_key])\n train_gdf = train_df.groupby('fullVisitorId')\n return train_gdf['encoding_medium'].sum(), train_gdf['encoding_referralPath'].sum(), train_gdf['encoding_source'].sum()", "def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n all_attr_names = lines[1].split()\n for i, attr_name in enumerate(all_attr_names):\n self.attr2idx[attr_name] = i\n self.idx2attr[i] = attr_name\n\n lines = lines[2:]\n random.seed(1234)\n random.shuffle(lines)\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n values = split[1:]\n\n label = []\n for attr_name in self.selected_attrs:\n idx = self.attr2idx[attr_name]\n label.append(values[idx] == '1')\n\n if (i+1) < 4:\n self.test_dataset.append([filename, label])\n else:\n self.train_dataset.append([filename, label])", "def generate_data(self):\n\n column_num = 1\n src_path = self.src_paths_after_pre_process\n target_path = self.tgt_paths_after_pre_process\n\n src_ds = load_textline_dataset([src_path], column_num)\n\n src_ds = src_ds[0]\n\n input_pipeline_func = self.get_input_pipeline(for_export=False)\n\n src_ds = src_ds.map(\n input_pipeline_func, num_parallel_calls=self.num_parallel_calls)\n\n src_size_ds = src_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n src_ds = src_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n if self.infer_without_label:\n data_set = tf.data.Dataset.zip((src_ds, src_size_ds))\n\n else:\n tgt = load_textline_dataset([target_path], column_num)\n tgt = tgt[0]\n tgt_out_ds = tgt.map(lambda x: x + ' ' + self.END_TOKEN)\n tgt_in_ds = tgt.map(lambda x: self.START_TOKEN + ' ' + x)\n\n tgt_in_ds = tgt_in_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len, self.\n text_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_size_ds = tgt_in_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_ds = tgt_in_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n inp_ds = tf.data.Dataset.zip(\n (src_ds, src_size_ds, tgt_in_ds, tgt_in_size_ds))\n\n if self.use_label_vocab:\n target_vocab_file_path = self.label_vocab_file_paths[0]\n else:\n target_vocab_file_path = self.text_vocab_file_path\n tgt_out_ds = tgt_out_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len,\n target_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_out_ds = tgt_out_ds.map(\n self.exclude_padding, 
num_parallel_calls=self.num_parallel_calls)\n data_set = tf.data.Dataset.zip((inp_ds, tgt_out_ds))\n\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n label_vocab_dict = load_vocab_dict(self.label_vocab_file_paths[0])\n label_vocab_size = len(label_vocab_dict)\n data_size = get_file_len(self.src_paths_after_pre_process)\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['label_vocab_size'] = label_vocab_size\n self.config['data']['{}_data_size'.format(self.mode)] = data_size\n\n return data_set", "def _read_column_labels(self):\n\n # read the label line (should be at row 15 of the file at this point)\n label_list = self._stream_handle.readline().strip().split()\n self.num_columns = len(label_list)\n self._header_dict['labels'] = label_list\n\n # the m_present_time label is required to generate particles, raise an exception if it is not found\n if GliderParticleKey.M_PRESENT_TIME not in label_list:\n raise DatasetParserException('The m_present_time label has not been found, which means the timestamp '\n 'cannot be determined for any particles')\n\n # read the units line (should be at row 16 of the file at this point)\n data_unit_list = self._stream_handle.readline().strip().split()\n data_unit_list_length = len(data_unit_list)\n\n # read the number of bytes line (should be at row 17 of the file at this point)\n num_of_bytes_list = self._stream_handle.readline().strip().split()\n num_of_bytes_list_length = len(num_of_bytes_list)\n\n # number of labels for name, unit, and number of bytes must match\n if data_unit_list_length != self.num_columns or self.num_columns != num_of_bytes_list_length:\n raise DatasetParserException(\"The number of columns in the labels row: %d, units row: %d, \"\n \"and number of bytes row: %d are not equal.\"\n % (self.num_columns, data_unit_list_length, num_of_bytes_list_length))\n\n # if the number of columns from the header does not match that in the data, but the rest of the file has\n # the same number of columns in each line this is not a fatal error, just parse the columns that are present\n if self._header_dict['sensors_per_cycle'] != self.num_columns:\n msg = 'sensors_per_cycle from header %d does not match the number of data label columns %d' % \\\n (self._header_dict['sensors_per_cycle'], self.num_columns)\n self._exception_callback(SampleException(msg))\n\n log.debug(\"Label count: %d\", self.num_columns)", "def _load_images_labels(self):\n path_dataset_file = self.path_model_id.joinpath(f'{self.set_name}_set.csv')\n \n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n rows = list(csv_reader)\n\n if self.shuffle:\n rng = default_rng(self.seed)\n rng.shuffle(rows)\n \n self.n_examples = len(rows)\n\n ds_files = tf.data.Dataset.from_tensor_slices(\n [path.join(str(self.path_data), f'label_{row[1]}', row[0])\n for row in rows])\n \n ds_images = ds_files.map(self._load_preprocess_image)\n\n class_labels_enc = self.class_le.fit_transform(\n [row[1] for row in rows])\n\n ds_labels = tf.data.Dataset.from_tensor_slices(\n class_labels_enc)\n\n return ds_images, ds_labels", "def train(self):\n for doc, label in zip(self.train_docs(), self.train_labels()):\n yield doc, label", "def __extract_xy_train(self, colmns):\n self.keymap = None\n self.X_train_raw = self.data[colmns].as_matrix()\n # self.__replace_by_noise(c=1)\n n = self.X_train_raw.shape[0]\n self.X_train_raw = np.hstack((self.X_train_raw,\n np.ones(shape=(n, 1))))\n self.y = 
self.data['ACTION'].as_matrix().reshape(-1, 1).ravel()", "def prepareXyTrain(corpus: DataFrame, XtrainCol: str, y_train_col: str):\r\n Xcol = XtrainCol.split('*') if '*' in XtrainCol else XtrainCol\r\n\r\n if isinstance(Xcol, str):\r\n return corpus[Xcol], corpus[y_train_col]\r\n\r\n # else if multiple columns are concatenated (Xcol: List[String])\r\n strConcatenator = lambda row: reduce(lambda a, b: a + ' ' + b, row)\r\n Xtrain = corpus[Xcol].apply(strConcatenator, axis=1)\r\n return Xtrain, corpus[y_train_col]", "def _read_samples(self):\n\n logging.debug(\"Start file parsing.\")\n data = pd.read_csv(self._source_file, header=None)\n \n data = pd.read_csv(self._source_file, header=None)\n header = pd.read_csv(self._header_file, delimiter=':', skiprows=1, header=None)\n header.columns = ['column', 'column_type']\n\n data.columns = header.column.tolist() + ['attack']\n data['attack'] = data['attack'].str.replace('.', '')\n data['label'] = 1\n data.loc[data['attack'] == 'normal', 'label'] = 0\n\n symbolic_columns = header.loc[header.column_type == ' symbolic.'].column.tolist()\n # print(symbolic_columns)\n\n for scol in symbolic_columns:\n data[scol] = pd.Categorical(data[scol])\n one_hot_cols = pd.get_dummies(data[scol], prefix=scol)\n data = pd.concat([data, one_hot_cols], axis=1)\n\n data = data.drop(columns=symbolic_columns)\n data = data.drop(columns=['attack'])\n\n # data.loc[data.attack != 'normal' , ['attack', 'label']].head(20)\n\n data_normal = data.loc[data['label'] == 0]\n data_abnormal = data.loc[data['label'] == 1]\n\n data_normal_train = data_normal.sample(frac=0.7)\n data_normal_test = data_normal.loc[~data_normal.index.isin(data_normal_train.index)]\n\n data_normal_train = data_normal_train.drop(columns=['label']).values\n data_normal_test = data_normal_test.drop(columns=['label']).values\n data_abnormal = data_abnormal.drop(columns=['label']).values\n \n scaler = MinMaxScaler()\n _ = scaler.fit(data_normal_train)\n data_normal_train = scaler.transform(data_normal_train)\n data_normal_test = scaler.transform(data_normal_test)\n data_abnormal = scaler.transform(data_abnormal)\n \n logging.debug('Normal {}; Train {}; Test{}'.format(data_normal.shape, data_normal_train.shape, data_normal_test.shape))\n logging.debug('Abnormal {}'.format(data_abnormal.shape))\n\n samples = {}\n samples['NORMAL'] = data_normal_train\n samples['NORMAL_TEST'] = data_normal_test\n samples['ABNORMAL_TEST'] = data_abnormal\n\n logging.debug(\"End file parsing.\")\n\n return samples", "def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()", "def train(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def train(self, x_train, y_train):\n\n # convert input to format for classifier\n list_of_embeddings = list(x_train[self.embeddings_col])\n x_train = np.array([[float(i) for i in embedding.strip('[]').split()] for embedding in list_of_embeddings])\n\n # discard fold ID column from labels\n review_groups = [col for col in y_train.columns if not col=='k']\n\n for review_group in tqdm(review_groups, desc='Train Review Groups'):\n\n # pull label column\n labels = y_train[review_group]\n\n # logistic classifier\n classifier = SGDClassifier(loss=\"log\", alpha=self.alpha,\n l1_ratio = self.l1_ratio, penalty=\"elasticnet\").fit(x_train, labels)\n\n # save the model in dictionary of models\n self.models[review_group] = 
classifier", "def train(self):\n if self.input_col is None:\n raise Exception(\"Preprocessing not specified\")\n self.classifier_model.train(self.input_col, self.output_col)" ]
[ "0.6559914", "0.6052187", "0.57164496", "0.5696534", "0.56487805", "0.5536536", "0.55146205", "0.5490378", "0.5440697", "0.5417198", "0.5373422", "0.53056693", "0.5295664", "0.52940863", "0.5287289", "0.52654225", "0.52557045", "0.52543795", "0.5223914", "0.52191883", "0.5207339", "0.5169487", "0.51641417", "0.51548654", "0.5144047", "0.5134905", "0.5125715", "0.5123625", "0.5113331", "0.510651" ]
0.6832204
0
Create an instance of NN_Column_Labeler, perform bagging, feature preparation, and training of the underlying classifier(s)
def train(self):
    start = time.time()
    self.labeler = NN_Column_Labeler([self.classifier_type], self.train_cols, split_by=hp['split_by'], test_frac=0, add_headers=self.add_headers, p_header=self.p_header)  # test_frac = 0 means no further splitting into train and test sets, i.e., use train_cols as all_cols
    # TODO: rewrite NN_Column_Labeler to be initialized with train_cols only, instead of all_cols followed by internal splitting of all_cols into train, valid, and test sets of columns

    # Train self.labeler:
    self.labeler.train(evaluate_after_training=False)

    return time.time() - start
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def naive_bayes(self,X_columns, y_column, cv_kfold=10, class_bins=0, bin_strat='uniform', feature_selection=True):\n \n try:\n\n valid_strategy = ('uniform', 'quantile', 'kmeans')\n if bin_strat not in valid_strategy:\n raise ValueError(\"Valid options for 'bin_strat' are {}. \"\n \"Got strategy={!r} instead.\"\n .format(valid_strategy, bin_strat))\n valid_feature_selection = {1,0}\n\n if feature_selection not in valid_feature_selection:\n raise ValueError(\"Valid options for 'bin_strat' are {}. \"\n \"Got strategy={!r} instead.\"\n .format(valid_feature_selection, feature_selection))\n\n X = self.df_input[X_columns]\n y = self.df_input[[y_column]]\n\n scaler = MinMaxScaler()\n for col in X.columns:\n X[col] = scaler.fit_transform(X[[col]].astype(float))\n\n if(class_bins!=0):\n est = KBinsDiscretizer(n_bins=class_bins, encode='ordinal', strategy='kmeans')\n if(bin_strat=='percentile'):\n est = KBinsDiscretizer(n_bins=class_bins, encode='ordinal', strategy='percentile')\n elif(bin_strat=='uniform'):\n est = KBinsDiscretizer(n_bins=class_bins, encode='ordinal', strategy='uniform')\n y[[y.columns[0]]] = est.fit_transform(y[[y.columns[0]]])\n\n if(feature_selection):\n X = X[self.nb_feature_select(LogisticRegression(solver='lbfgs', multi_class='auto'), X, y, cv_kfold=10)]\n print(\"Features Selected: \")\n for x in X.columns:\n print(x, end=\", \")\n \n X = X.values.tolist()\n\n nb = GaussianNB()\n y_true_values,y_pred_values = [], []\n\n y = y[y.columns[0]].values.tolist()\n \n if(cv_kfold!=0):\n kf = KFold(n_splits=cv_kfold)\n\n \n kf.get_n_splits(X)\n accuracy = []\n\n for train_index, test_index in kf.split(X,y):\n\n X_test = [X[ii] for ii in test_index]\n X_train = [X[ii] for ii in train_index]\n y_test = [y[ii] for ii in test_index]\n y_train = [y[ii] for ii in train_index]\n\n nb.fit(X_train,y_train)\n y_pred =nb.predict(X_test)\n accuracy = np.append(accuracy, np.around(balanced_accuracy_score(y_test, y_pred),decimals=4))\n y_pred_values = np.append(y_pred_values, y_pred)\n y_true_values = np.append(y_true_values, y_test)\n total_accuracy = np.around(np.sum(accuracy)/cv_kfold, decimals=4)\n else:\n nb.fit(X,y)\n y_pred =nb.predict(X)\n y_true_values = y\n y_pred_values = y_pred\n total_accuracy = np.around(balanced_accuracy_score(y_true_values, y_pred_values),decimals=4)\n\n return y_true_values, y_pred_values, total_accuracy\n \n\n\n except Exception as e:\n print(e)", "def __init__(self, data, class_column):\n print(\"Naive Bayes Model created!\")\n\n # create report\n self.predict_summary = {}\n self.fit_report = {}\n\n # self.data=data\n self.data = data\n self.class_column = class_column\n\n # get the class column and get classes\n col_data = self.data[class_column]\n self.class_list = unique_list(col_data)\n\n # get numeric columns and categorical columns\n self.num_cols, self.cat_cols = get_both_columns(self.data, class_column)\n\n # Build the pro\n self.prob_hub = {}", "def classify_columns(df_preds, verbose=0):\r\n train = copy.deepcopy(df_preds)\r\n #### If there are 30 chars are more in a discrete_string_var, it is then considered an NLP variable\r\n max_nlp_char_size = 30\r\n max_cols_to_print = 30\r\n print('############## C L A S S I F Y I N G V A R I A B L E S ####################')\r\n print('Classifying variables in data set...')\r\n #### Cat_Limit defines the max number of categories a column can have to be called a categorical colum\r\n cat_limit = 35\r\n float_limit = 15 #### Make this limit low so that float variables below this limit become cat vars ###\r\n 
def add(a,b):\r\n return a+b\r\n sum_all_cols = dict()\r\n orig_cols_total = train.shape[1]\r\n #Types of columns\r\n cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1\r\n ) | (train[col].isnull().sum()/len(train) >= 0.90)]\r\n train = train[left_subtract(list(train),cols_delete)]\r\n var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(\r\n columns={0:'type_of_column'})\r\n sum_all_cols['cols_delete'] = cols_delete\r\n var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']\r\n and len(train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])\r\n sum_all_cols['string_bool_vars'] = string_bool_vars\r\n var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16','int32','int64',\r\n 'float16','float32','float64'] and len(\r\n train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])\r\n sum_all_cols['num_bool_vars'] = num_bool_vars\r\n ###### This is where we take all Object vars and split them into diff kinds ###\r\n discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[\r\n 'index'] not in string_bool_vars+cols_delete else 0,axis=1)\r\n ######### This is where we figure out whether a string var is nlp or discrete_string var ###\r\n var_df['nlp_strings'] = 0\r\n var_df['discrete_strings'] = 0\r\n var_df['cat'] = 0\r\n var_df['id_col'] = 0\r\n discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()\r\n if len(var_df.loc[discrete_or_nlp==1]) != 0:\r\n for col in discrete_or_nlp_vars:\r\n #### first fill empty or missing vals since it will blowup ###\r\n train[col] = train[col].fillna(' ')\r\n if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(\r\n ) >= max_nlp_char_size and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'nlp_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'discrete_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) == len(train) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])\r\n sum_all_cols['nlp_vars'] = nlp_vars\r\n discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])\r\n sum_all_cols['discrete_string_vars'] = discrete_string_vars\r\n ###### This happens only if a string column happens to be an ID column #######\r\n #### DO NOT Add this to ID_VARS yet. It will be done later.. Dont change it easily...\r\n #### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. 
###\r\n var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,\r\n axis=1)\r\n factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])\r\n sum_all_cols['factor_vars'] = factor_vars\r\n ########################################################################\r\n date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16',\r\n 'int32','int64'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ######### This is where we figure out whether a numeric col is date or id variable ###\r\n var_df['int'] = 0\r\n var_df['date_time'] = 0\r\n ### if a particular column is date-time type, now set it as a date time variable ##\r\n var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ### this is where we save them as date time variables ###\r\n if len(var_df.loc[date_or_id==1]) != 0:\r\n for col in var_df.loc[date_or_id==1]['index'].values.tolist():\r\n if len(train[col].value_counts()) == len(train):\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n pass\r\n int_vars = list(var_df[(var_df['int'] ==1)]['index'])\r\n date_vars = list(var_df[(var_df['date_time'] == 1)]['index'])\r\n id_vars = list(var_df[(var_df['id_col'] == 1)]['index'])\r\n sum_all_cols['int_vars'] = int_vars\r\n copy_date_vars = copy.deepcopy(date_vars)\r\n for date_var in copy_date_vars:\r\n #### This test is to make sure sure date vars are actually date vars\r\n try:\r\n pd.to_datetime(train[date_var],infer_datetime_format=True)\r\n except:\r\n ##### if not a date var, then just add it to delete it from processing\r\n cols_delete.append(date_var)\r\n date_vars.remove(date_var)\r\n sum_all_cols['date_vars'] = date_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n sum_all_cols['cols_delete'] = cols_delete\r\n ## This is an EXTREMELY complicated logic for cat vars. 
Don't change it unless you test it many times!\r\n var_df['numeric'] = 0\r\n float_or_cat = var_df.apply(lambda x: 1 if x['type_of_column'] in ['float16',\r\n 'float32','float64'] else 0,\r\n axis=1)\r\n if len(var_df.loc[float_or_cat == 1]) > 0:\r\n for col in var_df.loc[float_or_cat == 1]['index'].values.tolist():\r\n if len(train[col].value_counts()) > 2 and len(train[col].value_counts()\r\n ) <= float_limit and len(train[col].value_counts()) <= len(train):\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n else:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'numeric'] = 1\r\n cat_vars = list(var_df[(var_df['cat'] ==1)]['index'])\r\n continuous_vars = list(var_df[(var_df['numeric'] ==1)]['index'])\r\n ######## V E R Y I M P O R T A N T ###################################################\r\n ##### There are a couple of extra tests you need to do to remove abberations in cat_vars ###\r\n cat_vars_copy = copy.deepcopy(cat_vars)\r\n for cat in cat_vars_copy:\r\n if df_preds[cat].dtype==float:\r\n continuous_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'numeric'] = 1\r\n elif len(df_preds[cat].value_counts()) == df_preds.shape[0]:\r\n id_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'id_col'] = 1\r\n sum_all_cols['cat_vars'] = cat_vars\r\n sum_all_cols['continuous_vars'] = continuous_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n ###### This is where you consoldate the numbers ###########\r\n var_dict_sum = dict(zip(var_df.values[:,0], var_df.values[:,2:].sum(1)))\r\n for col, sumval in var_dict_sum.items():\r\n if sumval == 0:\r\n print('%s of type=%s is not classified' %(col,train[col].dtype))\r\n elif sumval > 1:\r\n print('%s of type=%s is classified into more then one type' %(col,train[col].dtype))\r\n else:\r\n pass\r\n ############### This is where you print all the types of variables ##############\r\n ####### Returns 8 vars in the following order: continuous_vars,int_vars,cat_vars,\r\n ### string_bool_vars,discrete_string_vars,nlp_vars,date_or_id_vars,cols_delete\r\n if verbose == 1:\r\n print(\" Number of Numeric Columns = \", len(continuous_vars))\r\n print(\" Number of Integer-Categorical Columns = \", len(int_vars))\r\n print(\" Number of String-Categorical Columns = \", len(cat_vars))\r\n print(\" Number of Factor-Categorical Columns = \", len(factor_vars))\r\n print(\" Number of String-Boolean Columns = \", len(string_bool_vars))\r\n print(\" Number of Numeric-Boolean Columns = \", len(num_bool_vars))\r\n print(\" Number of Discrete String Columns = \", len(discrete_string_vars))\r\n print(\" Number of NLP String Columns = \", len(nlp_vars))\r\n print(\" Number of Date Time Columns = \", len(date_vars))\r\n print(\" Number of ID Columns = \", len(id_vars))\r\n print(\" Number of Columns to Delete = \", len(cols_delete))\r\n if verbose == 2:\r\n marthas_columns(df_preds,verbose=1)\r\n print(\" Numeric Columns: %s\" %continuous_vars[:max_cols_to_print])\r\n print(\" Integer-Categorical Columns: %s\" %int_vars[:max_cols_to_print])\r\n print(\" String-Categorical Columns: %s\" %cat_vars[:max_cols_to_print])\r\n print(\" Factor-Categorical Columns: %s\" %factor_vars[:max_cols_to_print])\r\n print(\" String-Boolean Columns: %s\" %string_bool_vars[:max_cols_to_print])\r\n print(\" Numeric-Boolean Columns: %s\" %num_bool_vars[:max_cols_to_print])\r\n print(\" Discrete String Columns: %s\" 
%discrete_string_vars[:max_cols_to_print])\r\n print(\" NLP text Columns: %s\" %nlp_vars[:max_cols_to_print])\r\n print(\" Date Time Columns: %s\" %date_vars[:max_cols_to_print])\r\n print(\" ID Columns: %s\" %id_vars[:max_cols_to_print])\r\n print(\" Columns that will not be considered in modeling: %s\" %cols_delete[:max_cols_to_print])\r\n ##### now collect all the column types and column names into a single dictionary to return!\r\n len_sum_all_cols = reduce(add,[len(v) for v in sum_all_cols.values()])\r\n if len_sum_all_cols == orig_cols_total:\r\n print(' %d Predictors classified...' %orig_cols_total)\r\n #print(' This does not include the Target column(s)')\r\n else:\r\n print('No of columns classified %d does not match %d total cols. Continuing...' %(\r\n len_sum_all_cols, orig_cols_total))\r\n ls = sum_all_cols.values()\r\n flat_list = [item for sublist in ls for item in sublist]\r\n if len(left_subtract(list(train),flat_list)) == 0:\r\n print(' Missing columns = None')\r\n else:\r\n print(' Missing columns = %s' %left_subtract(list(train),flat_list))\r\n return sum_all_cols", "def __init__(self, classifier=None, df: pd.DataFrame=None, features: [str]=None, label_col: str= 'Label',\n timestamp_col: str='Timestamp'):\n self.classifier = classifier\n self.df = df\n self.features = features\n self.label_col = label_col\n self.timestamp_col = timestamp_col", "def naive_bayes_classify(df: pd.DataFrame, vect, names):\n features = vect\n target = df.success_lvl\n\n X_train, X_test, y_train, y_test = \\\n train_test_split(features, target, test_size=0.2, random_state=42)\n\n nb_clf = MultinomialNB()\n nb_clf.fit(X_train, y_train)\n nb_predictions = nb_clf.predict(X_test)\n print('Accuracy score for Naive Bayes:', accuracy_score(y_test, nb_predictions))\n\n\n # Find Top/Bottom num of terms used to describe the classes.\n num = 10\n low_class_prob_sorted = nb_clf.feature_log_prob_[0, :].argsort()[::-1]\n hi_class_prob_sorted = nb_clf.feature_log_prob_[1, :].argsort()[::-1]\n print('\\n', f'Low score Top{num} phrases:', np.take(names, low_class_prob_sorted[:num]))\n print('\\n', f'Low score Bot{num} phrases:', np.take(names, low_class_prob_sorted[-num:]))\n print('\\n', f'High score Top{num} phrases:', np.take(names, hi_class_prob_sorted[:num]))\n print('\\n', f'High score Bot{num} phrases:', np.take(names, hi_class_prob_sorted[-num:]))", "def __init__(self, emb_matrix, vocab, extra_vocab, labels, args):\n super(CNNClassifier, self).__init__()\n self.labels = labels\n self.config = SimpleNamespace(filter_channels = args.filter_channels,\n filter_sizes = args.filter_sizes,\n fc_shapes = args.fc_shapes,\n dropout = args.dropout,\n num_classes = len(labels),\n wordvec_type = args.wordvec_type,\n extra_wordvec_method = args.extra_wordvec_method,\n extra_wordvec_dim = args.extra_wordvec_dim,\n extra_wordvec_max_norm = args.extra_wordvec_max_norm,\n model_type = 'CNNClassifier')\n\n self.unsaved_modules = []\n\n self.add_unsaved_module('embedding', nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix), freeze=True))\n self.vocab_size = emb_matrix.shape[0]\n self.embedding_dim = emb_matrix.shape[1]\n\n # The Pretrain has PAD and UNK already (indices 0 and 1), but we\n # possibly want to train UNK while freezing the rest of the embedding\n # note that the /10.0 operation has to be inside nn.Parameter unless\n # you want to spend a long time debugging this\n self.unk = nn.Parameter(torch.randn(self.embedding_dim) / np.sqrt(self.embedding_dim) / 10.0)\n\n self.vocab_map = { word: i for i, word in 
enumerate(vocab) }\n\n if self.config.extra_wordvec_method is not classifier_args.ExtraVectors.NONE:\n if not extra_vocab:\n raise ValueError(\"Should have had extra_vocab set for extra_wordvec_method {}\".format(self.config.extra_wordvec_method))\n if not args.extra_wordvec_dim:\n self.config.extra_wordvec_dim = self.embedding_dim\n if self.config.extra_wordvec_method is classifier_args.ExtraVectors.SUM:\n if self.config.extra_wordvec_dim != self.embedding_dim:\n raise ValueError(\"extra_wordvec_dim must equal embedding_dim for {}\".format(self.config.extra_wordvec_method))\n\n self.extra_vocab = list(extra_vocab)\n self.extra_vocab_map = { word: i for i, word in enumerate(self.extra_vocab) }\n # TODO: possibly add regularization specifically on the extra embedding?\n self.extra_embedding = nn.Embedding(num_embeddings = len(extra_vocab),\n embedding_dim = self.config.extra_wordvec_dim,\n max_norm = self.config.extra_wordvec_max_norm,\n padding_idx = 0)\n logger.debug(\"Extra embedding size: {}\".format(self.extra_embedding.weight.shape))\n else:\n self.extra_vocab = None\n self.extra_vocab_map = None\n self.config.extra_wordvec_dim = 0\n self.extra_embedding = None\n\n # Pytorch is \"aware\" of the existence of the nn.Modules inside\n # an nn.ModuleList in terms of parameters() etc\n if self.config.extra_wordvec_method is classifier_args.ExtraVectors.NONE:\n total_embedding_dim = self.embedding_dim\n elif self.config.extra_wordvec_method is classifier_args.ExtraVectors.SUM:\n total_embedding_dim = self.embedding_dim\n elif self.config.extra_wordvec_method is classifier_args.ExtraVectors.CONCAT:\n total_embedding_dim = self.embedding_dim + self.config.extra_wordvec_dim\n else:\n raise ValueError(\"unable to handle {}\".format(self.config.extra_wordvec_method))\n\n self.conv_layers = nn.ModuleList([nn.Conv2d(in_channels=1,\n out_channels=self.config.filter_channels,\n kernel_size=(filter_size, total_embedding_dim))\n for filter_size in self.config.filter_sizes])\n\n previous_layer_size = len(self.config.filter_sizes) * self.config.filter_channels\n fc_layers = []\n for shape in self.config.fc_shapes:\n fc_layers.append(nn.Linear(previous_layer_size, shape))\n previous_layer_size = shape\n fc_layers.append(nn.Linear(previous_layer_size, self.config.num_classes))\n self.fc_layers = nn.ModuleList(fc_layers)\n\n self.max_window = max(self.config.filter_sizes)\n\n self.dropout = nn.Dropout(self.config.dropout)", "def __init__(self, input_size: int, output_size: int, old_cols: List[AlphaModule] = None, hidden_size=768,\n ptm=\"bert\", feature_size=768, require_proto=False, tokenizer=None, prob_l=-1) -> None:\n super(PTMClassifier_PNN, self).__init__()\n \n if old_cols is None:\n old_cols = []\n \n self.old_cols = []\n \n self.output_size = output_size\n self.hidden_size = hidden_size # default\n self.feature_size = feature_size\n \n self.fc1 = nn.Linear(self.hidden_size, 100)\n self.classifier = nn.Linear(100, self.output_size)\n \n if len(old_cols) > 0:\n self.old_fc1s = ListModule()\n self.base_1 = nn.Sequential(\n nn.Linear(100 * len(old_cols), 100),\n nn.ReLU(),\n )\n self.base_2 = nn.Sequential(\n nn.Linear(100 * len(old_cols), 100),\n nn.ReLU(),\n )\n \n self.adaptor1 = nn.Sequential(AlphaModule(100 * len(old_cols)),\n self.base_1)\n self.adaptor2 = nn.Sequential(AlphaModule(100 * len(old_cols)),\n self.base_2)\n \n for old_col in old_cols:\n self.old_fc1s.append(\n nn.Sequential(nn.Linear(self.hidden_size, 100), nn.ReLU()))\n self.old_fc1s[-1][0].load_state_dict(old_col.fc1.state_dict())\n 
\n # ptm\n self.ptm = ptm.lower()\n assert self.ptm in supported_ptm\n ptm_ = import_from(\"transformers\", supported_ptm[self.ptm][0] + \"Model\")\n \n self.encoder = ptm_.from_pretrained(supported_ptm[self.ptm][1], output_hidden_states=True)\n if tokenizer is not None:\n self.encoder.resize_token_embeddings(len(tokenizer))\n self.prob_l = prob_l\n \n self.require_proto = require_proto\n \n # todo : modify net into one network\n if self.require_proto:\n # prototype-based classification\n self.net = nn.Sequential(\n self.fc1\n )\n self.net_ = nn.Sequential(\n self.encoder,\n self.fc1,\n self.encoder_adaptor,\n nn.ReLU(),\n )\n else:\n self.classifier = nn.Linear(self.feature_size, self.output_size, bias=True)\n self.net = nn.Sequential(\n self.classifier\n )\n self.net_ = nn.Sequential(\n self.encoder,\n self.classifier\n )\n \n self.reset_parameters()", "def naive_bayes_cm(self,X_columns, y_column,cv_kfold=10, class_bins=0, bin_strat='uniform', feature_selection=True):\n\n try:\n\n valid_strategy = ('uniform', 'quantile', 'kmeans')\n if bin_strat not in valid_strategy:\n raise ValueError(\"Valid options for 'bin_strat' are {}. \"\n \"Got bin_strat={!r} instead.\"\n .format(valid_strategy, bin_strat))\n valid_feature_selection = {True,False}\n \n if feature_selection not in valid_feature_selection:\n raise ValueError(\"Valid options for 'bin_strat' are {}. \"\n \"Got feature_selection={!r} instead.\"\n .format(valid_feature_selection, feature_selection)) \n\n y_true, y_pred, accuracy = self.naive_bayes(X_columns, y_column, cv_kfold=cv_kfold, class_bins=class_bins, bin_strat=bin_strat, feature_selection=feature_selection)\n cm = confusion_matrix(y_true, y_pred)\n cm_norm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])*100\n \n ticks = []\n\n if(class_bins!=0):\n y = self.df_input[[y_column]]\n est = KBinsDiscretizer(n_bins=class_bins, encode='ordinal', strategy='kmeans')\n if(bin_strat=='percentile'):\n est = KBinsDiscretizer(n_bins=class_bins, encode='ordinal', strategy='percentile')\n elif(bin_strat=='uniform'):\n est = KBinsDiscretizer(n_bins=class_bins, encode='ordinal', strategy='uniform')\n new_y = est.fit_transform(y[[y.columns[0]]])\n new_df = pd.DataFrame(new_y)\n edges = est.bin_edges_[0]\n new_df = pd.concat([new_df,y],axis=1)\n first = True\n for bins in new_df[0].unique():\n if (first):\n ticks.append(str(int(round(edges[int(bins)])))+\" - \"+str(int(round(edges[int(bins+1)]))))\n first = False\n else:\n ticks.append(str(int(round(edges[int(bins)]))+1)+\" - \"+str(int(round(edges[int(bins+1)]))))\n\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n ax.figure.colorbar(im, ax=ax)\n\n ax.set(xticks=np.arange(cm.shape[1]),yticks=np.arange(cm.shape[0]),ylabel='True Label',xlabel='Predicted Label')\n\n thresh = cm.max() / 2\n for x in range(cm_norm.shape[0]):\n for y in range(cm_norm.shape[1]):\n if(x==y):\n ax.text(y,x,f\"{cm[x,y]}({cm_norm[x,y]:.2f}%)\",ha=\"center\", va=\"center\", fontsize=12, color=\"white\" if cm[x, y] > thresh else \"black\")\n else:\n ax.text(y,x,f\"{cm[x,y]}({cm_norm[x,y]:.2f}%)\",ha=\"center\", va=\"center\", color=\"white\" if cm[x, y] > thresh else \"black\")\n ax.annotate(\"Accuracy: \"+ str(accuracy),xy=(0.25, 0.9), xycoords='figure fraction')\n if(class_bins!=0):\n plt.xticks(np.arange(cm.shape[1]),ticks)\n plt.yticks(np.arange(cm.shape[0]),ticks)\n plt.title(\"Naive Bayes Confusion Matrix (\"+y_column+\")\", y=1.05)\n plt.subplots_adjust(left=0)\n plt.show()\n except Exception as e:\n print(e)", "def 
multiclass_toy_data(): \n #dataset = np.zeros((10,5), np.int)\n dataset = np.array([[0,0,0,0,4],\n [0,0,0,0,5],\n [1,3,0,0,0],\n [3,1,0,0,1],\n [0,0,6,2,0],\n [0,0,0,0,0],\n [0,0,1,7,2], \n [0,0,5,1,5],\n [0,0,34,0,0],\n [0,0,3,0,0]])\n Y = np.array([3,3,2,2,1,0,1,1,0,0])\n #for i in range(10):\n #for j in range(5):\n #dataset[i][j] = np.random.randint(0,10) \n dataset = np.column_stack((dataset, Y))\n return (dataset)", "def glasses_data_preprocessing(data_directory, filename_column, target_column, training_percentage_size=0.85,\n batches_size=16, validation_split=0.15, img_size=(96, 96), color_mode='rgb',\n horizontal_flip=True):\n # Loading the csv file\n path = os.path.join('../Datasets', data_directory)\n # The sep parameter chosen according to the delimiter adopted in labels.csv\n # Take the first 600 examples\n dataset_labels = pd.read_csv(os.path.join(path, 'labels.csv'), sep='\\t', dtype='str').head(600)\n dataset_labels[target_column] = list(map(str, [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0,\n 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0,\n 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1,\n 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,\n 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0,\n 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0,\n 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0,\n 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,\n 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0,\n # Others 300\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0,\n 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,\n 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0\n ]))\n # Divide data in two sets: one for training and one for testing\n train_folder = os.path.join(path, 'img')\n test_folder = os.path.join(path + '_test', 'img')\n # Division will be made only if the testing directory does not already exist\n if not os.path.isdir(test_folder):\n # Create the Test dataset folder\n # If parents is True, any missing parents of the folder will be created\n # If exist_ok is True, an Error is raised if the directory already exists\n Path(test_folder).mkdir(parents=True, exist_ok=True)\n # Sorted list of all the images available\n files = sorted(os.listdir(train_folder), key=lambda x: int(x.split(\".\")[0]))\n # Simple random sampling to select examples for the test 
dataset\n images_testing = sorted(sample(files, round(len(files) * (1 - training_percentage_size))))\n # Move all the test images into the related folder\n for file in images_testing:\n shutil.move(os.path.join(train_folder, file), test_folder)\n # List of all the images within the training and testing folders\n random_test_list = sorted([dataset_labels[dataset_labels[filename_column] == i].index[0]\n for i in os.listdir(test_folder)])\n random_training_list = sorted([dataset_labels[dataset_labels[filename_column] == i].index[0]\n for i in os.listdir(train_folder)])\n\n # Prepare the training, test and validation batches\n # Select labels associated to the images inside the training and test folders\n training_labels = dataset_labels.iloc[[i for i in random_training_list]]\n test_labels = dataset_labels.iloc[[i for i in random_test_list]]\n # With the following line the validation_split passed as argument becomes equal to the percentage of the total\n # dataset (and not anymore to the percentage of the training dataset dedicated to the validation dataset)\n validation_split = validation_split / training_percentage_size\n # ImageDataGenerator generates batches of images with real-time data augmentation\n image_generator = ImageDataGenerator(rescale=1. / 255., validation_split=validation_split,\n horizontal_flip=horizontal_flip)\n # It produces batches of images everytime it is called\n training_batches = image_generator.flow_from_dataframe(dataframe=training_labels, directory=train_folder,\n x_col=filename_column, y_col=target_column,\n subset=\"training\", batch_size=batches_size, seed=42,\n color_mode=color_mode, shuffle=True, target_size=img_size)\n # No data augmentation applied for validation and test data\n image_generator = ImageDataGenerator(rescale=1. 
/ 255., validation_split=validation_split)\n valid_batches = image_generator.flow_from_dataframe(dataframe=training_labels, directory=train_folder,\n x_col=filename_column, y_col=target_column, subset=\"validation\",\n batch_size=batches_size, seed=42, shuffle=True,\n color_mode=color_mode, target_size=img_size)\n test_batches = image_generator.flow_from_dataframe(dataframe=test_labels, directory=test_folder,\n x_col=filename_column, y_col=target_column,\n color_mode=color_mode, batch_size=batches_size,\n shuffle=False, target_size=img_size)\n return training_batches, valid_batches, test_batches", "def predict(self, source):\n\n # First, we need to extract query Column objects from source:\n query_cols = []\n for s in source:\n query_cols += self._read(s)\n logging.info(\"NNetModel: Predicting for {} columns from {} sources\".format(len(query_cols), len(source)))\n\n true_labels = []\n for c in query_cols:\n true_labels.append(c.title)\n\n # Then, pass these query cols to self.labeler.predict as\n start = time.time()\n y_pred_proba = self.labeler.predict_proba(query_cols)\n\n # predictions = []\n predictions_proba = []\n for y_proba in y_pred_proba:\n predictions_proba.append(y_proba[self.classifier_type])\n\n time_elapsed = time.time() - start\n # Finally, convert predictions to the pd dataframe in the required format:\n predictions_proba_dict = []\n for i, c in enumerate(query_cols):\n row = {\"column_name\": c.colname,\n \"source_name\": source,\n \"user_label\": c.title,\n \"model\": self.model_type,\n \"model_description\": self.description\n }\n preds = predictions_proba[i] # numpy array of probabilities for the i-th column\n max = 0\n label = \"unknown\"\n for j, score in enumerate(preds):\n class_name = self.labeler.inverted_lookup[j]\n row[\"scores_\"+class_name] = score\n if score > max:\n max = score\n label = class_name\n row[\"label\"] = label\n row[\"confidence\"] = max\n row[\"running_time\"] = time_elapsed\n predictions_proba_dict.append(row)\n\n\n\n # Return the predictions df:\n return pd.DataFrame(predictions_proba_dict)", "def __multilabel_processing(self):\n # read the raw dataset\n self.data['image_name'] = self.data['image_name'].map(lambda x: '{}.{}'.format(x, img_format))\n self.data['tags'] = self.data['tags'].map(lambda x: x.split())\n\n # create a df with the same number of rows as the dataset filled with the name of the unique values in tags\n label_names = self.data['tags'].explode().unique().tolist()\n label_df = pd.DataFrame([label_names] * self.data.shape[0], columns=label_names)\n\n # binarize the labels according to if they exist for each image or not\n self.data = pd.concat([self.data, label_df], axis=1)\n self.data[['image_name'] + label_names] = self.data.apply(lambda x: pd.Series([x[0]] + [1 if label in x[1] else 0 for label in x[2:]]), axis=1)", "def create_classifier(rows, columns, encoder, units):\n\n # define the input\n input = Input(shape=(rows, columns, 1))\n x = input\n\n # pass the input through the encoder\n x = encoder(x)\n\n # flatten\n x = Flatten()(x)\n\n # pass then the result through two fully-connected layers\n x = Dense(units=units, activation='relu')(x)\n x = Dense(units=10, activation='softmax')(x)\n\n # create the model and return it\n classifier = Model(input, x, name=\"classifier\")\n return classifier", "def __init__(self, graph, cell_classifier,\n label='katz_genotype',\n **kwargs):\n\n # store label and attribute field names\n self.label = label\n\n # run community detection and store graph\n self.graph = graph\n\n # store 
cell classifier\n self.cell_classifier = cell_classifier\n\n # build genotype labeler based on community classifier\n self.labeler = self.build_classifier(**kwargs)", "def prepareSplitClassifier(df, models, choice):\n\n\n def classificationOutput(clf, X, Y):\n \"\"\"\n Fit the model and print the classification results\n - confusion_matrix\n - avg scores etc\n \"\"\"\n n_samples = 36\n\n print \"\\n\\nClassifier: \\n %s\" % (clf)\n print \"#\" * 79\n # classifier_gnb = naive_bayes.GaussianNB() # initiating the classifier\n\n clf.fit(X[:n_samples], Y[:n_samples]) # train on first n_samples and test on last 10\n\n expected = Y[n_samples:]\n predicted = clf.predict(X[n_samples:])\n print(\"Classification report:\\n%s\\n\" % (metrics.classification_report(expected, predicted)))\n print(\"\\nConfusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\n\n\n\n def splitclassify(cDf):\n \"\"\"\n Given the dataframe combined with equal fair and unfair apps,\n classify them\n \"\"\"\n cDf = cDf.reindex(np.random.permutation(cDf.index)) # shuffle the dataframe\n featCols = set(cDf.columns)\n featCols.remove('appLabel')\n\n features = cDf[list(featCols)].astype('float')\n\n ## Scale the features to a common range\n min_max_scaler = preprocessing.MinMaxScaler()\n X = min_max_scaler.fit_transform(features.values)\n\n Y = cDf['appLabel'].values\n\n\n if choice == 'all':\n for key in models:\n classifier = models[key]\n classificationOutput(classifier, X, Y)\n else:\n if choice in models:\n classifier = models[choice]\n classificationOutput(classifier, X, Y)\n else:\n print \"Incorrect Choice\"\n\n\n\n fairDf = df[df['appLabel'] == False]\n unfairDf = df[df['appLabel'] == True]\n\n\n # calculate total possible splits of fair data frame relatie to\n # size of unfair dataframe\n splits = len(fairDf) // len(unfairDf)\n\n for i in range(splits):\n clDf = fairDf[i : i+len(unfairDf)].append(unfairDf)\n\n # print fairDf.values, unfairDf.values\n print \"Classifying %d th split of fair apps with unfair app\" % (i)\n print \"-\" * 79\n splitclassify(clDf)\n print \"\\n\\n\"", "def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. 
SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples", "def select_dfcol_as_label(self, col_name, bin_class):\n\n self.labels = self.df[col_name]\n if bin_class is True:\n print(\"not supported yet\")\n else:\n self.label_dict = {label: idx for idx, label in enumerate(\n np.unique(self.df[col_name]))}\n self.inv_label_dict = {v: k for k, v in self.label_dict.items()}\n self.labels.replace(to_replace=self.label_dict, value=None,\n inplace=True)\n if bin_class is True:\n print(\"not supported yet\")\n self.n_classes = np.size(self.labels.value_counts().index)\n self.labels_onehot = self.onehot_encode(self.labels, self.n_classes)\n # key_list = []\n # for key in dict.keys(self.label_dict):\n # key_list.append(key)\n # self.df.reset_index(key_list)\n\n self.true_distribution = self._get_true_distribution()", "def _binarization(self):\n for feat in self.cat_feats:\n lbl = preprocessing.LabelBinarizer()\n lbl.fit(self.dataframe[feat].values)\n val = lbl.transform(self.dataframe[feat].values)\n self.dataframe_d_copy = self.dataframe_d_copy.drop(feat,axis=1)\n \n for j in range(val.shape[1]):\n new_col_name = feat + f'__bin_{j}'\n self.dataframe_d_copy[new_col_name] = val[:,j] \n self.binary_encoders[feat] = lbl\n joblib.dump(self.binary_encoders, f\"{self.output_path}/_binary_encoder.pkl\")\n return self.dataframe_d_copy", "def __init__(self):\n self.data_file = ''\n self.data = pd.DataFrame()\n self.labels = pd.DataFrame()\n self.labels_onehot = pd.DataFrame()\n self.df = pd.DataFrame()\n self.df_perm = pd.DataFrame() # for debug purpose\n self.n_samples = 0\n self.n_features = 0\n self.label_dict = {}\n self.inv_label_dict = {}\n self.n_classes = 0\n self.batch_size = 0\n self.n_batch = 0\n self.current_batch_idx = 0\n self.true_distribution = []", "def dummify_features(df):\n colnames = df.columns\n le_dict = {}\n for col in colnames:\n le_dict[col] = preprocessing.LabelEncoder()\n le_dict[col].fit(df[col])\n df.loc[:, col] = le_dict[col].transform(df[col])\n\n enc = preprocessing.OneHotEncoder()\n enc.fit(df)\n X = enc.transform(df)\n\n dummy_colnames = [cv + '_' + str(modality) for cv in colnames for modality in le_dict[cv].classes_]\n # for cv in colnames:\n # for modality in le_dict[cv].classes_:\n # dummy_colnames.append(cv + '_' + modality)\n\n return X, dummy_colnames, enc", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, type, embedding_size: tuple): \n super(AbstractClassifier, self).__init__()\n \n # CNN TODO: shape checks\n self.layer1 = nn.Sequential(\n nn.Conv1d(1, 20, 
kernel_size=2, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv1d(20, 50, kernel_size=2, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2, stride=2))\n self.dropout = nn.Dropout()\n self.fc1 = nn.Linear(50*44, 1024)\n self.fc2 = nn.Linear(1024, 2)", "def __init__(self, df, cat_features, enc_type, handle_na=False ):\n self.df = df\n self.cat_features = cat_features\n self.enc_type = enc_type\n self.label_encoder = dict()\n self.binary_encoder = dict()\n self.ohe = None\n self.handle_na = handle_na\n \n if self.handle_na:\n for cat in self.cat_features:\n self.df.loc[:,cat] = self.df.loc[:,cat].astype('str').fillna('-9999999')\n self.output_df = self.df.copy(deep=True)", "def splitclassify(cDf):\n cDf = cDf.reindex(np.random.permutation(cDf.index)) # shuffle the dataframe\n featCols = set(cDf.columns)\n featCols.remove('appLabel')\n\n features = cDf[list(featCols)].astype('float')\n\n ## Scale the features to a common range\n min_max_scaler = preprocessing.MinMaxScaler()\n X = min_max_scaler.fit_transform(features.values)\n\n Y = cDf['appLabel'].values\n\n\n if choice == 'all':\n for key in models:\n classifier = models[key]\n classificationOutput(classifier, X, Y)\n else:\n if choice in models:\n classifier = models[choice]\n classificationOutput(classifier, X, Y)\n else:\n print \"Incorrect Choice\"", "def Catboost(df, test_size,col_dummies):\n from sklearn.model_selection import train_test_split\n from catboost import CatBoostRegressor\n # Define input\n X = df.drop(['target'], axis=1)\n # Set validation\n y = df['target']\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n Cb = CatBoostRegressor(iterations=200,\n learning_rate=0.02,\n depth=12,\n eval_metric='RMSE',\n bagging_temperature = 0.2)\n column_index = [X_final.columns.get_loc(c) for c in col_dummies if c in X_final]\n # Fit model\n clf = Cb.fit(X_train, y_train,cat_features=column_index)\n print('Linear Regression RMSE',compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels", "def train_classifier(images_path):\n car_imgs = get_images(images_path + '/vehicles/')\n non_car_imgs = get_images(images_path + '/non-vehicles/')\n\n print('Computing car features')\n car_features = extract_features(car_imgs,\n color_space=COLOR_SPACE,\n spatial_size=SPATIAL_SIZE,\n hist_bins=HIST_BINS,\n orient=ORIENT,\n pix_per_cell=PIX_PER_CELL,\n cell_per_block=CELL_PER_BLOCK,\n hog_channel=HOG_CHANNEL,\n spatial_feat=SPATIAL_FEAT,\n hist_feat=HIST_FEAT,\n hog_feat=HOG_FEAT)\n print(len(car_features))\n\n print('Computing non-car features')\n non_car_features = extract_features(non_car_imgs,\n color_space=COLOR_SPACE,\n spatial_size=SPATIAL_SIZE,\n hist_bins=HIST_BINS,\n orient=ORIENT,\n pix_per_cell=PIX_PER_CELL,\n cell_per_block=CELL_PER_BLOCK,\n hog_channel=HOG_CHANNEL,\n spatial_feat=SPATIAL_FEAT,\n hist_feat=HIST_FEAT,\n hog_feat=HOG_FEAT)\n print(len(non_car_features))\n \n X = np.vstack((car_features, non_car_features)).astype(np.float64) \n print('X shape: {}'.format(X.shape))\n # Fit a per-column scaler\n X_scaler = 
StandardScaler().fit(X)\n # Apply the scaler to X\n scaled_X = X_scaler.transform(X)\n\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features)), np.zeros(len(non_car_features))))\n\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\n # Use a linear SVC \n svc = LinearSVC()\n # Check the training time for the SVC\n t=time.time()\n svc.fit(X_train, y_train)\n t2 = time.time()\n print(round(t2-t, 2), 'Seconds to train SVC...')\n # Check the score of the SVC\n print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n # Check the prediction time for a single sample\n t=time.time()\n\n return svc, X_scaler", "def lblencoder(self):\n for i in self.data.columns:\n if self.data[i].dtype=='object':\n lbl = preprocessing.LabelEncoder()\n lbl.fit(list(self.data[i].values))\n self.data[i] = lbl.transform(list(self.data[i].values))\n \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]", "def get_biases_features_labels(data_dir):\n g = ds.GraphData(data_dir)\n nodes = g.get_all_nodes(0)\n nodes_list = nodes.tolist()\n row_tensor = g.get_node_feature(nodes_list, [1, 2])\n features = row_tensor[0]\n features = features[np.newaxis]\n\n labels = row_tensor[1]\n\n nodes_num = labels.shape[0]\n class_num = labels.max() + 1\n labels_onehot = np.eye(nodes_num, class_num)[labels].astype(np.float32)\n\n neighbor = g.get_all_neighbors(nodes_list, 0)\n node_map = {node_id: index for index, node_id in enumerate(nodes_list)}\n adj = np.zeros([nodes_num, nodes_num], dtype=np.float32)\n for index, value in np.ndenumerate(neighbor):\n if value >= 0 and index[1] > 0:\n adj[node_map[neighbor[index[0], 0]], node_map[value]] = 1\n adj = adj[np.newaxis]\n biases = adj_to_bias(adj)\n\n return biases, features, labels_onehot", "def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.LinearSVC(class_weight='auto',C=1.0)\n self.classifier.fit(X, Y)" ]
[ "0.61327016", "0.60631764", "0.5974141", "0.5968611", "0.5860819", "0.5854023", "0.58511835", "0.5840595", "0.58283985", "0.5798087", "0.5785424", "0.57740974", "0.5768317", "0.57531476", "0.5741446", "0.5738302", "0.5718393", "0.5714158", "0.5706167", "0.5693297", "0.5693103", "0.5692755", "0.5684473", "0.56756455", "0.56747216", "0.5667269", "0.56600225", "0.56581575", "0.5649493", "0.5636323" ]
0.7302397
0
Predict labels for all columns in source
def predict(self, source): # First, we need to extract query Column objects from source: query_cols = [] for s in source: query_cols += self._read(s) logging.info("NNetModel: Predicting for {} columns from {} sources".format(len(query_cols), len(source))) true_labels = [] for c in query_cols: true_labels.append(c.title) # Then, pass these query cols to self.labeler.predict as start = time.time() y_pred_proba = self.labeler.predict_proba(query_cols) # predictions = [] predictions_proba = [] for y_proba in y_pred_proba: predictions_proba.append(y_proba[self.classifier_type]) time_elapsed = time.time() - start # Finally, convert predictions to the pd dataframe in the required format: predictions_proba_dict = [] for i, c in enumerate(query_cols): row = {"column_name": c.colname, "source_name": source, "user_label": c.title, "model": self.model_type, "model_description": self.description } preds = predictions_proba[i] # numpy array of probabilities for the i-th column max = 0 label = "unknown" for j, score in enumerate(preds): class_name = self.labeler.inverted_lookup[j] row["scores_"+class_name] = score if score > max: max = score label = class_name row["label"] = label row["confidence"] = max row["running_time"] = time_elapsed predictions_proba_dict.append(row) # Return the predictions df: return pd.DataFrame(predictions_proba_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def predict_labels(model, x_test):\n \n pred = model.predict(x_test)\n #pred_labels = model.predict_classes(x_test) # depricated\n pred_labels = np.argmax(model.predict(x_test), axis=-1)\n \n return pred, pred_labels", "def predict(self, X):", "def predict(self, X):", "def predict(self, unknown):\n for title in unknown:\n for ind in range(len((unknown[list(unknown.keys())[0]]))):\n unknown[title][ind] = (unknown[title][ind] - self.normalization_n[ind]) / (self.normalization_d[ind])\n print(unknown)\n unknown_labels = {}\n for title in unknown:\n neighbors = self.k_neighbors(unknown[title], self.dataset, self.k)\n unknown_labels[title] = self.rate(neighbors, self.labels)\n return unknown_labels", "def predict(self, X, **kwargs):\n\n X = sanitize_dataframe(X)\n\n for c in set(self._features).difference(set(X.columns.values)):\n X = X.assign(**{c: 1})\n\n X[\"label_prediction\"] = self._base_model.predict(X)\n\n return self._model.predict(X[self._features], **kwargs)", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def infer_data_labels(X_labels, cluster_labels):\r\n #Empty array of len(X)\r\n predicted_labels = np.zeros(len(X_labels)).astype(np.uint8)\r\n \r\n for i, cluster in enumerate(X_labels):\r\n for key, value in cluster_labels.items():\r\n if cluster in value:\r\n predicted_labels[i] = key\r\n \r\n return predicted_labels", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "def predict(self, X): \n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n # Input validation\n X = check_array(X)\n\n j= 0\n predicted_labels = np.array([])\n while(j < X.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < X.shape[0] else X.shape[0]\n current_batch = X[j:current_batch_end]\n self._feedforward(current_batch)\n predicted_labels = np.append(predicted_labels, np.take(self.map_labels, self.bmu_indices))\n j = current_batch_end\n \n return predicted_labels", "def predict_only(self):", "def predict(self, x):\n pred_labels = np.zeros((x.shape[0], 10))\n\n N = len(self.NET)\n for i in range(N):\n\n inputs = self.apply_dct_permutation(x.copy(), self.permutation[i])\n pred_labels += self.NET[i].model.predict(inputs)\n\n return pred_labels", "def label_extraction(self) -> None:\n self.df[\"label\"] = self.df[\"y\"]", "def _predict(self, X):\n predictions = np.asarray([clf.predict(X) for clf in self.clfs_]).T\n predicted_labels = self.combiner.combine(predictions)\n return predicted_labels", "def predict(self, df):\n results = [] \n _ds = pdfds.DataFrameDataset(df, self.fields) \n _iter = BucketIterator(_ds, batch_size=16, sort_key=lambda x: len(x.text),\n train=False, sort=True, sort_within_batch=True)\n self.odel.eval()\n with torch.no_grad():\n for (labels, text), _ in _iter:\n labels = labels.type(torch.LongTensor)\n text = text.type(torch.LongTensor)\n _, output = self.model(text, labels)\n sm = torch.nn.Softmax(dim=1)\n results.extend( sm(output).tolist()[1] )\n return results", "def predict(self, 
xs, **kwargs):", "def predict(self, source):\n # TODO: track run time\n logging.info(\"Predicting with DINTModel for source {}\".format(source))\n if source not in self.allowed_sources:\n logging.warning(\"Source '{}' not in allowed_sources. Skipping it.\".format(source))\n return None\n # upload source to the schema matcher server\n matcher_dataset = self.server.create_dataset(file_path=os.path.join(\"data\", \"sources\", source + \".csv\"),\n description=\"testdata\",\n type_map={})\n start = time.time()\n predict_df = self.classifier.predict(matcher_dataset).copy()\n predict_df[\"running_time\"] = time.time() - start\n column_map = dict([(col.id, col.name) for col in matcher_dataset.columns])\n predict_df[\"column_name\"] = predict_df[\"column_id\"].apply(lambda x: column_map[x])\n predict_df[\"source_name\"] = source\n predict_df[\"model\"] = self.model_type\n predict_df[\"model_description\"] = self.description\n label_dict = self._construct_labelData(matcher_dataset,\n filepath=os.path.join(\"data\", \"labels\", source + \".columnmap.txt\"),\n header_column=\"column_name\",\n header_label=\"semantic_type\")\n predict_df[\"user_label\"] = predict_df[\"column_id\"].apply(\n lambda x: label_dict[x] if x in label_dict else 'unknown')\n return predict_df", "def _predict_labels(self) -> pd.Series:\n\n # get the prediction dataset\n data = self._get_prediction_data()\n data_as_array = data.to_numpy()\n\n if self._standardize_data:\n data_as_array = self._final_scaler.transform(data_as_array)\n\n # predict with final model using the optimal threshold\n y_pred = self._final_model.predict_proba(data_as_array)[:,1]\n threshold_predictions = [1 if y > self._optimal_threshold else 0 for y in y_pred]\n\n # create series out of predictions\n y_labels = pd.Series(data = threshold_predictions, index = data.index)\n return y_labels", "def predict(self):\n self.get_test_data()\n predicted_labels = []\n for row in self.test_data:\n predicted_labels.append(DecisionTree.predict_row(self.classifier, row))\n return predicted_labels", "def predict(test_dataset,test_tX,weights):\n for idx, dataset in enumerate(test_tX):\n test_dataset[idx]['Prediction'] = predict_labels(weights[idx],dataset)\n return test_dataset", "def predict_label(self, x):\n best_path_ids, label_col = [], []\n for i in range(len(x)):\n # forward_var denote end this step each node will get max score\n forward_var = np.log(self.init_probability)\n\n for j in range(len(x[i])):\n if j == 0:\n forward_var += np.log(self.emission[x[i][j]].reshape(-1, 1))\n\n else:\n current_score, current_ids = np.zeros((self.n_classes, 1)), np.zeros(self.n_classes, dtype=int)\n for next_tag in range(self.n_classes):\n next_score = forward_var + np.log(self.transition[next_tag].reshape(-1, 1))\n max_score_id = np.argmax(next_score.reshape(-1))\n\n current_score[next_tag] = np.max(next_score)\n current_ids[next_tag] = max_score_id\n\n # update forward_var with each step\n forward_var = current_score + np.log(self.emission[x[i][j]].reshape(-1, 1))\n best_path_ids.append(current_ids.tolist())\n\n start_node = np.argmax(forward_var)\n label_col.append(self.find_path(start_node, best_path_ids))\n best_path_ids = []\n\n return label_col", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. 
To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def predict(self, predPoints=None):", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict_label(examples_set):\n all_labels = list(('yes', 'no'))\n prediction = 'no'\n\n for label in all_labels:\n all_same_label = True\n for example in examples_set:\n if example[14] != label:\n all_same_label = False\n break\n if all_same_label:\n prediction = label\n break\n return prediction" ]
[ "0.75673765", "0.69536525", "0.6880678", "0.6880678", "0.6878754", "0.6861358", "0.6859072", "0.68183094", "0.6798352", "0.6798352", "0.6798352", "0.6794257", "0.67640674", "0.67569023", "0.6752388", "0.67336595", "0.6707299", "0.6691524", "0.666908", "0.6666647", "0.66116095", "0.6607081", "0.6601145", "0.6584323", "0.65781814", "0.657596", "0.6542705", "0.6542705", "0.6542705", "0.6536772" ]
0.79116446
0
Worker init func for dataloader. The seed of each worker equals num_workers * rank + worker_id + user_seed
def worker_init_fn(worker_id, num_workers, rank, seed): worker_seed = num_workers * rank + worker_id + seed np.random.seed(worker_seed) random.seed(worker_seed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def worker_init_fn(worker_id):\r\n base_seed = torch.IntTensor(1).random_().item()\r\n #print(worker_id, base_seed)\r\n np.random.seed(base_seed + worker_id)", "def worker_init_fn(worker_id: int) -> None:\n worker_info = torch.utils.data.get_worker_info()\n set_rnd(worker_info.dataset, seed=worker_info.seed) # type: ignore[union-attr]", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def worker_init_fn(self, worker_id: int) -> None:\n np.random.seed(np.random.get_state()[1][0] + worker_id + random.randint(1, 1000))\n\n worker_info = torch.utils.data.get_worker_info()\n worker_info.dataset.set_worker_id(worker_id)\n worker_info.dataset.examples, shard_stats = self.get_worker_shard(\n worker_info.dataset.examples, worker_info.num_workers, worker_id\n )\n worker_info.dataset.logger.info(\n f\"Stats for shard created for worker {worker_id}: \\n {shard_stats}\"\n )\n worker_info.dataset.create_language_index_mapping()", "def init_worker(self, worker_id) :\n\n # since this is called in a separate process,\n # we need to get a consistent view of the settings\n startup.main(self.mode, self.rank)\n\n # initialize the random seed for this process\n # we don't use just the worker_id but also the rank\n # so we truly get different random numbers in all workers,\n # not restricted to the current pool\n # note that we get some entropy from the time\n # so different epochs get different data augmentations\n np.random.seed((hash(time())\n + (settings.RANK * torch.utils.data.get_worker_info().num_workers\n + worker_id)) % 2**32)", "def worker_init_fn(worker_id):\n worker_info = torch.utils.data.get_worker_info() # type: ignore\n if hasattr(worker_info.dataset, \"transform\") and hasattr(worker_info.dataset.transform, \"set_random_state\"):\n worker_info.dataset.transform.set_random_state(worker_info.seed % (2 ** 32))", "def worker_init_reset_seed(worker_id: int):\n initial_seed = torch.initial_seed() % 2**31\n seed_all_rng(initial_seed + worker_id)", "def seed_worker(_worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def _seed_npy_before_worker_init(worker_id, seed, worker_init_fn=None):\n try:\n import numpy as np\n np.random.seed(seed + worker_id)\n except ImportError:\n pass\n\n if worker_init_fn is not None:\n return worker_init_fn(worker_id)", "def __init__(self, run, expname):\n logger.debug('Initializing worker {}.'.format(rank))\n self.run = int(run)\n self.expname = expname\n bcast_var = None\n dsname = comm.bcast(bcast_var, root=0)\n print(dsname)\n \n print('********** Start setup.')\n t0 = time.time()\n self.dsIdx = psana.DataSource(str(dsname))\n logger.info('********** Datasource on rank {}: {}s'.format(rank, time.time()-t0))\n self.dsIdxRun = next(self.dsIdx.runs())\n self.parse_detectors()\n logger.info('Rank {} has datasource and detectors.'.format(rank))\n print('********** Setup on rank {}: {}s'.format(rank, time.time()-t0))\n return", "def _initialize_worker_prior(self):\r\n df = pd.read_csv(self._filepath, sep='\t')\r\n self._workers_id = df['!amt_worker_ids'].unique().tolist()\r\n for worker_id in self._workers_id:\r\n self._workers_prior[worker_id] = [self._c0, self._d0]", "def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = 
get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, self.random_seed, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n return pool_fn", "def init(number_of_workers=0):\n global _wq, _use_workers\n\n if number_of_workers:\n _use_workers = number_of_workers\n else:\n _use_workers = benchmark_workers()\n\n # if it is best to use zero workers, then use that.\n _wq = WorkerQueue(_use_workers)", "def setUp(self) :\n self.longMessage = True\n logger = corAna.makeLogger(isTestMode=True,isMaster=True,isViewer=True,isServer=True,rank=0)\n isFirstWorker = True\n self.numTimes = 5\n numDataPointsThisWorker = 1\n\n self.workerData = corAna.WorkerData(logger, isFirstWorker, self.numTimes,\n numDataPointsThisWorker, addRemoveCallbackObject = None)", "def initialize(self, seed=None):\r\n self.seed(seed)", "def init_distributed(backend, world_size, rank, checkpoint_dir):\n # multi-gpu initial\n logger.debug(f'Initializing {world_size} workers')\n # Remove the init file from previous version\n init_dir = checkpoint_dir / 'shared_distributed'\n if init_dir.is_file():\n rm_file(init_dir)\n\n init_dir.mkdir(parents=True, exist_ok=True)\n init_file = init_dir / f'slurm-{slurm.job_id}'\n init_method = init_file.resolve().as_uri()\n dist.init_process_group(backend, world_size=world_size, rank=rank, init_method=init_method)\n logger.debug('Init finished')", "def _get_executor_init(self, workers):\n raise NotImplementedError", "def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, None, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n\n return pool_fn", "def __init__(self, n_neighbors=6, dim=2, seed=None, n_jobs=1):\n self.n_neighbors = n_neighbors\n self.dim = dim\n self.seed = seed\n self.n_jobs = n_jobs", "def init_worker(*shared_args_list):\n global SHARED_ARGS\n SHARED_ARGS = shared_args_list", "def evaluate_system__initialize_workers(opts, dictionary, features, labels):\n global evaluate_system__worker_cache\n evaluate_system__worker_cache = {\"opts\": opts, \"dictionary\": dictionary, \"features\": features, \"labels\": labels}", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "def __init__(self, nn_architecture, seed=99):\n self.nn_architecture = nn_architecture\n self.seed = seed", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def _setup_przs():\n # Initialize RNG Generators\n comm.get().g0 = torch.Generator()\n comm.get().g1 = torch.Generator()\n\n # Generate random seeds for Generators\n # NOTE: Chosen seed can be any number, but 
we choose as a random 64-bit\n # integer here so other parties cannot guess its value.\n\n # We sometimes get here from a forked process, which causes all parties\n # to have the same RNG state. Reset the seed to make sure RNG streams\n # are different in all the parties. We use numpy's random here since\n # setting its seed to None will produce different seeds even from\n # forked processes.\n import numpy\n\n numpy.random.seed(seed=None)\n next_seed = torch.tensor(numpy.random.randint(-2 ** 63, 2 ** 63 - 1, (1,)))\n prev_seed = torch.LongTensor([0]) # placeholder\n\n # Send random seed to next party, receive random seed from prev party\n world_size = comm.get().get_world_size()\n rank = comm.get().get_rank()\n if world_size >= 2: # Otherwise sending seeds will segfault.\n next_rank = (rank + 1) % world_size\n prev_rank = (next_rank - 2) % world_size\n\n req0 = comm.get().isend(tensor=next_seed, dst=next_rank)\n req1 = comm.get().irecv(tensor=prev_seed, src=prev_rank)\n\n req0.wait()\n req1.wait()\n else:\n prev_seed = next_seed\n\n # Seed Generators\n comm.get().g0.manual_seed(next_seed.item())\n comm.get().g1.manual_seed(prev_seed.item())", "def init_pool_generator(gens, random_seed=None, id_queue=None):\n global _SHARED_SEQUENCES\n _SHARED_SEQUENCES = gens\n\n worker_proc = multiprocessing.current_process()\n\n # name isn't used for anything, but setting a more descriptive name is helpful\n # when diagnosing orphaned processes.\n worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)\n\n if random_seed is not None:\n np.random.seed(random_seed + worker_proc.ident)\n\n if id_queue is not None:\n # If a worker dies during init, the pool will just create a replacement.\n id_queue.put(worker_proc.ident, block=True, timeout=0.1)", "def initialize_weights(self, seed=None):\r\n if seed!=None:\r\n np.random.seed(seed)\r\n self.weights = np.random.randn(self.number_of_nodes,self.input_dimensions)" ]
[ "0.83711416", "0.82632536", "0.79382265", "0.79382265", "0.7899612", "0.78742003", "0.7825774", "0.7457807", "0.7354716", "0.7296145", "0.7219687", "0.6539588", "0.65001684", "0.63125265", "0.6237946", "0.6194707", "0.60962236", "0.6094651", "0.60454345", "0.6043736", "0.6035279", "0.60335994", "0.59992945", "0.59931916", "0.5985856", "0.5953267", "0.5953267", "0.5939413", "0.5929349", "0.59196913" ]
0.86481786
1
If the file "database2.sqlite" or "database3.sqlite" is damaged, the music data is reset and new databases are created
def error_correction(): if not path.exists("Databases"): mkdir("Databases") def check_db(db_name): conn = sqlite3.connect(f"Databases/{db_name}") cursor = conn.cursor() try: cursor.execute("SELECT * FROM user_music WHERE name=?", (encode_text("test_name"),)).fetchone() except sqlite3.DatabaseError: conn.close() remove(f"Databases/{db_name}") conn = sqlite3.connect(f"Databases/{db_name}") cursor = conn.cursor() try: cursor.execute("SELECT * FROM user_playlists WHERE name=?", (encode_text("test_name"),)).fetchone() except sqlite3.DatabaseError: conn.close() remove(f"Databases/{db_name}") conn = sqlite3.connect(f"Databases/{db_name}") cursor = conn.cursor() try: cursor.execute("CREATE TABLE user_music (name, author, url, song_time, num, song_id)") except: pass try: cursor.execute("CREATE TABLE user_playlists (name, music, playlist_id)") except: pass conn.commit() conn.close() check_db("database2.sqlite") # added music & playlists check_db("database3.sqlite") # downloaded music
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')", "def __handle_invalid_database_file(self, database_name: str) -> None:\n print(f'ERROR: {database_name} is not a sqlite3 file!')\n delete_file = input('Would you like to delete the old file? (y/n): ')\n\n if delete_file.lower() == 'y':\n print(f'Deleting {database_name}...')\n os.unlink(database_name)\n\n print('Creating new database file...')\n self.conn = self.__connect_to_database(database_name)\n self.__set_up_database(database_name)\n else:\n print(\n '\\nERROR: Cannot continue with invalid database file.\\n'\n 'Please delete or rename the file to continue.'\n )\n sys.exit(1)", "def all(self):\n\tglobal mode\n\tmode=\"./music/\"", "def checkingConnection(self):\r\n if QSqlDatabase.contains():\r\n self.db = QSqlDatabase.database()\r\n self.db.setDatabaseName('database.sqlite')\r\n self.db.open()\r\n else:\r\n self.db = QSqlDatabase.addDatabase(\"QSQLITE\")\r\n self.db.setDatabaseName('database.sqlite')\r\n self.db.open()", "def reset(self):\n os.chdir(self.destination) #Changes directory to the 'Enigma Settings' folder\n self.database = str(input('\\nEnter the name of the database: ' ) + '.db') #Prompts user to enter database name\n self.conn = sqlite3.connect(self.database) #Established a connection to the database\n self.c = self.conn.cursor() #Creates a cursor to traverse the data set", "def destroy(self):\n self.close()\n if self.wantAnyDbm:\n lt = time.asctime(time.localtime())\n trans = maketrans(': ','__')\n t = lt.translate(trans)\n head, tail = os.path.split(self.filepath)\n newFileName = 'UDStoreBak'+t\n if os.path.exists(self.filepath):\n try:\n os.rename(tail, newFileName)\n uber.air.writeServerEvent('Uberdog data store Info', 0 \\\n , 'Creating backup of file: %s saving as: %s' %(tail, newFileName))\n except:\n uber.air.writeServerEvent('Uberdog data store Info', 0 \\\n , 'Unable to create backup of file: %s ' %tail)\n else:\n # Remove the filename with all sufix's\n # .bak, .dir, .dat\n files = os.listdir(head)\n for file in files:\n if file.find(tail)>-1:\n filename, ext = os.path.splitext(file)\n try:\n os.rename(file, newFileName+ext)\n uber.air.writeServerEvent('Uberdog data store Info', 0 \\\n , 'Creating backup of file: %s saving as: %s' %(file,newFileName+ext))\n except:\n uber.air.writeServerEvent('Uberdog data store Info', 0 \\\n , 'Unable to create backup of file: %s ' %newFileName+ext)\n 
else:\n if os.path.exists(self.filepath + '.bu'):\n os.remove(self.filepath + '.bu')\n if os.path.exists(self.filepath):\n os.remove(self.filepath)", "def sqlite_db():\n smm.init_db(\"test.db\")\n yield smm.DATABASE\n os.remove(\"test.db\")", "def update():\n\tglobal songList\n\tglobal songs\n\tsongList=os.listdir(\"./music/\")\n\tsongs=['```']\n\tfor song in songList:\n\t\tif len(songs[-1])>1800:\n\t\t\tsongs[-1]+='```'\n\t\t\tsongs.append('```')\n\t\tif '.mp3' in song:\n\t\t\tsongs[-1]+=song.replace('.mp3','')\n\t\t\tsongs[-1]+='\\n'\n\tsongs[-1]+='```'", "def check_music(self):\n\t\tif self.menu_music_played == 0:\n\t\t\tif self.initial_menu_music_element == self.next_menu_music_element:\n\t\t\t\tself.ingame_music.extend(self.menu_music)\n\t\t\t\tself.music = self.ingame_music\n\t\t\t\tself.music_rand_element = random.randint(0, len(self.ingame_music) - 1)\n\t\t\t\tself.menu_music_played = 1\n\t\t\telse:\n\t\t\t\tself.music = self.menu_music\n\n\t\tif hasattr(self, '_bgsound_old_byte_pos') and hasattr(self, '_bgsound_old_sample_pos'):\n\t\t\tif self._bgsound_old_byte_pos == self.emitter['bgsound'].getCursor(fife.SD_BYTE_POS) and self._bgsound_old_sample_pos == self.emitter['bgsound'].getCursor(fife.SD_SAMPLE_POS):\n\t\t\t\tself.music_rand_element = self.music_rand_element + 1 if \\\n\t\t\t\t\t self.music_rand_element + 1 < len(self.music) else 0\n\t\t\t\tself.play_sound('bgsound', self.music[self.music_rand_element])\n\t\t\t\tif self.menu_music_played == 0:\n\t\t\t\t\tself.next_menu_music_element = self.music_rand_element\n\n\t\tself._bgsound_old_byte_pos, self._bgsound_old_sample_pos = \\\n\t\t\t self.emitter['bgsound'].getCursor(fife.SD_BYTE_POS), \\\n\t\t\t self.emitter['bgsound'].getCursor(fife.SD_SAMPLE_POS)", "def test_separating_existing_and_non_existing_mp3(\n requests_mock: rm_Mocker,\n json_db_mock: str,\n tmp_path: Path,\n lep_dl: LepDL,\n) -> None:\n filename_1 = \"[2021-08-03] # 733. 
A Summer Ramble.mp3\"\n filename_2 = \"[2017-03-11] # LEP on ZEP – My recent interview on Zdenek’s English Podcast [Part 05].mp3\" # noqa: E501,B950\n Path(tmp_path / filename_1).write_text(\"Here are mp3 1 bytes\")\n Path(tmp_path / filename_2).write_text(\"Here are mp3 2 bytes\")\n\n requests_mock.get(\n conf.JSON_DB_URL,\n text=json_db_mock,\n )\n lep_dl.get_remote_episodes()\n lep_dl.files = downloader.gather_all_files(lep_dl.db_episodes)\n audio_files = lep_dl.files.filter_by_type(Audio)\n lep_dl.detach_existed_files(tmp_path, audio_files)\n assert len(lep_dl.existed) == 2\n assert len(lep_dl.non_existed) == 16", "def test_reset_database(self):\r\n # Test resetting the default profile (no profile arguments passed)\r\n profile = self.profile_manager.get('default')\r\n open(profile.get_filepath('presentations.db'), 'w+')\r\n self.assertTrue(os.path.exists(profile.get_filepath('presentations.db')))\r\n reset_database(self.config_dir)\r\n self.assertFalse(os.path.exists(profile.get_filepath('presentations.db')))\r\n\r\n # Test resetting a non-default profile\r\n profile = self.profile_manager.get('not-default')\r\n open(profile.get_filepath('presentations.db'), 'w+')\r\n self.assertTrue(os.path.exists(profile.get_filepath('presentations.db')))\r\n reset_database(self.config_dir, 'not-default')\r\n self.assertFalse(os.path.exists(profile.get_filepath('presentations.db')))", "def play_wakeup_music(self):\n list_of_music_files = []\n for track in os.listdir(project_path + '/music'):\n if track.endswith(\".mp3\"):\n list_of_music_files.append(str(project_path + '/music/' + str(track)))\n\n # figure out random track of the found mp3 files\n random_track = randint(0, len(list_of_music_files)-1)\n\n self.play_mp3_file(list_of_music_files[random_track])", "def song_clear():\r\n try:\r\n # Drop all tables then recreate them.\r\n Base.metadata.drop_all(bind=engine)\r\n print colored.red(\"Database cleared successfully.\", bold=12)\r\n Base.metadata.create_all(bind=engine)\r\n except:\r\n session.rollback()", "def validate_database(self, con, filename):\n # Ensure that we have avalid connection\n con.execute('CREATE TABLE t(id INTEGER NOT NULL)')\n con.close()\n # Make sure that the default database file was created (and clean up)\n assert os.path.isfile(filename)\n os.remove(filename)", "def min_cleanup(self):\n self.past_songs_db.close()", "def first_launch():\r\n if os.path.exists('diary.db'):\r\n return False\r\n else:\r\n return True", "def openNewDB(self):\n if os.path.exists(\"setting.py\"):\n os.remove(\"setting.py\")\n self.application.exit(1)", "def create_new_db():\n global data_base, table\n data_base = asksaveasfilename(title=\"Select file\", filetypes=((\"DATA BASE\", \"*.db\"), (\"all files\", \"*.*\")),\n defaultextension='.db')\n\n if Path(data_base).suffix == '.db':\n create_win_create_table()\n else:\n mistake_db_file()", "def test_creation_when_invalid_database_exists_and_overwrite(self):\n database_filename = \"test.db\"\n\n # Delete the test database if it exists.\n test_database = os.path.join(os.getcwd(), database_filename)\n if os.path.exists(test_database):\n os.remove(test_database)\n\n # Create our pre-existing, _invalid_, database.\n database_creation_statement = \"\"\"\n CREATE TABLE data(\n row_ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n ID VARCHAR,\n Time DATETIME\n );\n \"\"\"\n\n with sqlite3.connect(database_filename) as conn:\n cur = conn.cursor()\n cur.execute(database_creation_statement)\n\n # Generate the database.\n database = 
app.database.Database(database_filename, overwrite=True)\n database.create_database()\n\n # Pull out the table names from the database we've created.\n column_names = extract_column_names(database_filename)\n\n # Assert that they are as expected:\n for column_name in app.database.database_columns:\n self.assertEqual(\n True,\n column_name in column_names,\n \"Database creation process did not yield the column names expected. Missing: {0}\".format(column_name)\n )", "def stop_music(self):\n self.load_music(None)", "def test_creation_when_invalid_database_exists_and_no_overwrite(self):\n database_filename = \"test.db\"\n\n # Delete the test database if it exists.\n test_database = os.path.join(os.getcwd(), database_filename)\n if os.path.exists(test_database):\n os.remove(test_database)\n\n # Create our pre-existing, _invalid_, database.\n database_creation_statement = \"\"\"\n CREATE TABLE data(\n row_ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n ID VARCHAR,\n Time DATETIME\n );\n \"\"\"\n\n with sqlite3.connect(database_filename) as conn:\n cur = conn.cursor()\n cur.execute(database_creation_statement)\n\n # Create the database object, build the database\n database = app.database.Database(database_filename)\n self.assertRaises(sqlite3.DatabaseError, database.create_database)", "def load_music(self, filename):\n self.music = filename\n self.music_playing = False\n if self.is_running:\n if filename is not None:\n cocos.audio.music.control.load(filename)\n else:\n cocos.audio.music.control.stop()", "def delete_all(self, database_type):\n if database_type == \"render\":\n try:\n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n pointer.execute(\"DELETE FROM render_information\")\n\n connection.commit()\n connection.close()\n print(\"deleted render database\")\n\n except:\n print(\"was not able to delete render database\")\n\n if database_type == \"object\":\n try:\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n pointer.execute(\"DELETE FROM object_information\")\n\n connection.commit()\n connection.close()\n print(\"deleted object database\")\n except:\n print(\"was not able to delete object database\")\n if database_type == \"output\":\n try:\n connection = sqlite3.connect(self.filepath_output_database)\n pointer = connection.cursor()\n\n pointer.execute(\"DELETE FROM camera_settings\")\n connection.commit()\n\n pointer.execute(\"DELETE FROM general_settings\")\n connection.commit()\n\n pointer.execute(\"DELETE FROM light_settings\")\n connection.commit()\n\n pointer.execute(\"DELETE FROM objects\")\n connection.commit()\n\n pointer.execute(\"DELETE FROM bounding_boxes\")\n connection.commit()\n\n connection.close()\n print(\"deleted output database\")\n except:\n print(\"was not able to delete output database\")\n\n if database_type == \"all\":\n \n try:\n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n pointer.execute(\"DELETE FROM render_information\")\n\n connection.commit()\n connection.close()\n print(\"deleted content of render database\")\n\n except:\n print(\"was not able to delete render database\")\n\n \n try:\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n pointer.execute(\"DELETE FROM object_information\")\n\n connection.commit()\n connection.close()\n print(\"deleted content of object database\")\n except:\n print(\"was not able to delete object database\")\n\n try:\n connection = 
sqlite3.connect(self.filepath_output_database)\n pointer = connection.cursor()\n\n pointer.execute(\"DELETE FROM camera_settings\")\n connection.commit()\n\n pointer.execute(\"DELETE FROM general_settings\")\n connection.commit()\n\n pointer.execute(\"DELETE FROM light_settings\")\n connection.commit()\n\n pointer.execute(\"DELETE FROM objects\")\n connection.commit()\n\n connection.close()\n print(\"deleted content of output database\")\n except:\n print(\"was not able to delete output database\")", "def test_410_000_non_existant_db(self):\n with TDC() as temp_dir:\n file = Path(temp_dir) / 'database.db'\n self.assertFalse(file.exists(),'Database file exists pre test')\n eng = Engine(file)\n con = eng.connect()\n self.assertTrue(file.exists(), 'Database file does not exists post test')", "def cleanup(self):\n self.all_wav_to_mp3()\n self.past_songs_db.close()\n self.move_tracks_to_music_folder( )\n self.delete_leftovers()\n print \"Cleanup finished\"", "def test_missing_database_file(self):\n # Technically there's a race condition here, but... I'm not\n # particularly fussed about it.\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n app = App(filename)", "def change_music(self, track):\n try:\n if self.bg_volume != 0:\n self.current = self.music_lib[track]\n pygame.mixer.music.load(self.current)\n pygame.mixer.music.play(-1)\n self.current = track\n else:\n pygame.mixer.music.stop()\n except:\n print \"Couldn't load track '\", track + \"'!\"", "def on_stop(self):\n self.songs.save_songs(FILE_NAME)", "def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)" ]
[ "0.69164497", "0.6316394", "0.59935206", "0.5793738", "0.57836765", "0.5769535", "0.5754988", "0.5712814", "0.5695106", "0.5601377", "0.55163604", "0.55131423", "0.54717577", "0.54681504", "0.54675", "0.5466998", "0.5455232", "0.5451918", "0.54459465", "0.54375535", "0.54357386", "0.54247993", "0.5422864", "0.5414518", "0.5400163", "0.5376687", "0.53686726", "0.5367531", "0.5350074", "0.5344231" ]
0.6690796
1
Return parameter score given its current value, max value and parameter weight.
def get_param_score(param, max_value, weight=1): return (math.log(1 + param) / math.log(1 + max(param, max_value))) * weight
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, params):\n\n if self.use_sqrt:\n return self.score_sqrt(params)\n else:\n return self.score_full(params)", "def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy", "def best_value(self):\r\n return self._best_value", "def best_params(self):\n return self.X[np.argmax(self.y.numpy())]", "def weight(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"weight\")", "def get_learning_rate(opt, current, best, counter, learning_rate):\n if current > best:\n best = current\n counter = 0\n elif counter > opt['delay']:\n learning_rate = learning_rate / 10.\n counter = 0\n else:\n counter += 1\n return (best, counter, learning_rate)", "def max_weight(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def get_best_value(self):\n # Todo: implement\n best_value_global = -inf\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n best_value_global = particle.best_value\n return best_value_global # Remove this line", "def max_value(self, state, max_alpha, max_beta, max_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if max_depth <=0 :\r\n return self.score(state)\r\n\r\n v = float(\"-inf\")\r\n for a in state.actions():\r\n v = max(v, self.min_value(state.result(a), max_alpha, max_beta, max_depth - 1))\r\n if v >= max_beta:\r\n return v\r\n max_alpha = max(max_alpha, v)\r\n return v", "def get_score(self):\r\n max_score = None\r\n score = None\r\n\r\n #The old default was None, so set to 1 if it is the old default weight\r\n weight = self.get_weight()\r\n if self.is_scored:\r\n # Finds the maximum score of all student attempts and keeps it.\r\n score_mat = []\r\n for i in xrange(0, len(self.task_states)):\r\n # For each task, extract all student scores on that task (each attempt for each task)\r\n last_response = self.get_last_response(i)\r\n score = last_response.get('all_scores', None)\r\n if score is not None:\r\n # Convert none scores and weight scores properly\r\n for z in xrange(0, len(score)):\r\n if score[z] is None:\r\n score[z] = 0\r\n score[z] *= float(weight)\r\n score_mat.append(score)\r\n\r\n if len(score_mat) > 0:\r\n # Currently, assume that the final step is the correct one, and that those are the final scores.\r\n # This will change in the future, which is why the machinery above exists to extract all scores on all steps\r\n scores = score_mat[-1]\r\n score = max(scores)\r\n else:\r\n score = 0\r\n\r\n if self._max_score is not None:\r\n # Weight the max score if it is not None\r\n max_score = self._max_score * float(weight)\r\n else:\r\n # Without a max_score, we cannot have a score!\r\n score = None\r\n\r\n score_dict = {\r\n 'score': score,\r\n 'total': max_score,\r\n }\r\n\r\n return score_dict", "def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def _get_lip_best(self) -> float:\n pass", "def get_max_score(self):\r\n return sum(self.maxpoints.values())", "def personal_best(scores):\n return max(scores)", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please 
the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def score(self, s):\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product", "def get_weighted_stat(self,stat):\n if stat == 'score': statv=1\n elif stat.startswith('max'): statv=2\n elif stat.startswith('percent'): \n statv=3\n stat='points'\n else:\n raise ValueError, 'Gradebook.get_weighted_stat passed unaccepted argument.'\n wpoints=0\n wmax=0\n for cat in self.__categories.values():\n p = cat.grades.get_stat('points',weighted=True,counted=True)\n if p:\n wpoints+=p\n m = cat.grades.get_stat('weights',counted=True)\n if m:\n wmax+=m\n if statv is 1:\n return wpoints\n if statv is 2:\n return wmax\n else:#percent\n if wmax:\n return 1.0*wpoints/wmax\n else:\n return None", "def max_grade(self) -> float:\n return sum(self.test_weights.values())", "def getWeight(self) -> float:\n ...", "def personal_best(scores: list) -> int:\n return max(scores)", "def get_param_sample_weight(self, name):\n if name == 'negbin_r_0':\n weights = np.asarray(self.model.p_outlier_total[:, 0])\n elif name == 'negbin_r_1':\n weights = np.asarray(self.model.p_outlier_total[:, 1])\n elif name == 'betabin_M_0':\n weights = np.asarray(self.model.p_outlier_allele[:, 0])\n elif name == 'betabin_M_1':\n weights = np.asarray(self.model.p_outlier_allele[:, 1])\n elif name == 'negbin_hdel_mu':\n weights = self._get_hdel_weights()\n elif name == 'negbin_hdel_r_0':\n weights = self._get_hdel_weights() * np.asarray(self.model.p_outlier_total[:, 0])\n elif name == 'negbin_hdel_r_1':\n weights = self._get_hdel_weights() * np.asarray(self.model.p_outlier_total[:, 1])\n elif name == 'betabin_loh_p':\n weights = self._get_loh_weights()\n elif name == 'betabin_loh_M_0':\n weights = self._get_loh_weights() * np.asarray(self.model.p_outlier_allele[:, 0])\n elif name == 'betabin_loh_M_1':\n weights = self._get_loh_weights() * np.asarray(self.model.p_outlier_allele[:, 1])\n norm = weights.sum()\n if norm > 0.:\n return weights / norm\n else:\n print ('nothing for ' + name)\n return None", "def get_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/(self.votes+self.field.weight)", "def score(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n return approx_fprime_cs(params, self.loglike, args=(transformed,))", "def best(self):\n alpha = -1\n beta = +1\n move = self.__negamax(alpha, beta, tt=DictTT())\n return move[1]", "def get_vote_weight(var, target : users.User) -> int:\n # ensure we don't return negative numbers here;\n # we still track them for stacking purposes but anything below 0 counts as 0\n return max(WEIGHT.get(target, 1), 0)", "def weight(self) -> int:\n return pulumi.get(self, \"weight\")", "def weight(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"weight\")", "def pwm_max_score(self):\n if self.max_score is None:\n score = 0\n for row in self.pwm:\n score += log(max(row) / 0.25 + 0.01)\n 
self.max_score = score\n \n return self.max_score" ]
[ "0.6292394", "0.6178509", "0.6064886", "0.6044544", "0.6035095", "0.60076636", "0.5989297", "0.5986231", "0.5984959", "0.5937223", "0.59368867", "0.591551", "0.5853794", "0.5844487", "0.5820323", "0.58165556", "0.58165556", "0.5810112", "0.5798618", "0.5790173", "0.5771708", "0.5756893", "0.57520735", "0.57478666", "0.5738133", "0.57349163", "0.57243365", "0.5723798", "0.56969047", "0.5691449" ]
0.85167205
0
Return repository stats, including criticality score.
def get_repository_stats(repo, additional_params=[]): # Validate and compute additional params first. additional_params_total_weight = 0 additional_params_score = 0 for additional_param in additional_params: try: value, weight, max_threshold = [ int(i) for i in additional_param.split(':') ] except ValueError: print('Parameter value in bad format: ' + additional_param, file=sys.stderr) sys.exit(1) additional_params_total_weight += weight additional_params_score += get_param_score(value, max_threshold, weight) created_since = repo.created_since updated_since = repo.updated_since contributor_count = repo.contributors org_count = len(repo.get_contributor_orgs()) commit_frequency = repo.commit_frequency recent_releases_count = repo.recent_releases updated_issues_count = repo.updated_issues closed_issues_count = repo.closed_issues comment_frequency = repo.comment_frequency dependents_count = repo.get_dependents() total_weight = (CREATED_SINCE_WEIGHT + UPDATED_SINCE_WEIGHT + CONTRIBUTOR_COUNT_WEIGHT + ORG_COUNT_WEIGHT + COMMIT_FREQUENCY_WEIGHT + RECENT_RELEASES_WEIGHT + CLOSED_ISSUES_WEIGHT + UPDATED_ISSUES_WEIGHT + COMMENT_FREQUENCY_WEIGHT + DEPENDENTS_COUNT_WEIGHT + additional_params_total_weight) criticality_score = round( (get_param_score(created_since, CREATED_SINCE_THRESHOLD, CREATED_SINCE_WEIGHT) + get_param_score(updated_since, UPDATED_SINCE_THRESHOLD, UPDATED_SINCE_WEIGHT) + get_param_score(contributor_count, CONTRIBUTOR_COUNT_THRESHOLD, CONTRIBUTOR_COUNT_WEIGHT) + get_param_score(org_count, ORG_COUNT_THRESHOLD, ORG_COUNT_WEIGHT) + get_param_score(commit_frequency, COMMIT_FREQUENCY_THRESHOLD, COMMIT_FREQUENCY_WEIGHT) + get_param_score(recent_releases_count, RECENT_RELEASES_THRESHOLD, RECENT_RELEASES_WEIGHT) + get_param_score(closed_issues_count, CLOSED_ISSUES_THRESHOLD, CLOSED_ISSUES_WEIGHT) + get_param_score(updated_issues_count, UPDATED_ISSUES_THRESHOLD, UPDATED_ISSUES_WEIGHT) + get_param_score(comment_frequency, COMMENT_FREQUENCY_THRESHOLD, COMMENT_FREQUENCY_WEIGHT) + get_param_score(dependents_count, DEPENDENTS_COUNT_THRESHOLD, DEPENDENTS_COUNT_WEIGHT) + additional_params_score) / total_weight, 5) return { 'name': repo.name, 'url': repo.url, 'language': repo.language, 'created_since': created_since, 'updated_since': updated_since, 'contributor_count': contributor_count, 'org_count': org_count, 'commit_frequency': commit_frequency, 'recent_releases_count': recent_releases_count, 'closed_issues_count': closed_issues_count, 'updated_issues_count': updated_issues_count, 'comment_frequency': comment_frequency, 'dependents_count': dependents_count, 'criticality_score': criticality_score, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __gitStatistics(self):\n self.vcs.gitStatistics(self.project.getProjectPath())", "def get_commit_stats(self):\n return self.commit_stats", "def get_stats():\r\n stats = {\r\n \"progress_precent\": 100.0*finished_work_units_amount/work_units_amount,\r\n \"results\": None if work_status == Db.WorkStatusNames.finished_work.value else Db.collect_results(),\r\n #If it's already finished, then all the results were already sent to the main server.\r\n }\r\n return stats", "def get_stats(self):\n return self.stats", "def community_stats(request):\n stats = cache.get(STATS_CACHE_KEY, None)\n if not stats:\n\n stats = fetch(PEOPLE_STATS_URL)\n packages_data = fetch(PACKAGES_STATS_URL)\n if 'meta' in packages_data:\n stats.update({'packages': packages_data['meta']['total_count']})\n\n stats = {'community_stats': stats}\n\n cache.add(STATS_CACHE_KEY, stats, 60 * 60 * 12) # for half a day\n\n return stats", "def get_stats(self):\n return self.manager.get_stats(self)", "def get_stats(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/stats\"\n\n _response = self.connector.http_call(\"get\", _url)\n\n # Update object\n self.stats = _response.json()", "def stats(self):\n return self._solution", "def stats(self):\n return self._solution", "def stats(self):\n return self._solution", "def getStats(self):\n\n raise NotImplementedError", "def stats(self):\n return self._stats", "def get_core_stats(hass):\n return hass.data.get(DATA_CORE_STATS)", "def stats(self):\n return self.rpc.call(MsfRpcMethod.CoreModuleStats)", "def returnPlayerStats(self):\n\t\tplayerStats = [self.name, \n\t\t\t\t\t self.agility, \n\t\t\t\t\t self.personality, \n\t\t\t\t\t self.sanity, \n\t\t\t\t\t self.strength, \n\t\t\t\t\t self.progress]\n\t\treturn playerStats", "def stats(self):\n return super(NoneCache, self).stats()", "def get_general_stats() ->List[BaseStat]:\n return [PositionalTendencies(),\n SpeedTendencies(),\n ItemGoals(),\n DropshotGoals(),\n DropshotBallPhaseTimes(),\n DropshotStats()\n ]", "def get_stats(self):\n stats = \\\n 'cluster: %s\\ncount = %d, size = %d, minvar = %f, avg_dist = %s\\n'\\\n % (self.name, self.count, self.size, self.minvar, self.avg_dist)\n return stats", "def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)", "def get_contribution_score(self):\n self._extract()\n for key in BitBucketEnum.USER_CONTRIBUTION_MAPPING:\n if key.high >= self.total_no_of_repos >= key.low:\n return BitBucketEnum.USER_CONTRIBUTION_MAPPING[key]\n return 0", "def stats(self):\n pass", "def stat_cartridge_health(self):\n raise NotImplementedError", "def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()", "def get_covers_stats(request):\n logger.debug(\"get_covers_stats called\")\n response_data = {}\n\n validation = init_validation(request)\n if 'error' in validation:\n return JsonResponse(validation['data'], status=validation['error'])\n\n headers = {'Content-Type': 'application/json'}\n response = requests.get(validation['mongo_url'] + \"/getStats\", auth=HTTPBasicAuth(MONGO_API_USER, MONGO_API_PWD), verify=MONGO_SERVER_CERTIFICATE, headers=headers)\n\n status_code = response.status_code\n response_body = response.text\n\n if str(status_code) == \"200\":\n return json.loads(response_body)\n\n response_body = {\"result\": \"failure\", \"message\": response.text, \"status_code\": status_code}\n return 
json.loads(response_body)", "def get_issues_stat(self) -> Tuple[int, int, int]:\n return self._summarize(self._repo.get_issues(), self._STALE_ISSUES_DAYS)", "def statistics(self):\n return self.get_statistics()", "def get_test_summary(repo:MLRepo):\n pass", "def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc", "def getJson(repo):\n\n url='https://api.github.com/repos/' + repo + '/stats/punch_card'\n r = requests.get(url, headers={'Authorization': 'token %s' % getToken()})\n if r.status_code == 403:\n raise SystemExit('Rate limited!')\n return r.json()", "def percent_community(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.community_contribution * 100 / total_cost, 2)\n else:\n return 0" ]
[ "0.6341863", "0.60292643", "0.6003116", "0.5998877", "0.58837956", "0.5879955", "0.58450276", "0.57766205", "0.57766205", "0.57766205", "0.5751038", "0.57093024", "0.5699269", "0.56872267", "0.5619458", "0.561718", "0.560706", "0.55628246", "0.54854643", "0.5432927", "0.54281074", "0.5406419", "0.538952", "0.53869987", "0.5382136", "0.537858", "0.5370757", "0.53644127", "0.5338861", "0.5330255" ]
0.71026087
0
Return expiry information given a github token.
def get_github_token_info(g): rate_limit = g.get_rate_limit() near_expiry = rate_limit.core.remaining < 50 wait_time = (rate_limit.core.reset - datetime.datetime.utcnow()).seconds return near_expiry, wait_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_github_auth_token():\n global _cached_github_token\n if _cached_github_token:\n near_expiry, _ = get_github_token_info(_cached_github_token)\n if not near_expiry:\n return _cached_github_token\n\n github_auth_token = os.getenv('GITHUB_AUTH_TOKEN')\n assert github_auth_token, 'GITHUB_AUTH_TOKEN needs to be set.'\n tokens = github_auth_token.split(',')\n wait_time = None\n g = None\n for i, token in enumerate(tokens):\n g = github.Github(token)\n near_expiry, wait_time = get_github_token_info(g)\n if not near_expiry:\n _cached_github_token = g\n return g\n print(f'Rate limit exceeded, sleeping till reset: {wait_time} seconds.',\n file=sys.stderr)\n time.sleep(wait_time)\n return g", "def get_expiry():\n\n return get_or_append_details('expiry', \"Please enter your expiry date, two digits for the month and two digits for the year\")", "def get_token_expiry(public=True):\n if public:\n return now() + EXPIRE_DELTA_PUBLIC\n else:\n return now() + EXPIRE_DELTA", "def get_token_expiry(public=True):\n if public:\n return now() + EXPIRE_DELTA_PUBLIC\n else:\n return now() + EXPIRE_DELTA", "def _get_api_token_exp_from_config():\n return datetime.timedelta(\n **dict(zip(('hours', 'minutes', 'seconds'), map(int, config['app']['auth']['api_token_exp'].split(':'))))\n )", "def token_expiry_date(self):\n return self.__token_expiry_date", "def new_token_expiry_date():\n\treturn timezone.now() + datetime.timedelta(days=TOKEN_VALID_DATE)", "def get_code_expiry():\n return now() + EXPIRE_CODE_DELTA", "def get_code_expiry():\n return now() + EXPIRE_CODE_DELTA", "def organization_get_expired_token(self, client, id, expired_token):\n assert client.get('/organizations/' + id, headers={\n 'Authorization': 'Bearer ' + expired_token})\\\n .status == '401 UNAUTHORIZED'", "def get_data_of_token_holder(token):\n response = requests.get(\n f'{GITHUB_API_URL}/user',\n headers={\n 'Accept': 'application/vnd.github.v3+json',\n 'Authorization': f'token {token}',\n },\n )\n response.raise_for_status()\n return response.json()", "def expires(self):\n return self._data.get('expires')", "def _get_expiration(self, headers):\n expiration_str = headers.get('expires')\n if not expiration_str:\n return 0\n expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')\n delta = (expiration - datetime.utcnow()).total_seconds()\n return math.ceil(abs(delta))", "def expiry(self):\n return self._expiry", "def get_headers(token):\n return {\n \"Accept\": \"application/vnd.github+json\",\n \"Authorization\": f\"Bearer {token}\",\n \"X-GitHub-Api-Version\": \"2022-11-28\",\n }", "def get_github_credentials():\n\n p = subprocess.Popen(\"git config github.accesstoken\",\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n rc = p.wait()\n if rc:\n # failure to get config, so return silently\n return None\n token = p.stdout.readline().strip()\n if token:\n log.debug(\"Found github accesstoken %r\", token)\n return token", "def getExpires(self):\n return self.base.get(\"expires\", [])", "def _get_expire(self):\n return self.__expire", "def mock_expired_data():\n return {\n \"CreditCardNumber\": \"123454567890123456\",\n \"CardHolder\": \"Test\",\n \"ExpirationDate\": \"2014-12-22T03:12:58.019077+00:00\",\n \"SecurityCode\": \"1234\",\n \"Amount\": 100\n }", "def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, 
expires_in=delta)", "def expiration(self):\n return datetime(int(self.exp_year), int(self.exp_month),\n calendar.monthrange(int(self.exp_year), int(self.exp_month))[1],\n 23, 59, 59)", "def get_token(filename='config.ini'):\n cp = ConfigParser()\n cp.read(filename)\n token = cp.get('githubapi', 'token')\n return token", "def expiration(self) -> Optional[str]:\n return pulumi.get(self, \"expiration\")", "def get_access(access_token='',expire_time=0):\r\n #Get a new access token if it expires or is five minutes away from exp#iration\r\n if (expire_time==0) or (len(access_token)==0) or (time.time()-expire_time>=-300):\r\n\r\n #API needed to authorize account with refresh token\r\n auth_url = 'https://api.tdameritrade.com/v1/oauth2/token'\r\n\r\n #Data needed for token\r\n data = {'grant_type':'refresh_token',\r\n 'refresh_token':TDAuth_Info.refresh_token,\r\n 'client_id':TDAuth_Info.client_id}\r\n\r\n #Post the data to get the token\r\n auth_reply_json = requests.post(url=auth_url,data=data)\r\n auth_reply=auth_reply_json.json()\r\n\r\n #Now use the token to get account information\r\n access_token = auth_reply['access_token']\r\n expire_time=time.time()+auth_reply['expires_in']\r\n \r\n return (access_token,expire_time)", "def expiration_date(self) -> str:\n return pulumi.get(self, \"expiration_date\")", "def invalidate_auth_token(auth_token):\n\n results = __gae_fetch('https://api.stackexchange.com/%s/access-tokens/%s/invalidate' % (__api_version, auth_token))\n response = simplejson.loads(results.content)\n return response", "def get_valid_expiration_from_request(\n expiry_param=\"expires_in\", max_limit=None, default=None\n):\n return get_valid_expiration(\n flask.request.args.get(expiry_param), max_limit=max_limit, default=default\n )", "def default_expiration_delta():\n return timezone.now() + const.EXPIRY_TOKEN_DELTA", "def test_refreshes_token_when_expired(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n original_token = badgr._token_data['access_token']\n with vcr.use_cassette('tests/vcr_cassettes/expired_auth_token.yaml'):\n badgr.get_from_server(self._sample_url)\n self.assertNotEqual(original_token,\n badgr._token_data['access_token'])", "def build_access_token_expired():\n return do_build_access_token(tenant_id='intility_tenant_id', expired=True)" ]
[ "0.6769654", "0.6433507", "0.6418123", "0.6418123", "0.57661664", "0.57641804", "0.5683738", "0.5673133", "0.5673133", "0.5588578", "0.5476521", "0.54721695", "0.54422075", "0.5384525", "0.53797305", "0.5364178", "0.5355047", "0.53395385", "0.53342307", "0.52983904", "0.5289847", "0.51974005", "0.5165407", "0.5151712", "0.51311725", "0.51303244", "0.50962037", "0.5083114", "0.50816303", "0.50736713" ]
0.705975
0
Return an unexpired github token if possible from a list of tokens.
def get_github_auth_token(): global _cached_github_token if _cached_github_token: near_expiry, _ = get_github_token_info(_cached_github_token) if not near_expiry: return _cached_github_token github_auth_token = os.getenv('GITHUB_AUTH_TOKEN') assert github_auth_token, 'GITHUB_AUTH_TOKEN needs to be set.' tokens = github_auth_token.split(',') wait_time = None g = None for i, token in enumerate(tokens): g = github.Github(token) near_expiry, wait_time = get_github_token_info(g) if not near_expiry: _cached_github_token = g return g print(f'Rate limit exceeded, sleeping till reset: {wait_time} seconds.', file=sys.stderr) time.sleep(wait_time) return g
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokens():\n return ['access token', 'refresh token']", "def token(cls, token):\n user_db = User.get_by('token', token)\n if not user_db:\n raise ValueError('Sorry, your token is either invalid or expired.')\n return token", "def check_token(token: str, secret: str | List[str], max_age_seconds: int = 60 * 60 * 24) -> Any:\n return URLSafeTimedSerializer(secret).loads(token, max_age=max_age_seconds, salt=\"token\")", "def build_access_token_expired():\n return do_build_access_token(tenant_id='intility_tenant_id', expired=True)", "def __find_unused_token(self, attempts=10):\n for _ in range(attempts):\n token = secrets.token_urlsafe(config.token_byte_length)\n\n try:\n self.get(token)\n except self.RedirectDoesNotExist:\n # This means that we do not already have the token associated\n # with a destination so we're free to use it\n return token # Early Return\n\n raise Exception(f\"Failed to find an unused token after {attempts} attempts...\")", "def look_up_a_token():\n try:\n data = request.get_json(force=True)\n except Exception:\n data = None\n if data:\n tok = data['token']\n else:\n tok = request.headers.get('TOK_ID')\n request.data\n\n try:\n creation_time = int(round(datetime.timestamp(tokens[tok]), 0))\n issue_time = tokens[tok].isoformat()\n except Exception:\n _now = datetime.now(UTC)\n creation_time = int(round(datetime.timestamp(_now)))\n issue_time = _now.isoformat()\n tokens[tok] = _now\n expire_time = datetime.fromtimestamp(creation_time + 2764790)\n\n return jsonify({\n \"data\": {\n \"accessor\": \"8609694a-cdbc-db9b-d345-e782dbb562ed\",\n \"creation_time\": creation_time,\n \"creation_ttl\": 2764800,\n \"display_name\": \"fooname\",\n \"entity_id\": \"7d2e3179-f69b-450c-7179-ac8ee8bd8ca9\",\n \"expire_time\": expire_time.isoformat(),\n \"explicit_max_ttl\": 0,\n \"id\": tok,\n \"identity_policies\": [\n \"dev-group-policy\"\n ],\n \"issue_time\": issue_time,\n \"meta\": {\n \"username\": \"tesla\"\n },\n \"num_uses\": 0,\n \"orphan\": True,\n \"path\": \"auth/kubernetes/login\",\n \"policies\": [\n \"default\"\n ],\n \"renewable\": True,\n \"ttl\": 2764790\n }\n })", "def get_github_token_info(g):\n rate_limit = g.get_rate_limit()\n near_expiry = rate_limit.core.remaining < 50\n wait_time = (rate_limit.core.reset - datetime.datetime.utcnow()).seconds\n return near_expiry, wait_time", "def prep_token(**kwargs):\n token = kwargs.get('token')\n if not token:\n token = oauth2_wrappers.gen_token()\n return token", "def get_read_token(repo_names: List[str]) -> None:\n github_app = get_default_app()\n print(github_app.get_read_token(repo_names))", "def get_token(headers):\n bearer = headers.get('Authorization')\n if bearer:\n try:\n token_type, token = bearer.rsplit(' ', 1)\n except ValueError:\n raise TokenError('Wrong bearer string: %s', bearer)\n\n if token_type != 'Bearer':\n raise TokenError('Wrong token type: %s, must be %s',\n token_type, 'Bearer')\n return token\n raise TokenError('No token is given in the Authorization header')", "def get_token(request):\n try:\n ft_session = request.session['ft_token']\n token = OAuthAccessToken.objects.get(session_key=ft_session)\n # invalidate any token > 24 hours old\n now = datetime.now()\n diff = now - token.created\n if diff.days:\n token.delete()\n return False\n # TODO check ip address matches\n #oauthorize\n return token\n except KeyError:\n print 'no session token..'\n except OAuthAccessToken.DoesNotExist:\n print 'no access token ...'\n return False", "def validate_token():\n try:\n token = validate_auth()\n except 
Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)", "def organization_get_expired_token(self, client, id, expired_token):\n assert client.get('/organizations/' + id, headers={\n 'Authorization': 'Bearer ' + expired_token})\\\n .status == '401 UNAUTHORIZED'", "def validate_token(self, token):\n\n try:\n if not token:\n raise AuthException(\"Needed a token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n # try to get from cache first\n now = time()\n token_info = self.token_cache.get(token)\n if token_info and token_info[\"expires\"] < now:\n # delete token. MUST be done with care, as another thread maybe already delete it. Do not use del\n self.token_cache.pop(token, None)\n token_info = None\n\n # get from database if not in cache\n if not token_info:\n token_info = self.db.get_one(\"tokens\", {\"_id\": token})\n if token_info[\"expires\"] < now:\n raise AuthException(\"Expired Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n return token_info\n\n except DbException as e:\n if e.http_code == HTTPStatus.NOT_FOUND:\n raise AuthException(\"Invalid Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n else:\n raise\n except AuthException:\n if self.config[\"global\"].get(\"test.user_not_authorized\"):\n return {\"id\": \"fake-token-id-for-test\",\n \"project_id\": self.config[\"global\"].get(\"test.project_not_authorized\", \"admin\"),\n \"username\": self.config[\"global\"][\"test.user_not_authorized\"], \"admin\": True}\n else:\n raise\n except Exception:\n self.logger.exception(\"Error during token validation using internal backend\")\n raise AuthException(\"Error during token validation using internal backend\",\n http_code=HTTPStatus.UNAUTHORIZED)", "def claim_token(self, allowed_failures=10):\n count = 0\n while count < allowed_failures:\n count += 1\n try:\n (key, ref) = self.client.get_token(self.view, \n view_params=self.view_params, window_size=100)\n document_index = ref\n if type(ref) == list:\n document_index = ref[0]\n record = self.client.db[document_index]\n modified_record = self.token_modifier.lock(record)\n return (key, ref, self.client.modify_token(modified_record) )\n except ResourceConflict:\n pass\n if count == allowed_failures:\n raise EnvironmentError(\"Unable to claim token.\")", "def get_stored_token():\n try:\n parser = SafeConfigParser()\n parser.read(OAUTH_FILE)\n user = parser.get('auth', 'user')\n token = parser.get('auth', 'token')\n token_date_str = parser.get('auth', 'token_date')\n except ConfigParser.Error as e:\n return None, None\n\n if user and token and token_date_str:\n date1 = datetime.datetime.strptime(token_date_str, '%Y-%m-%d').date()\n date2 = datetime.date.today()\n if (date2 - date1).days > OAUTH_EXP_DAYS:\n user, token = None, None\n\n return user, token", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def test_cleans_previous_token_before_fetching_new_one(self):\n new_token = deepcopy(self.token)\n past = time.time() - 7200\n now = time.time()\n self.token[\"expires_at\"] = past\n new_token[\"expires_at\"] = now + 3600\n url = \"https://example.com/token\"\n\n with mock.patch(\"time.time\", lambda: now):\n for client in self.clients:\n sess = OAuth2Session(client=client, token=self.token)\n sess.send = fake_token(new_token)\n if isinstance(client, 
LegacyApplicationClient):\n # this client requires a username+password\n # if unset, an error will be raised\n self.assertRaises(ValueError, sess.fetch_token, url)\n self.assertRaises(\n ValueError, sess.fetch_token, url, username=\"username1\"\n )\n self.assertRaises(\n ValueError, sess.fetch_token, url, password=\"password1\"\n )\n # otherwise it will pass\n self.assertEqual(\n sess.fetch_token(\n url, username=\"username1\", password=\"password1\"\n ),\n new_token,\n )\n else:\n self.assertEqual(sess.fetch_token(url), new_token)", "def test_token_cache(self, mock_check_token_not_revoked,\n mock_get_issuer_public_key):\n # Mock the external call to retrieve the IAM public key\n # used in the _verify_token and valid_token_to_id call\n mock_get_issuer_public_key.return_value = PUBLIC_KEY\n # Mock the external call to check the token has not been rejected\n # used in the valid_token_to_id call\n mock_check_token_not_revoked.return_value = CLIENT_ID\n\n payload_list = []\n\n # This payload will be valid as we will sign it with PRIVATE_KEY\n payload = self._standard_token()\n\n # Add the same token twice, this is what tests the cache functionality\n payload_list = [payload, payload]\n\n for payload in payload_list:\n token = self._create_token(payload, PRIVATE_KEY)\n with self.settings(IAM_HOSTNAME_LIST=['iam-test.idc.eu']):\n self.assertEqual(\n self._token_checker.valid_token_to_id(token), CLIENT_ID,\n \"Token with payload %s should not be accepted!\" % payload\n )", "def find_token_for_authorization(authorization):\n return None", "def test_access_token_all_expired(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, None)\n exp.delete()", "def invalid_auth_token_header():\n headers = '{\"Host\":\"$host\",\"User-Agent\":\"$user_agent\",\"Date\":\"DATE\",'\n headers += '\"Accept\": \"application/json\",\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Project-ID\": \"$project_id\",'\n headers += '\"X-Auth-Token\": \"InvalidToken\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host,\n project_id=CFG.project_id,\n user_agent=CFG.user_agent)", "def check_token(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n if 'email' not in decoded_token or 'expires' not in decoded_token \\\n or 'token' not in decoded_token:\n return {'error': 'Token is invalid'}\n\n self.email = decoded_token['email']\n self.user_in_db = User.users_db.get(decoded_token['email'])\n\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if self.user_in_db['token'] != decoded_token['token']:\n return {'error': 'Token is invalid'}\n\n if decoded_token['expires'] < time.time():\n return {'error': 'Token is expired'}\n\n return decoded_token", "def get_master_token(user, repo, name, config):\n url = \"{}/repos/{}/{}/master_tokens\".format(config['url_base'], user, repo)\n\n try:\n resp = (api_call(url, 'get', config['debug']))\n tokens = resp.json()\n except ValueError as ex:\n abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n for token in tokens:\n if token['name'] == name:\n return token\n\n return None", "def get_write_token(repo_names: List[str]) -> None:\n github_app = get_default_app()\n print(github_app.get_write_token(repo_names))", "def retrieve_token():\n try:\n deserialized_message = 
json.loads(peek_app_token())\n\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n if expires_at and check_expired_time(expires_at):\n return deserialized_message.get('token')\n else: # Token expired, refresh it\n refresh_token()\n\n deserialized_message = peek_app_token()\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n try:\n assert(expires_at and check_expired_time(expires_at))\n return deserialized_message.get('token')\n except:\n raise # When all else fails\n\n except Exception as exc:\n log.error(f'Could not refresh token.\\n{exc}')\n traceback.print_exc(file=sys.stderr)\n\n return None", "def _gen_github_ses(github_login):\n if github_login == 'disabledloginfortesting':\n raise _gh_exception(gh.BadCredentialsException,\n 403, 'no login specified')\n\n # see if we have tokens - might be many. Doesn't cost us much so get at once\n tokens = unique(\n ensure_list(cfg.get(CONFIG_HUB_TOKEN_FIELD, None)),\n reverse=True\n )\n\n # Check the tokens. If login is provided, only the token(s) for the\n # login are considered. We consider oauth tokens as stored/used by\n # https://github.com/sociomantic/git-hub\n\n if github_login and tokens:\n # Take only the tokens which are Ok and correspond to that login\n tokens = _get_tokens_for_login(github_login, tokens)\n\n for token in tokens:\n try:\n yield gh.Github(token), _token_str(token)\n except gh.BadCredentialsException as exc:\n lgr.debug(\"Failed to obtain Github session for token %s: %s\",\n _token_str(token), exc_str(exc))\n\n # We got here so time to get/store token from credential store\n cred = _get_github_cred(github_login)\n while True:\n token = cred()['token']\n try:\n # ??? there was a comment # None for cred so does not get killed\n # while returning None as cred. Effect was not fully investigated from changing to return _token_str\n yield gh.Github(token), _token_str(token)\n except gh.BadCredentialsException as exc:\n lgr.debug(\"Failed to obtain Github session for token %s: %s\",\n _token_str(token), exc_str(exc))\n # if we are getting here, it means we are asked for more and thus\n # aforementioned one didn't work out :-/\n if ui.is_interactive:\n if cred is not None:\n if ui.yesno(\n title=\"GitHub credentials\",\n text=\"Do you want to try (re)entering GitHub personal access token?\"\n ):\n cred.enter_new()\n else:\n break\n else:\n # Nothing we could do\n lgr.warning(\n \"UI is not interactive - we cannot query for more credentials\"\n )\n break", "def token(self):\n if not self._token or self._expires <= datetime.now():\n self._request_token()\n return self._token", "def get_token(*args, **kwargs):\n try:\n response = server.create_token_response()\n except (JWTError, JWTExpiredError) as e:\n # - in Authlib 0.11, create_token_response does not raise OAuth2Error\n # - fence.jwt.errors.JWTError: blacklisted refresh token\n # - JWTExpiredError (cdiserrors.AuthNError subclass): expired\n # refresh token\n # Returns code 400 per OAuth2 spec\n body = {\"error\": \"invalid_grant\", \"error_description\": e.message}\n response = flask.Response(\n json.dumps(body), mimetype=\"application/json\", status=400\n )\n return response", "def get_token(filename='config.ini'):\n cp = ConfigParser()\n cp.read(filename)\n token = cp.get('githubapi', 'token')\n return token" ]
[ "0.57277447", "0.56966573", "0.5588019", "0.55098516", "0.54732585", "0.545649", "0.54281145", "0.54081726", "0.5367577", "0.5320743", "0.5294111", "0.5282494", "0.5272418", "0.5269372", "0.5261628", "0.52006954", "0.5181814", "0.51739687", "0.5167998", "0.51490164", "0.51465154", "0.51203966", "0.5117964", "0.5072146", "0.5070808", "0.50661075", "0.50626916", "0.50423616", "0.50413275", "0.5033773" ]
0.67476416
0
Return repository object, given a url.
def get_repository(url): if not '://' in url: url = 'https://' + url parsed_url = urllib.parse.urlparse(url) if parsed_url.netloc.endswith('github.com'): g = get_github_auth_token() repo_url = parsed_url.path.strip('/') repo = GitHubRepository(g.get_repo(repo_url)) return repo raise Exception('Unsupported url!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRepo(session, name=None, url=None):\r\n\r\n try:\r\n # Look up repository by name\r\n if name is not None:\r\n return session.get_repo(name)\r\n\r\n # Look up repository by clone URL\r\n if url is not None:\r\n # Parse URL\r\n url = urlparse(url)\r\n\r\n # Check that this is a github URL\r\n if not url.hostname.endswith(\"github.com\"):\r\n return None\r\n\r\n # Get repository name from clone URL\r\n name = url.path\r\n if name.startswith(\"/\"):\r\n name = name[1:]\r\n if name.endswith(\".git\"):\r\n name = name[:-4]\r\n\r\n # Look up repository by name\r\n return getRepo(session, name=name)\r\n\r\n except:\r\n pass\r\n\r\n return None", "def new(url):\n from grit import Repo\n return Repo.new(url=url, bare=True)", "def parse(cls, potential_url: str) -> Optional[\"RepoUrl\"]:\n if not potential_url:\n return None\n\n repo = RepoUrl(None)\n parsed_url = cls._prepare_url(potential_url)\n if not parsed_url:\n return None\n\n repo._set_hostname_and_scheme(parsed_url)\n if not repo._parse_username(parsed_url):\n # failed parsing username\n return None\n\n return repo if repo._parsed_path(*cls._prepare_path(parsed_url)) else None", "def RepositoryUrl(name):\n repository = ndb.Key(Repository, name).get()\n if not repository:\n raise KeyError('Unknown repository name: ' + name)\n return repository.urls[0]", "def repo(self, user, repo):\r\n return repositories.Repo(self, user, repo)", "async def fetch_repository(self, name: str) -> \"Repository\":\n\n # prevent cyclic imports\n from github.objects import Repository\n\n data = await self.http.fetch_repository(self.login, name)\n return Repository.from_data(data, self.http)", "def repo(self, user, repo):\r\n return repos.Repo(self, user, repo)", "def repository(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return search_api(\"repository\", host, owner, repo)", "def getRepository(self, name, access_verb):\n access_verb = access_verb.upper()\n name = urllib.quote_plus(name)\n exists = name in self.listRepositories();\n if access_verb == Repository.RENEW:\n if exists:\n self.deleteRepository(name)\n return self.createRepository(name)\n\n if access_verb == Repository.CREATE:\n if exists:\n raise ServerException(\n \"Can't create triple store named '%s' because a store with that name already exists.\",\n name)\n return self.createRepository(name)\n\n if access_verb == Repository.OPEN:\n if not exists:\n raise ServerException(\n \"Can't open a triple store named '%s' because there is none.\", name)\n\n return Repository(self, name, self.mini_catalog.getRepository(name))\n\n if access_verb == Repository.ACCESS:\n if not exists:\n return self.createRepository(name)\n\n return Repository(self, name, self.mini_catalog.getRepository(name))", "async def repository(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"repository\"], *args, **kwargs)", "def get_repository(name: str = None):\n try:\n local_dir = os.environ['GE_REPO_DIR']\n except KeyError:\n local_dir = \"./\"\n local_path = os.path.join(local_dir, name)\n local_repo = gecore.github_repo.load_repository(local_path)\n repo = Repository()\n repo.url = local_repo.url\n repo.name = local_repo.name\n repo.original_author = local_repo.original_author\n repo.branches = local_repo.branches\n repo.commits = map(get_commit, local_repo.commits)\n\n return repo", "def make_pull(db,url):\n result = db.product_mstator.find_one({\"url\":url})\n return result", "def get_own_repo():\n own_repo = GitClass(name='self', 
url='https://github.com/meganhmoore/github-api-covid-data', owner='meganhmoore',\n repo='github-api-covid-data', branch='develop/new_data')\n return own_repo", "def fusion_api_get_repository(self, uri=None, param='', api=None, headers=None):\n return self.repository.get(uri=uri, param=param, api=api, headers=headers)", "def fromurl(cls, url: str):\n return cls.parse_obj(requests.get(url).json())", "def RepositoryName(url, add_if_missing=False):\n if url.endswith('.git'):\n url = url[:-4]\n\n repositories = Repository.query(Repository.urls == url).fetch()\n if repositories:\n return repositories[0].key.id()\n\n if add_if_missing:\n return _AddRepository(url)\n\n raise KeyError('Unknown repository URL: %s' % url)", "def get_or_create_url(url, session=session, model=URL):\n\n instance = session.query(model).filter_by(**{'text': url}).first()\n if instance:\n return instance\n else:\n instance = create_url(url)\n return instance", "def get_repository(self, model_name):\n model = self.get_model(model_name)\n return model._repository", "def _get_repo(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}\"\n status, data, _ = self.get(url)\n if (status == 200):\n return data\n else:\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")", "def _AddRepository(url):\n name = url.split('/')[-1]\n\n if ndb.Key(Repository, name).get():\n raise AssertionError(\"Attempted to add a repository that's already in the \"\n 'Datastore: %s: %s' % (name, url))\n\n Repository(id=name, urls=[url]).put()\n return name", "def get_from_uri(self, url, skip_cache=False, *args, **kwargs):\n\n cleaned_url = handle_slash(url, self.model._meta['add_slash'])\n\n if skip_cache:\n cached_response = None\n else:\n cached_response = self.get_from_cache('GET', cleaned_url)\n\n if cached_response:\n response = cached_response\n else:\n response = self._request('GET', cleaned_url, *args, **kwargs)\n\n self.validate_get_response(response)\n self.handle_get_response(response)\n\n # should this be handled by handle_get_response? 
i think probably.\n obj = self.obj_from_response(response)\n\n obj._full_url = cleaned_url\n\n return obj", "def get_repository(\n self, repository_id_or_slug: Union[str, int], *, params: Optional[dict] = None\n ) -> \"resource_types.Repository\":\n\n return communicator.Repository(self.__requester).from_id_or_slug(\n repository_id_or_slug=repository_id_or_slug, parameters=params\n )", "def get_by_url(self, url, pool_name=None):\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]", "def get_repo(self):\n\n # gets information about repository\n repo_endpoint = f'/repos/{self.repo}'\n response = self._get_request(repo_endpoint)\n # guard condition\n if response.status_code != STATUS_CODE_OK:\n return None\n # deserialize\n repo = response.json()\n\n return {\n 'id': repo['id'],\n 'repo_name': repo['name'],\n 'creation_date': None,\n 'owner': None,\n 'url': repo['links']['self'][0]['href']\n }", "def parse_git_repo(potential_url: str) -> Optional[RepoUrl]:\n return RepoUrl.parse(potential_url)", "def getOrDownloadImageObject(self, url):\n \n if \"//\" in url:\n return self.downloadImage(url)\n else:\n return self.getPILFromPath(url)", "def get_url(self):\n if self.url:\n return self.url\n # if we have a uuid and happen to know the URL for it, use that\n elif self.uuid and PathIdentifier.repo_hints.has_key(self.uuid):\n self.url = PathIdentifier.repo_hints[self.uuid] + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n return self.url\n # if we've only seen one rep, use that (a guess, but an educated one)\n elif not self.uuid and len(PathIdentifier.repo_hints) == 1:\n uuid, root = PathIdentifier.repo_hints.items()[0]\n if uuid:\n self.uuid = uuid\n PathIdentifier.locobjs['uuid://%s%s' % (uuid, self.repo_relative_path)] = self\n self.url = root + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n report(\"Guessing that '%s' refers to '%s'\" % (self, self.url))\n return self.url\n else:\n error(\"Cannot determine URL for '%s'; \" % self +\n \"Explicit source argument (-S/--source) required.\\n\")", "def mock_github_get(url):\n mock_repo_key = url.split(\"/\")[-1]\n\n result = requests.Response()\n result.status_code = 200\n result.encoding = \"utf-8\"\n result._content = repos[mock_repo_key].encode()\n\n return result", "def get_at_url(self, url):\n class NullDevice():\n def write(self, s):\n pass\n\n def get_gallery_item(id):\n \"\"\"\n Special helper method to get gallery items.\n\n The problem is that it's impossible to distinguish albums and\n images from each other based on the url. And there isn't a common\n url endpoints that return either a Gallery_album or a Gallery_image\n depending on what the id represents. So the only option is to\n assume it's a Gallery_image and if we get an exception then try\n Gallery_album. Gallery_image is attempted first because there is\n the most of them.\n \"\"\"\n try:\n # HACK: Problem is that send_request prints the error message\n # from Imgur when it encounters an error. This is nice because\n # this error message is more descriptive than just the status\n # code that Requests give. But since we first assume the id\n # belong to an image, it means we will get an error whenever\n # the id belongs to an album. 
The following code temporarily\n # disables stdout to avoid give a cryptic and incorrect error.\n\n # Code for disabling stdout is from\n # http://coreygoldberg.blogspot.dk/2009/05/\n # python-redirect-or-turn-off-stdout-and.html\n original_stdout = sys.stdout # keep a reference to STDOUT\n sys.stdout = NullDevice() # redirect the real STDOUT\n return self.get_gallery_image(id)\n # TODO: Add better error codes so I don't have to do a catch-all\n except Exception:\n return self.get_gallery_album(id)\n finally:\n sys.stdout = original_stdout # turn STDOUT back on\n\n if not self.is_imgur_url(url):\n return None\n\n objects = {'album': {'regex': \"a/(?P<id>[\\w.]*?)$\",\n 'method': self.get_album},\n 'comment': {'regex': \"gallery/\\w*/comment/(?P<id>[\\w.]*?)$\",\n 'method': self.get_comment},\n 'gallery': {'regex': \"(gallery|r/\\w*?)/(?P<id>[\\w.]*?)$\",\n 'method': get_gallery_item},\n # Valid image extensions: http://imgur.com/faq#types\n # All are between 3 and 4 chars long.\n 'image': {'regex': \"(?P<id>[\\w.]*?)(\\\\.\\w{3,4})?$\",\n 'method': self.get_image},\n 'user': {'regex': \"user/(?P<id>[\\w.]*?)$\",\n 'method': self.get_user}\n }\n parsed_url = urlparse(url)\n for obj_type, values in objects.items():\n regex_result = re.match('/' + values['regex'], parsed_url.path)\n if regex_result is not None:\n obj_id = regex_result.group('id')\n initial_object = values['method'](obj_id)\n if obj_type == 'image':\n try:\n # A better version might be to ping the url where the\n # gallery_image should be with a requests.head call. If\n # we get a 200 returned, then that means it exists and\n # this becomes less hacky.\n original_stdout = sys.stdout\n sys.stdout = NullDevice()\n if getattr(initial_object, 'section', None):\n sub = initial_object.section\n return self.get_subreddit_image(sub, obj_id)\n return self.get_gallery_image(obj_id)\n except Exception:\n pass\n finally:\n sys.stdout = original_stdout\n return initial_object", "def parse(cls, url, treeishes=None):\n parsed = urllib.parse.urlparse(url)\n path_match = RGX_URL_PATH.match(parsed.path)\n if not path_match:\n raise ValueError('Invalid Gitiles repo url: %s' % url)\n\n hostname = parsed.netloc\n project = path_match.group(1)\n if project.startswith('a/'):\n project = project[len('a/'):]\n project = project.strip('/')\n if project.endswith('.git'):\n project = project[:-len('.git')]\n\n treeish_and_path = (path_match.group(3) or '').strip('/').split('/')\n treeish_and_path = [] if treeish_and_path == [''] else treeish_and_path\n treeish = treeish_and_path[:]\n if len(treeish) == 1:\n pass\n elif not treeishes:\n if treeish[:2] == ['refs', 'heads']:\n treeish = treeish[:3]\n else:\n treeish = treeish[:1]\n else:\n treeishes = set(tuple(t.split('/')) for t in treeishes)\n while treeish and tuple(treeish) not in treeishes:\n treeish.pop()\n if not treeish:\n raise TreeishResolutionError('could not resolve treeish in %s' % url)\n\n path = treeish_and_path[len(treeish):]\n\n # if not HEAD or a hash, should be prefixed with refs/heads/\n treeish = treeish or ['HEAD']\n if (treeish[:2] != ['refs', 'heads']\n and treeish != ['HEAD']\n and not (len(treeish) == 1 and RGX_HASH.match(treeish[0]))):\n treeish = ['refs', 'heads'] + treeish\n\n treeish_str = '/'.join(treeish)\n path_str = '/' + '/'.join(path) # must start with slash\n # Check yourself.\n _validate_args(hostname, project, treeish_str, path_str, path_required=True)\n return cls(hostname, project, treeish_str, path_str)" ]
[ "0.7281594", "0.6778706", "0.6539018", "0.6533035", "0.6382873", "0.63770306", "0.63320506", "0.6298444", "0.62905043", "0.6267841", "0.6234613", "0.6186023", "0.6170094", "0.610516", "0.6103195", "0.6099141", "0.60270405", "0.6025164", "0.6019437", "0.59717953", "0.59647065", "0.5948611", "0.59439063", "0.59066236", "0.58834296", "0.58797646", "0.58714926", "0.5861096", "0.5856227", "0.5848084" ]
0.76957023
0
Build an oauth client with which callers can query Allura.
def make_oauth_client(base_url) -> requests.Session:
    config_file = os.path.join(os.environ['HOME'], '.allurarc')
    cp = ConfigParser()
    cp.read(config_file)

    REQUEST_TOKEN_URL = base_url + '/rest/oauth/request_token'
    AUTHORIZE_URL = base_url + '/rest/oauth/authorize'
    ACCESS_TOKEN_URL = base_url + '/rest/oauth/access_token'
    oauth_key = option(cp, base_url, 'oauth_key',
                       'Forge API OAuth Consumer Key (%s/auth/oauth/): ' % base_url)
    oauth_secret = option(cp, base_url, 'oauth_secret', 'Forge API Oauth Consumer Secret: ')

    try:
        oauth_token = cp.get(base_url, 'oauth_token')
        oauth_token_secret = cp.get(base_url, 'oauth_token_secret')
    except NoOptionError:
        oauthSess = OAuth1Session(oauth_key, client_secret=oauth_secret, callback_uri='oob')
        request_token = oauthSess.fetch_request_token(REQUEST_TOKEN_URL)
        pin_url = oauthSess.authorization_url(AUTHORIZE_URL, request_token['oauth_token'])
        if isinstance(webbrowser.get(), webbrowser.GenericBrowser):
            print("Go to %s" % pin_url)
        else:
            webbrowser.open(pin_url)
        oauth_verifier = input('What is the PIN? ')
        access_token = oauthSess.fetch_access_token(ACCESS_TOKEN_URL, oauth_verifier)
        oauth_token = access_token['oauth_token']
        oauth_token_secret = access_token['oauth_token_secret']
        cp.set(base_url, 'oauth_token', oauth_token)
        cp.set(base_url, 'oauth_token_secret', oauth_token_secret)
        # save oauth token for later use
        cp.write(open(config_file, 'w'))
        print(f'Saving oauth tokens in {config_file} for later re-use')
        print()
    else:
        oauthSess = OAuth1Session(oauth_key, client_secret=oauth_secret,
                                  resource_owner_key=oauth_token,
                                  resource_owner_secret=oauth_token_secret)

    return oauthSess
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, oauth=None, client_id=None):\n\t\tself.oauth = oauth\n\t\tself.client_id = client_id or self.default_client_id", "def oauth_config(url_base):\n return {\n \"credentials\": {\n \"auth_type\": \"oauth2_confidential_application\",\n \"client_secret\": \"test_client_secret\",\n \"client_id\": \"test_client_id\",\n \"audience\": f\"{url_base}/api/v2\",\n },\n \"base_url\": url_base,\n }", "def oauth():\n return {\"consumer_key\": \"Insert consumer key HERE\",\n \"consumer_secret\": \"Insert consumer secret HERE\",\n \"token_key\": \"Insert token key HERE\",\n \"token_secret\": \"Insert token secret HERE\"}", "def _get_user_client(self):\n return api.OAuthClient(settings.CLIENT_ID, settings.CLIENT_SECRET, settings.USER, settings.PASSWORD)", "def oauth2(self):\n from hubspot3.oauth2 import OAuth2Client\n\n return OAuth2Client(**self.auth, **self.options)", "def client():\n return Client(**common_data.AUTH_ARGS)", "def create_api_client(base_path, access_token):\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(header_name=\"Authorization\",\n header_value=f\"Bearer {access_token}\")\n return api_client", "def _get_auth_client(self, request):\n if self._auth_client is None:\n # Use PyFxa defaults if not specified\n server_url = fxa_conf(request, 'oauth_uri')\n auth_cache = self._get_cache(request)\n self._auth_client = OAuthClient(server_url=server_url, cache=auth_cache)\n\n return self._auth_client", "def __init__(self, client_id=None, client=None, auto_refresh_url=None,\n auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,\n state=None, token_updater=None, **kwargs):\n super(Oauth, self).__init__(**kwargs)\n self._client = client or WebApplicationClient(client_id, token=token)\n self.token = token or {}\n self.scope = scope\n self.redirect_uri = redirect_uri\n # self.state = state or generate_token\n # self._state = state\n self.auto_refresh_url = auto_refresh_url\n self.auto_refresh_kwargs = auto_refresh_kwargs or {}\n self.token_updater = token_updater\n\n # Allow customizations for non compliant providers through various\n # hooks to adjust requests and responses.\n self.compliance_hook = {\n 'access_token_response': set(),\n 'refresh_token_response': set(),\n 'protected_request': set(),\n }", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "def get_client():\n return Client(__address, authkey='strumamor')", "def _jira_client(self, frm):\n request_key = 'oauth_request_{}'.format(frm)\n access_key = 'oauth_access_{}'.format(frm)\n logging.warn(\"FROM: %s\", frm)\n logging.debug(\"Config %s\", config.JIRA_OAUTH_URL)\n if self.metadata.get(request_key):\n oauth = JiraOauth()\n state = self.metadata[request_key]\n try:\n self.metadata[access_key] = oauth.accepted(state)\n except KeyError:\n self._jira_req_auth(frm)\n del self.metadata[request_key]\n if not self.metadata.get(access_key):\n self._jira_req_auth(frm)\n token, secret = self.metadata[access_key]\n oauth_config = {\n 'access_token': token,\n 'access_token_secret': secret,\n 'consumer_key': config.JIRA_OAUTH_KEY,\n 'key_cert': 
config.JIRA_OAUTH_PEM,\n }\n\n return JIRA(config.JIRA_BASE_URL, oauth=oauth_config)", "def __init__(self, api_key, client_id=None, client_secret=None):\n self.api = API(api_key)\n self._manifest = Manifest(self.api)\n self.oauth = OAuth(client_id, client_secret)", "def __init__(self, api_version=_BIGQUERY_API_VERSION):\n self.http = httplib2.Http(cache=memcache)\n self.service = discovery.build('bigquery',\n api_version,\n http=self.http,\n discoveryServiceUrl=DISCOVERY_URL)\n if _CREDENTIALS is None:\n raise BigQueryClientException(\n 'Needed Credentials are missing from this source code!')\n credentials = Credentials.new_from_json(_CREDENTIALS)\n logging.info('Authorizing...')\n self.http = credentials.authorize(self.http)", "def auth(self):\n auth = OAuthHandler(self._api_key, self._api_secret)\n auth.set_access_token(self._access_token, self._access_secret)\n return auth", "def __init__(self, client_id, client_secret, refresh_token=None, code=None, callback_url=None):\n\n self.base_url = 'https://api.rd.services'\n self.client_id = client_id\n self.client_secret = client_secret\n self.refresh_token = refresh_token\n if code:\n self.access_token = self._get_access_token(code)\n elif refresh_token:\n self.access_token = self._refresh_access_token()\n else:\n authorization_url = self._get_authorization_url(callback_url)\n print(authorization_url)\n\n self.headers = {\n \"Authorization\": f\"Bearer {self.access_token}\",\n \"Content-Type\": \"application/json\"\n }", "def get_client(self):\n token = self.get_access_token()\n if self.client is None:\n credentials = AccessTokenCredentials(token, 'vetware/1.0')\n # credentials = SignedJwtAssertionCredentials(self.email, self.private_key,\n # \"https://www.googleapis.com/auth/calendar\")\n http = credentials.authorize(Http())\n self.client = build('calendar', 'v3', http=http)\n return self.client", "def build_client(config, auth_token = None):\n if auth_token:\n pass\n\n elif not auth_token and config.get(\"auth_token\"):\n auth_token = config.get(\"auth_token\")\n\n elif not auth_token and not config.get(\"auth_token\"):\n auth_token, config = start_auth_flow(config)\n\n __log__.debug(\"Creating the dropbox client!\")\n client = DropboxClient(auth_token)\n __log__.debug(\"Successfully created client!\")\n\n # Put the information on a copy of config object\n configClone = config.copy()\n configClone.update({\n \"auth_token\": auth_token,\n \"client\": client,\n })\n\n return (client, configClone)", "def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])", "def __init__(self, base_url, client_id, client_secret, client_scope, api_json = None):\n # type: (str, str, str, str, str) -> None\n\n self.base_url = base_url\n self.client_id = client_id\n self.client_secret = client_secret\n self.client_scope = client_scope\n\n # If the user doesn't pass an alternate API file use the included one\n if not api_json:\n api_json = pkg_resources.resource_filename(__name__, 'apis.json')\n\n with open(api_json, encoding='utf-8') as api_file:\n apis = json.loads(api_file.read())\n\n if client_scope in apis: \n api = apis.get(client_scope)\n self.token_url = api.get('token_url')\n self.api_call = sleep_and_retry(limits(calls=api.get('limits_calls'), period=api.get('limits_period'))(self._api_call))\n self.access_token = self.get_access_token(self.token_url)\n else: \n raise 
Exception(f\"Scope {client_scope} not in known API dict\")", "def authorize_api(self):\n\n log.debug('computing Google authentification process for \"{}\"'.format(self.school_year))\n flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, SCOPE)\n storage = Storage('credentials.dat')\n credentials = storage.get()\n\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, tools.argparser.parse_args())\n\n # Create an httplib2.Http object to handle our HTTP requests, and authorize it\n # using the credentials.authorize() function.\n http = httplib2.Http()\n http = credentials.authorize(http)\n httplib2.debuglevel = 0\n\n return build('calendar', 'v3', http=http)", "def setup_oauth():\n # Request token\n oauth = OAuth1(CONSUMER_KEY, client_secret=CONSUMER_SECRET)\n r = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n\n resource_owner_key = credentials.get('oauth_token')[0]\n resource_owner_secret = credentials.get('oauth_token_secret')[0]\n\n # Authorize\n authorize_url = AUTHORIZE_URL + resource_owner_key\n print 'Please go here and authorize: ' + authorize_url\n\n verifier = raw_input('Please input the verifier: ')\n oauth = OAuth1(CONSUMER_KEY,\n client_secret=CONSUMER_SECRET,\n resource_owner_key=resource_owner_key,\n resource_owner_secret=resource_owner_secret,\n verifier=verifier)\n\n # Finally, Obtain the Access Token\n r = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n token = credentials.get('oauth_token')[0]\n secret = credentials.get('oauth_token_secret')[0]\n\n return token, secret", "def get_oauth():\n\n # initial app authorization request - not tied to specific user\n request_token, request_token_secret = goodreads.get_request_token(header_auth=True)\n\n # assign request tokens to session for future use\n session['request_token'] = request_token\n session['request_token_secret'] = request_token_secret\n\n # url takes user to Goodreads and presents them with option to authorize readerboard\n authorize_url = goodreads.get_authorize_url(request_token)\n\n # send user to goodreads\n return redirect(authorize_url)", "def app_oauth_client_fixture():\n return AppOAuthClient({\n 'oauthClientId': str(uuid4()),\n 'oauthClientSecret': str(uuid4())\n })", "def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret", "def create_oauth(self, user):\r\n from oauth_provider.models import Consumer, Token, Resource\r\n\r\n # Necessary setup for ``oauth_provider``.\r\n resource, _ = Resource.objects.get_or_create(url='test', defaults={\r\n 'name': 'Test Resource'\r\n })\r\n consumer, _ = Consumer.objects.get_or_create(key='123', defaults={\r\n 'name': 'Test',\r\n 'description': 'Testing...'\r\n })\r\n token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={\r\n 'consumer': consumer,\r\n 'resource': resource,\r\n 'secret': '',\r\n 'user': user,\r\n })\r\n\r\n # Then generate the header.\r\n oauth_data = {\r\n 'oauth_consumer_key': '123',\r\n 'oauth_nonce': 'abc',\r\n 'oauth_signature': '&',\r\n 'oauth_signature_method': 'PLAINTEXT',\r\n 'oauth_timestamp': str(int(time.time())),\r\n 'oauth_token': 'foo',\r\n }\r\n return 'OAuth %s' % ','.join([key + '=' + value for key, value in oauth_data.items()])", "def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, 
config.ACCESS_TOKEN_SECRET)\n\n return auth", "def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return auth", "def buildAPICal(self, credentials):\n from googleapiclient.discovery import build\n return build('calendar', 'v3', credentials=self.creds)", "def get_client(public_key: str, secret_key: str, **_):\n razorpay_client = razorpay.Client(auth=(public_key, secret_key))\n return razorpay_client" ]
[ "0.6535492", "0.6519864", "0.6420498", "0.6407966", "0.6387362", "0.6379229", "0.62640995", "0.62335783", "0.6118638", "0.60628766", "0.6022355", "0.6002151", "0.59875053", "0.5984816", "0.5984286", "0.5928542", "0.59119326", "0.59105897", "0.5902255", "0.58838856", "0.58485657", "0.5847549", "0.58125097", "0.5807864", "0.580306", "0.5788053", "0.5775526", "0.5775526", "0.5767101", "0.5766962" ]
0.6976381
0
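A minimal usage sketch for the make_oauth_client document above: the forge base URL and the REST path queried are placeholders chosen for illustration, not values taken from the source; the returned OAuth1Session is used like an ordinary requests session.
# Illustrative only: base URL and project path below are assumptions.
sess = make_oauth_client('https://forge-allura.apache.org')
resp = sess.get('https://forge-allura.apache.org/rest/p/allura/wiki/')
resp.raise_for_status()
print(resp.json())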
Count how many times the phrase appears in the text
def count_phrase_in_text(phrase: str, text: str):
    count = 0
    # Remove leading and trailing white spaces
    phrase = phrase.strip()
    # Substitute multiple whitespace with single whitespace
    phrase = ' '.join(phrase.split())
    if text.startswith(phrase + " "):
        count += 1
    if text.endswith(" " + phrase + "\n") or text.endswith(" " + phrase) or \
            text.endswith(" " + phrase + "\r\n") or text.endswith(phrase):
        count += 1
    count += len(text.split(" " + phrase + " ")) - 1
    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(text):\n return len(text)", "def word_count(phrase):\n return collections.Counter(phrase.split())", "def text_count(self, text):\n res = 0\n for intv in self:\n if intv._text == text:\n res += 1\n return res", "def total_phrases(target_text):\n\n nbphrase = 0\n separators = '.!?;'\n for char in target_text:\n if char in separators:\n nbphrase = nbphrase + 1\n return nbphrase", "def countWord(self,phrase):\n return self._support.countWord(phrase)", "def word_count(text, word):\n \n #answer\n word_list = text.split(\" \")\n return (word_list.count(word))\n \n #return (text.count(word)) - deoesn't work", "def count(self, word):\n pass", "def loop_through_text(phrase_length):\n\n # get text\n tanach = get_all_text()\n tanach = tanach.split()\n\n results = {}\n\n for index in range(len(tanach)):\n query = ' '.join(tanach[index:index+phrase_length])\n\n if query in results:\n results[query] += 1\n\n else:\n results[query] = 1\n\n return results", "def word_count(phrase):\n word_dict = {}\n\n for word in phrase.split():\n word_dict[word] = word_dict.get(word, 0) + 1\n\n return word_dict", "def count_sentences(text):\n count = 0\n terminals = '.;?!'\n for character in text:\n \n if character in terminals:\n count += 1\n\n return count", "def loop_through_text(phrase_length):\n\n # get text\n tanach = get_all_text()\n tanach = tanach.split()\n\n results = {}\n\n for index in xrange(len(tanach)):\n query = ' '.join(tanach[index:index+phrase_length])\n\n if query in results:\n results[query] += 1\n\n else:\n results[query] = 1\n\n return results", "def count_words(phrase):\n ## initialize empty dictionary to track word counts as key:val pairs\n word_count = {}\n\n ## split the input phrase on spaces, iterate over the resulting list on\n ## word by word basis\n for word in phrase.split():\n ## look up the value associated with word as key in dictionary;\n ## if not there, will return 0 as default then increment by 1 for a new\n ## value of 1 for the first occurrence of word; if it is there, the\n ## value will be incremented by 1\n word_count[word] = word_count.get(word, 0) + 1\n\n ## return final word_count dictionary\n return word_count", "def duplicate_count(text):\n n = 0\n for c in set(text.lower()):\n if text.lower().count(c) > 1:\n n += 1\n return n", "def word_count(phrase):\n Wordlist = phrase.replace(\"\\n\", ' ') # Creating a list without escape codes\n Wordlist = Wordlist.split(\" \") # Split the sentence in words\n dictionary = {} # Create an empty dictionary to store the results\n for i in Wordlist:\n if i != '': # unless is a ''\n dictionary[i] = Wordlist.count(i)\n return dictionary", "def count_words(phrase):\n # split the input string at spaces\n phrase_split = phrase.split()\n\n # initiate empty dictionary\n word_count = {}\n\n # iterate over words in the phrase\n for word in phrase_split:\n if word in word_count:\n\n # if the word is already a key in the dictionary, increase the value by 1\n word_count[word] += 1\n\n else:\n # if the word is not a key in the dictionary, set its value to 1\n word_count[word] = 1\n\n return word_count", "def count_words(text, words):\n count = 0\n for word in words: # probably should make words lowercase too\n if word in text.lower():\n count += 1\n return count", "def keyword_count(searches, doc):\n for search in searches:\n print \"\\\"{0}\\\": {1}\".format(search, len(re.findall(searches[search], doc)))", "def word_count(text):\n\n # Tokenize text on whitespace / newline\n words = text.strip().split()\n\n # Create a dictionary from 
the set of tokens, initializing each count to 0\n counts = dict.fromkeys(words, 0)\n\n # Iterate over the text to count occurences of each token\n for word in words:\n counts[word] += 1\n\n # Return the counts\n return counts", "def main():\n text = str(input(\"Sentence: \"))\n print(\"text: {}\".format(text))\n text_dict = {}\n text_list = text.split()\n text_list.sort()\n for text in text_list:\n if text in text_dict:\n text_dict[text] += 1\n else:\n text_dict[text] = 1\n # for text2 in text_dict:\n # if text_list[text] == text_dict[text2]:\n # text_dict[text2] += 1\n # else:\n # text_dict[text_list[text]] += 1\n for text in text_dict:\n print(text, \":\", text_dict[text])", "def word_count(phrase):\n words = phrase.split()\n deDupedWords = set(words)\n wordCount = {}\n\n for element in deDupedWords:\n wordCount.update({element: words.count(element)})\n\n return wordCount", "def word_count(excerpt):\n # Validate that we are actually give something to work with\n assert excerpt, \"excerpt cannot be blank\"\n return Counter(excerpt.split())", "def word_count(text):\n # Use a dictionary to store the words\n words = {}\n\n # Simple way to strip extra whitespace\n text = ' '.join(text.split())\n\n # Now iterate through, splitting on space\n for word in text.split(\" \"):\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n\n return words", "def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))", "def analyze(self, text):\n\n sent = 0\n for word in text.split():\n # check each word in tweet\n if word.strip(\":, \").lower() in self.posWords:\n sent += 1\n elif word.strip(\":, \").lower() in self.negWords:\n sent -= 1\n\n return sent", "def custom_count(string1, search_string):\n count = 0\n for index in range(0, len(string1)):\n phrase = string1[index:index + len(search_string)]\n count += (phrase == search_string)\n return count", "def count_word(word, titles):\n word = word.lower()\n count = 0\n for title in titles:\n if word in title.lower():\n count += 1\n return count", "def count_words_and_dublicates(novel):", "def duplicate_count(text):\n return len([c for c in set(text.lower()) if text.lower().count(c) > 1])", "def count_words():\n paragraph = \"a distinct section of a piece of writing,\"\n # 替换\n paragraph.replace(\",\", \" \").replace(\":\", \" \").replace(\";\", \" \").replace(\".\", \" \").replace(\"?\", \" \")\n words = paragraph.split(\" \")\n nums = {}\n\n for word in words:\n nums[word] = nums[word]+1 if word in nums else 1\n # nums[word] = nums.get(word, 0) + 1\n\n for word, num in nums.items():\n print(word, \": \", num)", "def count_words(word, words):\n same_words_in_message = 0\n for element in words:\n if element == word:\n same_words_in_message += 1\n return same_words_in_message" ]
[ "0.7900522", "0.7784761", "0.7549326", "0.7538776", "0.75155354", "0.7307728", "0.7293747", "0.7270723", "0.7217096", "0.72156507", "0.7180605", "0.71126467", "0.71083605", "0.7090928", "0.7031768", "0.70294064", "0.7003735", "0.69982064", "0.69665307", "0.69554585", "0.693203", "0.68255585", "0.6824166", "0.6820178", "0.6807597", "0.68031436", "0.6790428", "0.67581016", "0.67140055", "0.6674359" ]
0.8243048
0
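A few quick checks of count_phrase_in_text above, written as assertions; the sample phrases and texts are invented for illustration.
# Illustrative checks; sample strings are made up.
assert count_phrase_in_text("fox", "the fox jumps over the lazy fox") == 2
assert count_phrase_in_text("  fox ", "fox says hi") == 1  # whitespace is stripped first
assert count_phrase_in_text("fox", "foxes are sly") == 0   # "foxes" does not count as "fox" here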
Downloads and unzips an online csv file.
def download_and_unzip(url, zip_path, csv_path, data_folder):
    download_from_url(url, zip_path)
    unzip(zip_path, csv_path, data_folder)
    print('Done.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def downloadData(url):\r\n\r\n data = urllib2.urlopen(url)\r\n csvdata = data.read()", "def download_and_unzip_dataset(url, path):\n dl = urllib.urlretrieve(url)\n zf = zipfile.ZipFile(dl[0])\n zf.extractall(path)\n return zf", "def download_and_unzip_data(\n url=\"https://storage.googleapis.com/simpeg/em_examples/tdem_groundedsource/tdem_groundedsource.tar\",\n):\n # download the data\n downloads = utils.download(url)\n\n # directory where the downloaded files are\n directory = downloads.split(\".\")[0]\n\n # unzip the tarfile\n tar = tarfile.open(downloads, \"r\")\n tar.extractall()\n tar.close()\n\n return downloads, directory", "def main(file_url, file_path):\n\n # extract file from the link\n\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n r = requests.get(str(file_url))\n\n #unzip the zip file\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path = file_path)", "def download_extract_zip(url):\n response = requests.get(url)\n with ZipFile(BytesIO(response.content)) as thezip:\n for zipinfo in thezip.infolist():\n with thezip.open(zipinfo) as thefile:\n df = pd.read_csv(thefile)\n return (df)", "def _fetch_and_unzip(url, file_name):\n res = requests.get(url, stream=True, verify=False)\n # get dataset size\n total_size = int(res.headers[\"Content-Length\"])\n temp_size = 0\n with open(file_name, \"wb+\") as f:\n for chunk in res.iter_content(chunk_size=1024):\n temp_size += len(chunk)\n f.write(chunk)\n f.flush()\n done = int(100 * temp_size / total_size)\n # show download progress\n sys.stdout.write(\"\\r[{}{}] {:.2f}%\".format(\"█\" * done, \" \" * (100 - done), 100 * temp_size / total_size))\n sys.stdout.flush()\n print(\"\\n============== {} is already ==============\".format(file_name))\n _unzip(file_name)\n os.remove(file_name)", "def unzip() -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # define the destination\n destination = project_dir / 'data' / 'raw'\n\n # extract zip\n zip_file = ZipFile(destination / \"original.zip\")\n zip_file.extractall(destination)", "def download_and_unzip(url, extract_to='.'):\n http_response = urlopen(url)\n zipfile = ZipFile(BytesIO(http_response.read()))\n zipfile.extractall(path=extract_to)", "def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")", "def _download_and_uncompress_dataset(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n 
filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dataset_dir)", "def downloadData(url):\n response = urllib2.urlopen(url)\n html = response.read()\n localfile = open('hitdata.csv', 'wb')\n localfile.write(html)\n localfile.close()", "def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)", "def fetch_csv_from_url(url):\n\t\n\t#cache avoidance.\n\twith requests_cache.disabled():\n\t\tr = requests.get(url)\n\t\tif r.status_code == 200:\n\t\t\treturn r.iter_lines()", "def download_data():\r\n print('Downloading cifar-10 data...')\r\n request.urlretrieve(dataurl)\r\n print('Done')\r\n print('Please unzip files. command is:')\r\n print('gzip -d cifar-10-python.tar.gz')\r\n print('tar -xf cifar-10-python.tar')\r\n exit()", "def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)", "def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "def downloading_csv(self, url_address):\n cache.clear()\n url = URL(url_address)\n f = open(self.cur_quotes_csvfile, 'wb') # save as test.gif\n f.write(url.download())\n f.close()", "def download_csv_file(self, url_seed):\n\n # os.path.join shits itself if the second string has a leading slash.\n if url_seed[0] == '/':\n url_seed = url_seed[1:]\n\n full_url = os.path.join(self.base_url, url_seed)\n save_location = os.path.join(self.temp_loc, os.path.basename(url_seed))\n try:\n opened = urllib2.urlopen(full_url)\n with open(save_location, 'wb') as w:\n w.write(opened.read())\n except Exception:\n # We keep going as the links can be dead.\n print \"Had Difficulties downloading %s, continuing anyway\" % full_url\n save_location = None\n\n return save_location", "def download_data(url, dest, *a, **kw):\n pth = os.path.join(\n os.path.dirname(__file__),\n 'data',\n 'allCountries.zip'\n )\n\n open(dest, 'w').write(open(pth).read())", "def download_one_zip(data_url, data_dir):\r\n\r\n zipfile_path, unzip_dir = zip_file_name_from_url(data_url, data_dir)\r\n if not is_there_file(zipfile_path, unzip_dir):\r\n if not os.path.isdir(unzip_dir):\r\n os.makedirs(unzip_dir)\r\n r = requests.get(data_url, stream=True)\r\n with open(zipfile_path, \"wb\") as py_file:\r\n for chunk in r.iter_content(chunk_size=1024): # 1024 bytes\r\n if chunk:\r\n py_file.write(chunk)\r\n unzip_nested_zip(zipfile_path, unzip_dir), download_small_file", "def download_small_zip(data_url, data_dir):\r\n zipfile_path, unzip_dir = zip_file_name_from_url(data_url, data_dir)\r\n if not is_there_file(zipfile_path, unzip_dir):\r\n if not os.path.isdir(unzip_dir):\r\n os.mkdir(unzip_dir)\r\n zipfile_path, _ = 
urllib.request.urlretrieve(data_url, zipfile_path)\r\n unzip_nested_zip(zipfile_path, unzip_dir)", "def download_dataset(url=DATASET_URL):\n df = pd.read_csv(url, index_col=0)\n \n # ディレクトリが無ければ,作成する\n if not os.path.isdir(BASE_DIR):\n os.makedirs(BASE_DIR)\n \n df.to_csv(LOCAL_FILE_NAME)", "def download_mp3_by_csv(s, username, passwd, csv_path, download_dir=None):\n\n s = login(s, username, passwd)\n refs = pd.read_csv(csv_path, sep=';').Name\n length = len(refs)\n for i, ref in enumerate(refs):\n sys.stdout.write('\\r')\n sys.stdout.write('downloading: %s/%s' % (i+1, length))\n sys.stdout.flush()\n s = search_by_ref(s, ref)\n mp3_path = None\n if download_dir != None:\n file_name = '%s.mp3' % ref\n mp3_path = os.path.join(download_dir, file_name)\n result = download_mp3(s, mp3_path, ref)\n if result == 1:\n return 1\n sys.stdout.write('\\n')\n sys.stdout.flush()\n s.driver.close()", "def _csv_download(page):\n # gc = gspread.login(page.timetable.google_user, page.timetable.google_passwd)\n gc = googleoauth.authenticate_google_docs()\n csv_file = gc.open('WebValley2019')\n\n # gsession = gss.Client(page.timetable.google_user, page.timetable.google_passwd)\n # ss = gss.Spreadsheet(page.timetable.spreadsheet)\n # csv_file = gsession.download(ss, gid=page.timetable.spreadsheet_gid)\n # read = csv_file.read()\n read = csv_file.worksheet('TIMETABLE').get_all_values()\n # print \"csv\", read\n return read", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def main(url, localfile):\n ph.download_file(url, localfile)", "def unzip_citibike_data(zip_dir):\n# zip_dir = \"data/citibike-tripdata-nyc/\"\n# csv_dir = \"data/citibike-tripdata-nyc/csv\"\n extension = \".zip\"\n\n # for each zip file in zip_dir extract data\n for item in os.listdir(zip_dir):\n if item.endswith(extension):\n\n # create zipfile object and extract\n file_name = zip_dir + item\n with zipfile.ZipFile(file_name, \"r\") as zip_ref:\n zip_ref.extractall(zip_dir)\n print(item + \" done\")", "def download_zip_file(zip_remote, save_dir, force_overwrite, cleanup=False):\n zip_download_path = 
download_from_remote(zip_remote, save_dir, force_overwrite)\n unzip(zip_download_path, cleanup=cleanup)", "def create_df_from_remote_csv(url):\n if url is None:\n return None\n response = requests.get(url)\n if response.status_code == 200:\n if response.headers['content-type'] == \"text/csv\":\n response.encoding = 'utf-8'\n data = pd.read_csv(io.StringIO(response.text))\n return data\n else:\n print('Error. '\n 'The file is encoded using unsupported content-type {}'\n .format(response.headers['content-type']))\n else:\n print('Error. '\n 'The file could not be downloaded. Returned HTTP status code: {}'\n .format(response.status_code))\n\n return None", "def download_compressed_dataset(url):\n raise NotImplementedError" ]
[ "0.70533955", "0.7049977", "0.6632082", "0.6481793", "0.64712685", "0.63593525", "0.62945485", "0.6238677", "0.61125064", "0.61080784", "0.6092125", "0.60683024", "0.6016457", "0.6001136", "0.60005647", "0.59859836", "0.5955988", "0.59508663", "0.5920118", "0.59149975", "0.58934593", "0.58716804", "0.58705574", "0.58572906", "0.5828687", "0.5795689", "0.5774817", "0.57631016", "0.57530797", "0.5747161" ]
0.8117438
0
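A usage sketch for download_and_unzip above; the URL and file paths are placeholders, and download_from_url and unzip are helpers assumed to be defined in the same module as the document function.
# Hypothetical values; download_from_url and unzip are assumed helpers
# living alongside download_and_unzip.
DATA_URL = 'https://example.com/datasets/electricity.zip'
download_and_unzip(
    url=DATA_URL,
    zip_path='data/electricity.zip',
    csv_path='data/electricity.csv',
    data_folder='data',
)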
Get the new suggested trials with random search.
def get_new_suggestions(self, study_id, trials=[], number=1):
    search_space = hyperopt.hp.uniform('x', -10, 10)
    search_space_instance = search_space
    rstate = np.random.RandomState()
    trials = hyperopt.Trials()
    domain = hyperopt.Domain(None, search_space_instance, pass_expr_memo_ctrl=None)
    algorithm = hyperopt.tpe.suggest
    rval = hyperopt.FMinIter(algorithm, domain, trials, max_evals=-1, rstate=rstate, verbose=0)
    rval.catch_eval_exceptions = False
    algorithm = rval.algo
    new_ids = rval.trials.new_trial_ids(1)
    rval.trials.refresh()
    random_state = rval.rstate.randint(2**31-1)
    new_trials = algorithm(new_ids, rval.domain, trials, random_state)
    rval.trials.refresh()

    # Example: {'x': [8.721658602103911]}
    vals = new_trials[0]['misc']['vals']
    #import ipdb;ipdb.set_trace()

    """
    parameter = dict()
    for key in vals:
        try:
            parameter[key] = vals[key][0].item()
        except Exception:
            parameter[key] = None
    """

    """
    trials = rval.trials
    trial = trials.new_trial_docs([new_id], rval_specs, rval_results, rval_miscs)[0]
    trial['result'] = {'loss': reward, 'status': 'ok'}
    trial['state'] = hp.JOB_STATE_DONE
    trials.insert_trial_docs([trial])
    trials.refresh()
    """

    """
    def _choose_tuner(self, algorithm_name):
        if algorithm_name == 'tpe':
            return hp.tpe.suggest
        if algorithm_name == 'random_search':
            return hp.rand.suggest
        if algorithm_name == 'anneal':
            return hp.anneal.suggest
        raise RuntimeError('Not support tuner algorithm in hyperopt.')
    """

    return_trial_list = []

    study = Study.objects.get(id=study_id)
    study_configuration_json = json.loads(study.study_configuration)
    params = study_configuration_json["params"]

    for i in range(number):
        trial = Trial.create(study.id, "TpeTrial")
        parameter_values_json = {}

        for param in params:
            if param["type"] == "INTEGER" or param["type"] == "DISCRETE" or param["type"] == "CATEGORICAL":
                pass
            elif param["type"] == "DOUBLE":
                # TODO: Get the specified value from hyperopt
                suggest_value = vals["x"][0]
                parameter_values_json[param["parameterName"]] = suggest_value

            parameter_values_json[param["parameterName"]] = suggest_value

        trial.parameter_values = json.dumps(parameter_values_json)
        trial.save()
        return_trial_list.append(trial)

    return return_trial_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_suggestions(self, space, old_trials, num_suggestions=10, budget=50):\n new_trials = []\n\n # Use 3 times the dimensionality of the space for training the GP.\n if len(old_trials) < len(space) * 3:\n opt = RandomSearch()\n return opt.get_suggestions(space, old_trials, num_suggestions, budget)\n\n converter = Converter(space)\n bounds = self.compute_bounds(space)\n\n old_trials_params = [trial['parameters'] for trial in old_trials]\n X = np.array([converter.encode(t) for t in old_trials_params])\n Y = np.array([trial['score'] for trial in old_trials])\n\n # Fit a GPR to the completed trials\n self.gpr.fit(X, Y)\n\n for i in range(num_suggestions):\n\n # Obtain next sampling point from the acquisition function\n x_trial = self.propose_location(X, Y, bounds, budget)\n\n new_trial = converter.decode(x_trial)\n if new_trial not in new_trials and \\\n new_trial not in old_trials_params:\n new_trials.append(new_trial)\n\n return new_trials", "def get_new_suggestions(self, study, trials=[], number=1):\n # Construct search space, example: {\"x\": hyperopt.hp.uniform('x', -10, 10), \"x2\": hyperopt.hp.uniform('x2', -10, 10)}\n hyperopt_search_space = {}\n\n # study = Study.objects.get(name=study_name)\n study_configuration_json = json.loads(study.study_configuration)\n params = study_configuration_json[\"params\"]\n\n for param in params:\n param_name = param[\"parameterName\"]\n\n if param[\"type\"] == \"INTEGER\":\n # TODO: Support int type of search space)\n pass\n\n elif param[\"type\"] == \"DOUBLE\":\n hyperopt_search_space[param_name] = hyperopt.hp.uniform(\n param_name, param[\"minValue\"], param[\"maxValue\"]\n )\n\n elif param[\"type\"] == \"DISCRETE\" or param[\"type\"] == \"CATEGORICAL\":\n feasible_point_list = [\n value.strip() for value in param[\"feasiblePoints\"].split(\",\")\n ]\n hyperopt_search_space[param_name] = hyperopt.hp.choice(\n param_name, feasible_point_list\n )\n\n # New hyperopt variables\n hyperopt_rstate = np.random.RandomState(random.randint(1, 2 ** 31 - 1))\n hyperopt_domain = hyperopt.Domain(\n None, hyperopt_search_space, pass_expr_memo_ctrl=None\n )\n\n hyperopt_trial_specs = []\n hyperopt_trial_results = []\n # Example: # Example: [{'tid': 0, 'idxs': {'l1_normalization': [0], 'learning_rate': [0], 'hidden2': [0], 'optimizer': [0]}, 'cmd': ('domain_attachment', 'FMinIter_Domain'), 'vals': {'l1_normalization': [0.1], 'learning_rate': [0.1], 'hidden2': [1], 'optimizer': [1]}, 'workdir': None}]\n hyperopt_trial_miscs = []\n hyperopt_trial_new_ids = []\n\n # Update hyperopt for trained trials with completed advisor trials\n completed_hyperopt_trials = hyperopt.Trials()\n\n # completed_advisor_trials = Trial.objects.filter(\n # study_name=study_name, status=\"Completed\")\n completed_advisor_trials = [i for i in trials if i.status == \"Completed\"]\n\n for index, advisor_trial in enumerate(completed_advisor_trials):\n # Example: {\"learning_rate\": 0.01, \"optimizer\": \"ftrl\"}\n parameter_values_json = json.loads(advisor_trial.parameter_values)\n\n # Example: {'l1_normalization': [0], 'learning_rate': [0], 'hidden2': [0], 'optimizer': [0]}\n hyperopt_trial_miscs_idxs = {}\n # Example: {'l1_normalization': [0.1], 'learning_rate': [0.1], 'hidden2': [1], 'optimizer': [1]}\n hyperopt_trial_miscs_vals = {}\n new_id = index\n hyperopt_trial_new_ids.append(new_id)\n hyperopt_trial_misc = dict(\n tid=new_id, cmd=hyperopt_domain.cmd, workdir=hyperopt_domain.workdir\n )\n\n for param in params:\n\n if param[\"type\"] == \"INTEGER\":\n pass\n\n elif param[\"type\"] 
== \"DOUBLE\":\n parameter_value = parameter_values_json[param[\"parameterName\"]]\n hyperopt_trial_miscs_idxs[param[\"parameterName\"]] = [index]\n hyperopt_trial_miscs_vals[param[\"parameterName\"]] = [\n parameter_value\n ]\n\n elif param[\"type\"] == \"DISCRETE\":\n feasible_points_string = param[\"feasiblePoints\"]\n feasible_points = [\n float(value.strip())\n for value in feasible_points_string.split(\",\")\n ]\n parameter_value = parameter_values_json[param[\"parameterName\"]]\n index_of_value_in_list = feasible_points.index(parameter_value)\n hyperopt_trial_miscs_idxs[param[\"parameterName\"]] = [index]\n hyperopt_trial_miscs_vals[param[\"parameterName\"]] = [\n index_of_value_in_list\n ]\n\n elif param[\"type\"] == \"CATEGORICAL\":\n feasible_points_string = param[\"feasiblePoints\"]\n feasible_points = [\n value.strip() for value in feasible_points_string.split(\",\")\n ]\n # Example: \"ftrl\"\n parameter_value = parameter_values_json[param[\"parameterName\"]]\n index_of_value_in_list = feasible_points.index(parameter_value)\n hyperopt_trial_miscs_idxs[param[\"parameterName\"]] = [index]\n hyperopt_trial_miscs_vals[param[\"parameterName\"]] = [\n index_of_value_in_list\n ]\n\n hyperopt_trial_specs.append(None)\n\n hyperopt_trial_misc[\"idxs\"] = hyperopt_trial_miscs_idxs\n hyperopt_trial_misc[\"vals\"] = hyperopt_trial_miscs_vals\n hyperopt_trial_miscs.append(hyperopt_trial_misc)\n\n # TODO: Use negative objective value for loss or not\n\n loss_for_hyperopt = advisor_trial.objective_value\n if study_configuration_json[\"goal\"] == \"MAXIMIZE\":\n # Now hyperopt only supports fmin and we need to reverse objective value for maximization\n loss_for_hyperopt = -1 * advisor_trial.objective_value\n\n hyperopt_trial_result = {\n \"loss\": loss_for_hyperopt,\n \"status\": hyperopt.STATUS_OK,\n }\n hyperopt_trial_results.append(hyperopt_trial_result)\n\n if len(completed_advisor_trials) > 0:\n # Example: {'refresh_time': datetime.datetime(2018, 9, 18, 12, 6, 41, 922000), 'book_time': datetime.datetime(2018, 9, 18, 12, 6, 41, 922000), 'misc': {'tid': 0, 'idxs': {'x2': [0], 'x': [0]}, 'cmd': ('domain_attachment', 'FMinIter_Domain'), 'vals': {'x2': [-8.137088361136204], 'x': [-4.849028446711832]}, 'workdir': None}, 'state': 2, 'tid': 0, 'exp_key': None, 'version': 0, 'result': {'status': 'ok', 'loss': 14.849028446711833}, 'owner': None, 'spec': None}\n hyperopt_trials = completed_hyperopt_trials.new_trial_docs(\n hyperopt_trial_new_ids,\n hyperopt_trial_specs,\n hyperopt_trial_results,\n hyperopt_trial_miscs,\n )\n for current_hyperopt_trials in hyperopt_trials:\n current_hyperopt_trials[\"state\"] = hyperopt.JOB_STATE_DONE\n\n completed_hyperopt_trials.insert_trial_docs(hyperopt_trials)\n completed_hyperopt_trials.refresh()\n\n rval = hyperopt.FMinIter(\n self.hyperopt_algorithm,\n hyperopt_domain,\n completed_hyperopt_trials,\n max_evals=-1,\n rstate=hyperopt_rstate,\n verbose=0,\n )\n rval.catch_eval_exceptions = False\n\n new_ids = rval.trials.new_trial_ids(number)\n\n rval.trials.refresh()\n\n random_state = rval.rstate.randint(2 ** 31 - 1)\n new_trials = self.hyperopt_algorithm(\n new_ids, rval.domain, completed_hyperopt_trials, random_state\n )\n rval.trials.refresh()\n\n # Construct return advisor trials from new hyperopt trials\n return_trial_list = []\n\n for i in range(number):\n\n # Example: {u'hidden2': [2], u'learning_rate': [0.04633366105812467], u'l1_normalization': [0.16858448611765364], u'optimizer': [3]}\n vals = new_trials[i][\"misc\"][\"vals\"]\n\n new_advisor_trial = 
Trial.create(study.name, \"TpeTrial\")\n parameter_values_json = {}\n\n for param in params:\n\n if param[\"type\"] == \"INTEGER\":\n pass\n\n elif param[\"type\"] == \"DOUBLE\":\n suggest_value = vals[param[\"parameterName\"]][0]\n parameter_values_json[param[\"parameterName\"]] = suggest_value\n\n elif param[\"type\"] == \"DISCRETE\":\n feasible_point_list = [\n float(value.strip())\n for value in param[\"feasiblePoints\"].split(\",\")\n ]\n suggest_index = vals[param[\"parameterName\"]][0]\n suggest_value = feasible_point_list[suggest_index]\n\n elif param[\"type\"] == \"CATEGORICAL\":\n feasible_point_list = [\n value.strip() for value in param[\"feasiblePoints\"].split(\",\")\n ]\n suggest_index = vals[param[\"parameterName\"]][0]\n suggest_value = feasible_point_list[suggest_index]\n\n parameter_values_json[param[\"parameterName\"]] = suggest_value\n\n new_advisor_trial.parameter_values = json.dumps(parameter_values_json)\n # new_advisor_trial.save()\n return_trial_list.append(new_advisor_trial)\n\n return return_trial_list", "def suggest(self, num: int) -> list[Trial] | None:\n # Only sample up to `n_initial_points` and after that only sample one at a time.\n num_samples = min(num, max(self.n_initial_points - self.n_suggested, 1))\n\n samples: list[Trial] = []\n while len(samples) < num_samples and self.n_suggested < self.space.cardinality:\n if self.n_observed < self.n_initial_points:\n candidates = self._suggest_random(num_samples)\n logger.debug(\"Random candidates: %s\", candidates)\n else:\n v = max(num_samples - len(samples), 0)\n candidates = self._suggest_bo(v)\n logger.debug(\"BO candidates: %s\", candidates)\n\n if not candidates:\n # Perhaps the BO algo wasn't able to suggest any points? Break in that case.\n break\n for candidate in candidates:\n self.register(candidate)\n samples.append(candidate)\n return samples", "def suggest(suggestions):\n weight_sum = sum(suggestions.values())\n prob_ranges = []\n lower_bound = 0.0\n\n # generate probability ranges\n for task, weight in suggestions.iteritems():\n upper_bound = lower_bound + weight / weight_sum\n prob_ranges.append((task, (lower_bound, upper_bound)))\n\n # update lower bound\n lower_bound = upper_bound\n\n rand_number = random.random()\n\n for task, (low, high) in prob_ranges:\n if low <= rand_number < high:\n return task\n\n raise AssertionError('Should not be here. 
O_O');", "def recommend_random(self, num:int)->list:\n results = random.sample(self.items, k = num)\n results = [i.lower() for i in results]\n return results", "def suggest(self, n_suggestions=1):\n \n if self.batch_size is None:\n next_guess = GPyOpt.experiment_design.initial_design('random', self.space, n_suggestions)\n else:\n next_guess = self.bo._compute_next_evaluations()#in the shape of np.zeros((n_suggestions, self.dim))\n \n\n\n\n # preprocess the array from GpyOpt for unwarpping\n if self.hasCat == True:\n new_suggest = []\n cat_vec_pos = self.cat_vec[:,0]\n cat_vec_len = self.cat_vec[:,1]\n \n # for each suggstion in the batch\n for i in range(len(next_guess)):\n index = 0\n single_suggest = []\n # parsing through suggestions to replace the cat ones to the usable format \n for j in range(len(next_guess[0])):\n if j != cat_vec_pos[index]:\n single_suggest.append(next_guess[0][j])\n else:\n # if a cat varible\n value = next_guess[i][j]\n vec = [0.]*cat_vec_len[index]\n vec[value] = 1.\n single_suggest.extend(vec)\n index += 1\n index = min(index, len(cat_vec_pos)-1)\n # asserting the desired length of the suggestion\n assert len(single_suggest) == len(next_guess[0])+sum(cat_vec_len)-len(self.cat_vec)\n new_suggest.append(single_suggest)\n assert len(new_suggest) == len(next_guess)\n \n new_suggest = np.array(new_suggest).reshape(len(suggest), len(suggest[0])+sum(cat_vec_len)-len(self.cat_vec))\n next_guess = new_suggest\n \n\n suggestions = self.space_x.unwarp(next_guess)\n \n return suggestions", "def suggest(self, num):\n trials = []\n with self.get_client() as client:\n _trials, _ = client.get_next_trials(num)\n for trial_index, parameters in _trials.items():\n parameters = AxOptimizer.reverse_params(parameters, self.space)\n\n # Ax does not support Fidelity dimension type so fake it with\n # its max\n if self.fidelity_index is not None:\n # Convert 0-dim arrays into python numbers so their type can\n # be validated by Ax\n fidelity_dim = self.space[self.fidelity_index]\n while isinstance(fidelity_dim, TransformedDimension):\n fidelity_dim = fidelity_dim.original_dimension\n assert isinstance(fidelity_dim, Fidelity)\n parameters[self.fidelity_index] = float(fidelity_dim.high)\n\n new_trial = format_trials.dict_to_trial(parameters, self.space)\n\n if not self.has_suggested(new_trial):\n self.register(new_trial)\n trials.append(new_trial)\n self._trials_map[self.get_id(new_trial)] = trial_index # tmp\n\n return trials", "def _suggest(self, trial_id: int) -> Optional[TrialSuggestion]:\n raise NotImplementedError", "def suggest(self, trial_id: str) -> Optional[Dict]:\r\n raise NotImplementedError", "def generate_random_search_trials(params, nb_trials):\n if nb_trials is None:\n raise TypeError(\n '`random_search` strategy requires nb_trails to be an int.')\n results = []\n\n # ensures we have unique results\n seen_trials = set()\n\n # shuffle each param list\n potential_trials = 1\n for param in params:\n random.shuffle(param)\n potential_trials *= len(param)\n\n # we can't sample more trials than are possible\n max_iters = min(potential_trials, nb_trials)\n\n # then for the nb of trials requested, create a new param tuple\n # by picking a random integer at each param level\n while len(results) < max_iters:\n trial = []\n for param in params:\n sampled_param = random.sample(param, 1)[0]\n trial.append(sampled_param)\n\n # verify this is a unique trial so we\n # don't duplicate work\n trial_str = json.dumps(trial)\n if trial_str not in seen_trials:\n seen_trials.add(trial_str)\n 
results.append(trial)\n\n return results", "def get_suggestions(reviewer: Any, graph: Graph, threshold: int = 10) -> list[Any]:\n reviewers_so_far = helper(reviewer, graph)\n\n sim_scores = {}\n\n for user in reviewers_so_far:\n sim_score = round(graph.get_similarity_score(user, reviewer), 2)\n\n if sim_score > 0:\n if sim_score not in sim_scores:\n sim_scores[sim_score] = [user]\n else:\n sim_scores[sim_score].append(user)\n\n recommendations_so_far = set()\n\n while len(recommendations_so_far) < threshold and len(sim_scores) > 0:\n similar_reviewers = sim_scores[max(sim_scores)]\n\n if similar_reviewers != []:\n sim_user = similar_reviewers.pop(random.randint(0, len(similar_reviewers) - 1))\n rec_movies = graph.suggest_movies(reviewer, sim_user)\n for movie in rec_movies:\n recommendations_so_far.add(movie)\n\n else:\n sim_scores.pop(max(sim_scores))\n\n recommendations = list(recommendations_so_far)\n\n while len(recommendations) > threshold:\n recommendations.pop()\n\n if len(recommendations) == 0:\n return [' recommendations not found. Try adding more movies!']\n\n else:\n return recommendations", "def select_random_trial(completed_trials, possible_trials):\n if completed_trials is None:\n completed_trials = []\n if len(completed_trials) == len(possible_trials):\n return None, None\n\n incomplete_trials = np.setdiff1d(possible_trials, completed_trials)\n rand_trial_idx = np.random.randint(0, len(incomplete_trials))\n trial = incomplete_trials[rand_trial_idx]\n\n return select_trial(trial)", "def get_winners(self):\n\n if self.optimal is not None:\n return self.optimal\n clean_proposals = self.cleaner.create_scenarios(self.proposals)\n self.optimal = self.optimizer.optimize(clean_proposals)\n return self.optimal", "def random_search():\n\tgamma = 0.7\n\talpha = 0.3\n\tepsilon = 1\n\texploration_rate_decay = 0.87\n\n\tmax_tries = 10\n\tbest_score = -1000\n\tscores = {}\n\n\tfor attempt in range(max_tries):\n\n\t\tagent = Q_Agent(epsilon=1, alpha=alpha, gamma=gamma, exploration_rate_decay=exploration_rate_decay)\n\t\t_, rewards, steps = agent.train(env, iter_n=300, policy='ε–greedy', print_results=False)\n\t\tprint(np.mean(rewards))\n\t\tscores[attempt] = np.mean(rewards)\n\n\t\tprint(\n\t\t\t\"Score:{}, gamma {}, alpha {}, epsilon {}, e_decay_rate{}\".format(\n\t\t\t\tscores[attempt], gamma, alpha, epsilon, exploration_rate_decay))\n\n\t\tif scores[attempt] > best_score:\n\t\t\tbest_score = scores[attempt]\n\t\t\tprint(best_score)\n\t\t\tbest_gamma = gamma\n\t\t\tbest_alpha = alpha\n\t\t\tbest_epsilon = epsilon\n\t\t\tbest_decay = exploration_rate_decay\n\n\t\tgamma = best_gamma + (np.random.randint(-1, 2) / 10)\n\t\tgamma = min(1, gamma)\n\t\tgamma = max(0, gamma)\n\t\talpha = best_alpha + (np.random.randint(-1, 2) / 10)\n\t\talpha = min(1, alpha)\n\t\talpha = max(0, alpha)\n\t\tepsilon = 1\n\t\texploration_rate_decay = best_decay + np.random.randint(-1, 2) / 100\n\t\texploration_rate_decay = min(0.99, exploration_rate_decay)\n\t\texploration_rate_decay = max(0.7, exploration_rate_decay)\n\n\tprint(\"Best validation_accuracy:\", best_score)\n\tprint(\"Best settings:\")\n\tprint(\"best gamma:\", best_gamma)\n\tprint(\"best alpha:\", best_alpha)\n\tprint(\"best epsilon:\", best_epsilon)\n\tprint(\"best decay:\", best_decay)", "def suggested_search(search_text):\n threshold = 0.6\n global model\n\n search_text = remove_stop_words(search_text)\n tmp_search = search_text.split()\n\n new_search = []\n for word in tmp_search:\n similar_words = get_similar_words(model, word)\n new_search = 
select_top_words(similar_words, new_search, threshold)\n\n new_search = list(set(new_search))\n new_search = ' '.join(new_search)\n\n return new_search + ' ' + search_text", "def test_get_suggestion_compare_interest_suggestion(self):\n\t\t# Client type Request\n\t\tclient = Client()\n\t\t# Bob the social aware has no interests in common with Bob the artist\n\t\t# Bob the social aware selects a selectable item from page 1\n\t\tclient.post(self.uri_add_selectable+self.bob_the_socialaware_twin_profile.token+'/',\n\t\t content_type='application/json', data=page_one_objects(self.bob_the_socialaware_twin_auth, 0))\n\t\t# Alice has a lot in common with bob the artist, but has a different gender.\n\t\t# Alice selects another object\n\t\tclient.post(self.uri_add_selectable+self.alice_the_artist_profile.token+'/',\n\t\t content_type='application/json', data=page_one_objects(self.alice_the_artist_auth, 1))\n\n\t\tbob_the_artist_twin_page1_suggestion = client.post(self.uri_suggestion+self.bob_the_artist_twin_profile.token+'/', \n\t\t content_type='application/json', data=page_one(self.bob_the_artist_twin_auth))\n\n\t\tcontx = json.loads(bob_the_artist_twin_page1_suggestion.content.decode('utf-8'))['recommendation']\n\t\t# First element of the given list\n\t\tself.assertEqual(contx, '[1, 0]')", "def sample_best(self, start=\"\", max_len=20, times=5):\n samples = [self.sample_many(start, max_len) for i in range(times)]\n # filter out empty completions\n samples = [sample for sample in samples if sample[1]]\n if not samples:\n text = (\"LOOK BEHIND YOU A THREE-HEADED MONKEY\",)\n ngrams = []\n else:\n text, ngrams = min(samples, key=lambda sample: abs(len(sample[0]) - max_len))\n if start:\n text = (start,) + text\n return \" \".join(text), ngrams", "def question_with_suggested_answers(text, default, suggest):\n\n reply = question(text, default)\n while reply not in suggest:\n report(_(\"\"\"The value you have chosen is not among the suggested values.\nYou have chosen '%s'.\"\"\" % reply))\n report(_(\"The suggested values are \" + str(suggest)))\n correct = question(_(\"Do you want to correct your answer?\"), True)\n if correct:\n reply = question(text, default)\n else:\n return reply\n return reply", "def get_hint(self, data):\r\n # First, validate our inputs.\r\n try:\r\n answer = self.answer_to_str(data)\r\n except (ValueError, AttributeError):\r\n # Sometimes, we get an answer that's just not parsable. Do nothing.\r\n log.exception('Answer not parsable: ' + str(data))\r\n return\r\n if not self.validate_answer(answer):\r\n # Answer is not in the right form.\r\n log.exception('Answer not valid: ' + str(answer))\r\n return\r\n if answer not in self.user_submissions:\r\n self.user_submissions += [answer]\r\n\r\n # For all answers similar enough to our own, accumulate all hints together.\r\n # Also track the original answer of each hint.\r\n matching_answers = self.get_matching_answers(answer)\r\n matching_hints = {}\r\n for matching_answer in matching_answers:\r\n temp_dict = copy.deepcopy(self.hints[matching_answer])\r\n for key, value in temp_dict.items():\r\n # Each value now has hint, votes, matching_answer.\r\n temp_dict[key] = value + [matching_answer]\r\n matching_hints.update(temp_dict)\r\n # matching_hints now maps pk's to lists of [hint, votes, matching_answer]\r\n\r\n # Finally, randomly choose a subset of matching_hints to actually show.\r\n if not matching_hints:\r\n # No hints to give. 
Return.\r\n return\r\n # Get the top hint, plus two random hints.\r\n n_hints = len(matching_hints)\r\n hints = []\r\n # max(dict) returns the maximum key in dict.\r\n # The key function takes each pk, and returns the number of votes for the\r\n # hint with that pk.\r\n best_hint_index = max(matching_hints, key=lambda pk: matching_hints[pk][1])\r\n hints.append(matching_hints[best_hint_index][0])\r\n best_hint_answer = matching_hints[best_hint_index][2]\r\n # The brackets surrounding the index are for backwards compatability purposes.\r\n # (It used to be that each answer was paired with multiple hints in a list.)\r\n self.previous_answers += [[best_hint_answer, [best_hint_index]]]\r\n for _ in xrange(min(2, n_hints - 1)):\r\n # Keep making random hints until we hit a target, or run out.\r\n while True:\r\n # random.choice randomly chooses an element from its input list.\r\n # (We then unpack the item, in this case data for a hint.)\r\n (hint_index, (rand_hint, _, hint_answer)) =\\\r\n random.choice(matching_hints.items())\r\n if rand_hint not in hints:\r\n break\r\n hints.append(rand_hint)\r\n self.previous_answers += [[hint_answer, [hint_index]]]\r\n return {'hints': hints,\r\n 'answer': answer}", "def FoodSuggest(sc, event):\n db = pymysql.connect(host='localhost', user='pizzabot', db='pizzachat')\n cursor = db.cursor()\n query = 'SELECT * FROM foodlist ORDER BY RAND() LIMIT 1'\n cursor.execute(query)\n suggestion = cursor.fetchall()\n db.close()\n sc.api_call('chat.postMessage', as_user='true', channel=event['channel'],\n text='On %s, %s had: %s' % suggestion[0])", "def choose_problems():\n\n problems = make_dict()\n\n res = random.sample(list(problems.items()), k = 3)\n # res2 = random.choice(list(problems.items()))\n # res3 = random.choice(list(problems.items()))\n\n res0 = res[0]\n res1 = res[1]\n res2 = res[2]\n\n return res0, res1, res2", "def GetSuggestions(self, request, context):\n algorithm_name, config = OptimizerConfiguration.convertAlgorithmSpec(\n request.experiment.spec.algorithm)\n if algorithm_name != \"bayesianoptimization\":\n raise Exception(\"Failed to create the algorithm: {}\".format(algorithm_name))\n\n if self.is_first_run:\n search_space = HyperParameterSearchSpace.convert(request.experiment)\n self.base_service = BaseSkoptService(\n base_estimator=config.base_estimator,\n n_initial_points=config.n_initial_points,\n acq_func=config.acq_func,\n acq_optimizer=config.acq_optimizer,\n random_state=config.random_state,\n search_space=search_space)\n self.is_first_run = False\n\n trials = Trial.convert(request.trials)\n new_trials = self.base_service.getSuggestions(trials, request.request_number)\n return api_pb2.GetSuggestionsReply(\n parameter_assignments=Assignment.generate(new_trials)\n )", "def _compute_n_recommendations(self, listings_dict, target, n):\n recommended_listings = []\n # check if dict already contains target\n if target.id not in listings_dict:\n # add it\n listings_dict[target.id] = [\n target.longitude, target.latitude, self.map_rt(target.room_type), target.price,\n target.min_nights, target.num_of_reviews, target. 
availability\n ]\n # need to keep track of id and index\n id_list = list(listings_dict.keys())\n target_idx = id_list.index(target.id)\n # we need a listing set of least n+1 to compute n recommendations (the listing itself is the +1)\n if len(id_list) >= n + 1:\n # scale rows between [0,1]\n scaled_rows = MinMaxScaler().fit_transform(list(listings_dict.values()))\n # compute euclidean distance of each row to target\n [self._euclidean_distance(row, scaled_rows[target_idx]) for row in scaled_rows]\n # compute recommendations: We want to compute more recommendations and then randomly sample a subset\n # first element is target itself, therefore we compute one additional rec.\n k = n * 4 + 1\n if k > len(listings_dict):\n # k must not exceed number of listings\n k = len(listings_dict)\n # compute recommendations and save their indices\n rec_idx = NearestNeighbors(n_neighbors=k, algorithm='ball_tree') \\\n .fit(scaled_rows) \\\n .kneighbors([scaled_rows[target_idx]], k, return_distance=False)\n # gather index of recommendations\n rec_ids = [id_list[rec] for rec in rec_idx[0]]\n # randomly sample n (excluding the target itself)\n recommended_listings = random.sample(rec_ids[1:], n)\n\n return recommended_listings", "def knearest( self, restaurant_id, set_of_restaurants, k = 7, reg = 3.0 ):\t\t\n\t\tsimilar = []\t\t\n\t\tfor other_rest_id in set_of_restaurants:\n\t\t\tif other_rest_id != restaurant_id:\n\t\t\t\tsim, n_common = self.get( other_rest_id, restaurant_id )\n\t\t\t\tsim = self.shrunk_sim( sim = sim, n_common = n_common, reg = reg )\n\t\t\t\tsimilar.append( ( other_rest_id, sim, n_common ) )\n\n\t\tsimilars = sorted( similar, key = itemgetter(1), reverse = True )\t\n\t\treturn similars[0:k]", "def test_get_scored_recommendations_post(self):\n pass", "def propose(self, num=1):\n suggestions = self.hebo.suggest(n_suggestions=num)\n recs = suggestions.to_dict()\n suggestions.drop(suggestions.index, inplace=True)\n self.suggest_template = suggestions\n out = []\n for index in list(recs.values())[0].keys():\n rec = {key: value[index] for key, value in recs.items()}\n rec = self.search_space.verify_constraints(rec)\n out.append(rec)\n return out", "async def dummy_predict(item: Item):\n predictions = ['HomeDepot', 'DunderMifflin', 'hometheater', 'EnterTheGungeon',\n 'cinematography', 'Tinder', 'LearnJapanese',\n 'futarp', 'OnePieceTC', 'Firefighting', 'fleshlight', 'lotr',\n 'knifeclub', 'sociopath', 'bleach', 'SCCM', 'GhostRecon',\n 'Ayahuasca', 'codes', 'preppers', 'grammar', 'NewSkaters',\n 'Truckers', 'southpark', 'Dreams', 'JUSTNOMIL',\n 'EternalCardGame', 'evangelion', 'mercedes_benz', 'Cuckold',\n 'writing', 'afinil', 'synology', 'thinkpad', 'MDMA', 'sailing',\n 'cfs', 'siacoin', 'ASUS', 'OccupationalTherapy', 'biology',\n 'thelastofus', 'lonely', 'swrpg', 'acting', 'transformers',\n 'vergecurrency', 'Beekeeping']\n\n recs = {} # store in dict\n\n n_results = 5 # fix to 5 results\n\n recommendations = random.sample(predictions, n_results)\n return {'subreddits': recommendations}", "def get_five_random(self):\r\n if self.get_length() > 5:\r\n random_selection = []\r\n\r\n from random import randrange\r\n\r\n for i in range(0, 5):\r\n while True:\r\n rnd = randrange(0, self.get_length())\r\n if self.get_tweet(rnd) not in random_selection:\r\n random_selection.append(self.get_tweet(rnd))\r\n break\r\n return random_selection\r\n else:\r\n return self.tweets", "def get_related_words(initial_words, model):\n \n unseen = initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could 
be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. using dymanic programming to reduce computing time\n \n return seen", "def get_related_words(initial_words, model):\n \n unseen = initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. using dymanic programming to reduce computing time\n \n return seen" ]
[ "0.673707", "0.66433907", "0.62491417", "0.62107706", "0.6049886", "0.5957509", "0.5921522", "0.5920681", "0.5868315", "0.58326787", "0.5820831", "0.57852846", "0.57586384", "0.5753827", "0.5749022", "0.57412815", "0.56640464", "0.56435263", "0.56167614", "0.56019133", "0.5593852", "0.5582535", "0.55751276", "0.5567644", "0.55143523", "0.55049855", "0.5480294", "0.54784423", "0.5468226", "0.5468226" ]
0.6908943
0
This method removes the message received from the list of messages, then routes the message to the appropriate function
def process(self, message): try: self.messages.remove(message) except ValueError: pass # nothing to see here, just a message that was already processed and is not on the list any more except Exception as e: print('error removing message from self.message:', e) try: if message['type'] in ["ticker"]: self.process_tickers(message) elif message['type'] in ["snapshot", "l2update"]: self.process_orderbook(message) elif message['type'] in ["received","open","done","match","change","activate"] and 'user' in self.data: self.process_orders(message) except Exception as e: raise Exception("Process raised an error: {}\n\t{}".format(e,message))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_new_line(self, message):\n if message.endswith('\\n'):\n return message[:-1]\n return message", "def del_old_msg(self, line):\n if not isinstance(line, list):\n raise ValueError\n lines_occupied = sum(line)\n while lines_occupied >= self.h - 2: # Last 2 lines are for input\n self.curr_y = 0 # Need to re-render to get rid of old messages\n self.msg_count = 0\n lines_occupied -= line.pop(0)\n self.message_log.pop(0)", "def handleMessage(msg):", "def process_messages(self):\n pass", "def process_message(msg):\n\n if message_already_processed(msg):\n print(\"%s is already processed\" % msg.message_id)\n else:\n _tally_message(msg)\n msg.delete()", "def delete(self):\n for i, message in enumerate(self.owner.messages):\n if message == self.body:\n del self.owner.messages[i]\n break", "def message(self, message):\n if '\\n' not in message:\n cmd = '{}serverMessage \"{}\"'.format(self.console, Commands.aquote(message))\n self.write_command(cmd)\n else:\n self.multiple_messages(message.split('\\n'))", "def found_terminator(self):\r\n self.msg = ''.join(self.msg_buffer)\r\n self.msg_split = self.msg.split(client_api[\"delimiter\"])\r\n cmd = self.msg_split[0]\r\n try:\r\n self.msg_handler[cmd]()\r\n except KeyError as e:\r\n server_log.info('Unhandled command received from client id {}: {}'.format(self.client_id, cmd))\r\n except Exception as e:\r\n server_log.info('Exception raised in server when receiving message from client: {!r}'.format(e))\r\n raise e\r\n finally:\r\n self.msg_buffer = []\r\n self.msg = ''\r\n self.msg_split = []", "def process_received_message(self, rmsg):\n msgtype = rmsg.msgtype\n msgbody = rmsg.message\n try:\n try:\n nli = msgbody.index('\\n')\n strid = msgbody[:nli]\n except IndexError:\n strid = msgbody\n id_ = int(strid, 16)\n _TRACE(\"RECVMSG Received message with id %d\", id_)\n mrec = self.msgmap.get(id_)\n self.harness.assertTrue(mrec)\n # Need to convert empty strings to None for 2 comparisions below\n self.harness.assertEqual(mrec.msgtype or None, msgtype)\n self.harness.assertEqual(mrec.msgbody or None, msgbody)\n self.harness.assertFalse(mrec.alwaysdrop)\n self.msgmap.remove_instance(id_, mrec)\n except Exception as exp:\n _LOGGER.exception(\"error attempting to parse received message\")\n raise exp", "def handle_message(self, message):", "def _process_win_msgs(self):\n message = wintypes.MSG()\n while True:\n res = win32functions.PeekMessageW(pointer(message), 0, 0, 0, win32con.PM_REMOVE)\n if not res:\n break\n if message.message == win32con.WM_QUIT:\n self.stop()\n sys.exit(0)\n else:\n win32functions.TranslateMessage(byref(message))\n win32functions.DispatchMessageW(byref(message))", "def message_delete(self):\r\n SlTrace.lg(\"Destroying timed message\", \"message\")\r\n if self.cur_message is not None:\r\n SlTrace.lg(\"Found message to destroy\", \"message\")\r\n self.cur_message.destroy()\r\n self.cur_message = None", "def handle(self, message):", "async def on_raw_message_delete(self, payload):\n\t\tif payload.guild_id is not None:\n\t\t\tguild = self.bot.get_guild(payload.guild_id)\n\t\t\tleaderboards = self.leaderboards[str(guild.id)]\n\n\t\t\tif payload.cached_message is not None:\n\t\t\t\tmessage = payload.cached_message\n\n\t\t\t\tif not message.author.bot:\n\t\t\t\t\tleaderboards[\"messageLeaderboard\"][str(message.author.id)] -= 1\n\n\t\t\t\t\tif str(message.channel.id) == leaderboards[\"quotesChannel\"]:\n\t\t\t\t\t\tfor user in message.mentions:\n\t\t\t\t\t\t\tleaderboards[\"quotesChannel\"][str(user.id)] -= 1\n\n\t\t\t\t\tfor 
emoji in self.bot.emojis:\n\t\t\t\t\t\temojiName = \"<:\" + emoji.name + \":\" + str(emoji.id) + \">\"\n\t\t\t\t\t\tfor index in range(0, message.content.count(emojiName)):\n\t\t\t\t\t\t\tleaderboards[\"emojiLeaderboard\"][str(emoji.id)] -= 1\n\n\t\t\t\tleaderboards[\"lastUpdate\"] = message.created_at.isoformat()\n\t\t\t\tawait self.update_state()", "async def handle_remove(message: types.Message):\n chat_id = message[\"chat\"][\"id\"]\n text = message[\"text\"].strip().lower()\n if len(text.split()) > 1:\n await remove_subscriptions(chat_id, text.split()[1:])\n else:\n subreddits = subscriptions_manager.user_subreddits(chat_id)\n subreddits.sort()\n if not subreddits:\n await reply(\n message,\n \"You are not subscribed to any subreddit, press /add to subscribe\",\n )\n else:\n await StateMachine.asked_remove.set()\n markup = sub_list_keyboard(chat_id, \"remove\")\n await reply(\n message,\n \"Which subreddit would you like to unsubscribe from?\",\n reply_markup=markup,\n )", "def process_quit(message):\n try:\n Resident.objects.get(phone_number=message.sender).delete()\n except Resident.DoesNotExist:\n pass\n \n # TODO - wording...\n message.respond('You have been removed from our system and will no longer get text messages.')\n \n return TropoOkResponse()", "def handle_message(self, msg):\n pass", "def message_remove(request, undo=False):\n message_pks = request.POST.getlist('message_pks')\n redirect_to = request.REQUEST.get('next', False)\n\n if message_pks:\n # Check that all values are integers.\n valid_message_pk_list = set()\n for pk in message_pks:\n try: valid_pk = int(pk)\n except (TypeError, ValueError): pass\n else:\n valid_message_pk_list.add(valid_pk)\n\n # Delete all the messages, if they belong to the user.\n now = datetime.datetime.now()\n changed_message_list = set()\n for pk in valid_message_pk_list:\n message = get_object_or_404(Message, pk=pk)\n\n # Check if the user is the owner\n if message.sender == request.user:\n if undo:\n message.sender_deleted_at = None\n else:\n message.sender_deleted_at = now\n message.save()\n changed_message_list.add(message.pk)\n\n # Check if the user is a recipient of the message\n if request.user in message.recipients.all():\n mr = message.messagerecipient_set.get(user=request.user,\n message=message)\n if undo:\n mr.deleted_at = None\n else:\n mr.deleted_at = now\n mr.save()\n changed_message_list.add(message.pk)\n\n # Send messages\n if (len(changed_message_list) > 0):\n if undo:\n message = ungettext('Message is succesfully restored.',\n 'Messages are succesfully restored.',\n len(changed_message_list))\n else:\n message = ungettext('Message is successfully removed.',\n 'Messages are successfully removed.',\n len(changed_message_list))\n\n messages.success(request, message, fail_silently=True)\n\n if redirect_to: return redirect(redirect_to)\n else: return redirect(reverse('socialapps_messages_list'))", "def endMessage(self):", "def send_messages(messages):\n while messages:\n msg = messages.pop()\n sent_messages.append(msg)", "def processMessage(self, *args, **kwargs):\r\n pass", "def remove_all(self):\n if self._processed:\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n res, data = self._mailconn.store(msg.decode('utf-8'), '+FLAGS', '\\\\Deleted')\n print(res)", "def receive_message(self, message):", "def read_messages(message_file):\n line = message_file.readline()\n messages = []\n \n while line != '':\n line = clean_message(line)\n line = line.strip('\\n')\n 
messages.append(line)\n line = message_file.readline()\n return messages\n\t\n # Function will go through each line removing occurences of '/n'", "def message(self, text):\n\n if( rpi_device ):\n self.clear()\n for char in text:\n if char == '\\n' or char == '^':\n self.cmd(0xC0) # new line\n else:\n self.cmd(ord(char),True)", "def get_and_delete_messages (self):\n return []", "def get_and_delete_messages (self):\n return []", "def decode_message(self, message):\r\n\r\n\t\tprint(\"Decoding message '{}'\".format(message))\r\n\r\n\t\tmessage_split = message[1:-1].split('||')\r\n\r\n\t\tif len(message_split) > 1: # Several messages are queued\r\n\t\t\tfor m in message_split:\r\n\t\t\t\tself.decode_message('|' + m + '|')\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tmessage = message_split[0]\r\n\r\n\t\tmessage_split = message.split('|')\r\n\r\n\t\tif message_split[0] == 'LA':\r\n\r\n\t\t\tlist_bars = message_split[1].split(',')\r\n\t\t\tself.send_bar_names.emit(list_bars) # Sending the list to the UI\r\n\r\n\t\telif message_split[0] == 'ME':\r\n\r\n\t\t\tprint(\"New message received : '{}'\".format(message))\r\n\r\n\t\t\tif len(message_split) == 3: # Author was found\r\n\t\t\t\tinfos = (message_split[2], message_split[1])\r\n\t\t\telif len(message_split) == 2: # No author\r\n\t\t\t\tinfos = (message_split[1],)\r\n\t\t\ttry:\r\n\t\t\t\tself.message_received.emit(infos)\r\n\t\t\texcept UnboundLocalError:\r\n\t\t\t\tself._window.open_dialog(\"Message de chat incompréhensible\",\r\n\t\t\t\t\t\t\t\t\t\t \"Le message de chat suivant n'a pas pu être décodé : {}\".format(message),\r\n\t\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\r\n\t\telif message_split[0] == 'LO': # Message is '|LO|' so just ignoring it\r\n\r\n\t\t\tself.name_set.emit() # Warning the UI about the name being set\r\n\r\n\t\telif message_split[0] == \"CH\":\r\n\r\n\t\t\tpass\r\n\t\t\r\n\t\telif message_split[0] == 'UR':\r\n\r\n\t\t\tprint(\"New message received : '{}'\".format(message))\r\n\r\n\t\t\tif len(message_split) == 3: # Author was found\r\n\t\t\t\tinfos = (message_split[2], message_split[1])\r\n\t\t\telif len(message_split) == 2: # No author\r\n\t\t\t\tinfos = (message_split[1],)\r\n\t\t\ttry:\r\n\t\t\t\tself.urgent_message_received.emit(infos)\r\n\t\t\texcept UnboundLocalError:\r\n\t\t\t\tself._window.open_dialog(\"Message de chat incompréhensible\",\r\n\t\t\t\t\t\t\t\t\t\t \"Le message de chat suivant n'a pas pu être décodé : {}\".format(message),\r\n\t\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\t\t\t\r\n\t\telif message_split[0] == \"LE\": # Getting the list of products\r\n\r\n\t\t\tif message_split[1]:\r\n\t\t\t\ttuples = message_split[1].split(',')\r\n\t\t\t\tfor t in tuples:\r\n\t\t\t\t\ti, f = t.split(':')\r\n\t\t\t\t\tself.__food[int(i)] = f\r\n\r\n\t\telif message_split[0] == \"RS\": # A new order for Restal\r\n\r\n\t\t\ttry:\r\n\t\t\t\tfood = self.__food[int(message_split[2])]\r\n\t\t\texcept KeyError:\r\n\t\t\t\tfood = \"Inconnue\"\r\n\t\t\t\tprint(\"Unable to get the name of food '{}'\".format(message_split[2]))\r\n\t\t\tprint(message_split[1],message_split[3],message_split[2])\r\n\t\t\tself.add_order.emit(message_split[1], food, int(message_split[3]))\r\n\r\n\t\telse:\r\n\t\t\tself._window.open_dialog(\"Message du serveur incompréhensible\",\r\n\t\t\t\t\t\t\t\t\t \"Le message suivant n'a pas pu être décodé : {}\".format(message), type=\"warning\")\r\n\t\t\tprint(\"Error : message '{}' could not be decoded\".format(message))", "def _process_messages(self, room, new_messages):\n\t\tfor message in 
new_messages:\n\t\t\tself._log.info(\"handling message {}\".format(message[\"id\"]))\n\n\t\t\tfor reactive in self._reactives:\n\t\t\t\ttry:\n\t\t\t\t\treactive(room, message, self, self._hipchat)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself._log.error(\"reactive {!r} errored while handling message\".format(reactive), exc_info=True)", "def send_message_list(message_lst: list, reciever: str, receiver_data: dict,\n users_to_remove: list) -> None:\n new_prev_mes: list = []\n final_message = ''\n for message in message_lst:\n if len(new_prev_mes) == 5:\n break\n if message not in receiver_data['usr_prevs_mes']:\n receiver_data['mes_limit'] -= 1\n final_message += f'\\n{message}'\n new_prev_mes.append(message)\n receiver_data['usr_prevs_mes'] = new_prev_mes\n final_message += '\\nReply stop to stop these notifications.'\n if len(new_prev_mes) != 0:\n send_message(reciever,\n 'New Vaccine Locations Detected!',\n final_message,\n receiver_data['carrier'])\n if receiver_data['mes_limit'] <= 0:\n users_to_remove.append(reciever)" ]
[ "0.6690043", "0.64363915", "0.6417138", "0.6370209", "0.63638574", "0.6278583", "0.614606", "0.6113393", "0.61117715", "0.60682833", "0.60108536", "0.60044193", "0.59817094", "0.5978349", "0.59654874", "0.5923463", "0.5904784", "0.58949006", "0.5865086", "0.5831896", "0.5815616", "0.58014965", "0.58005863", "0.57879466", "0.57691026", "0.5765672", "0.5765672", "0.5760521", "0.57448834", "0.5742689" ]
0.6683044
1
Opens a new connection to the websocket
def open(self): try: self.error_count = 0 self.conn_thread = Thread(target=self.connect, name='Websocket Connection') self.conn_thread.start() except Exception as e: self.conn_thread.join() self.on_error(self.ws, "Error from openning connection. Error -> {}".format(e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def connect(self, url: str):\n logger.info(\"Opening connection to {}\".format(url))\n self.websocket = await WSClient.connect(url)\n logger.info(\"Connected to gateway!\")\n self._open = True\n return self.websocket", "async def _open_market_data_websocket(self):\n market_data_url = self._wss_url_base + \\\n '/v1/marketdata/BTCUSD?heartbeat=true'\n self._market_data_sock_info.ws = await websockets.client.connect(\n market_data_url)\n self._market_data_sock_info.connected_event.set()", "def _ws_connect(self):\n\n return websocket.websocket_connect(\n 'ws://localhost:{}{}'.format(self.get_http_port(), self.request)\n )", "def connect(self):\n headers = httputil.HTTPHeaders({'Content-Type': 'application/json'})\n request = httpclient.HTTPRequest(\n url=self.url,\n connect_timeout=self.connect_timeout,\n request_timeout=self.request_timeout,\n headers=headers\n )\n ws_conn = websocket.WebSocketClientConnection(\n ioloop.IOLoop.current(),\n request\n )\n ws_conn.connect_future.add_done_callback(self._connect_callback)", "def open(self, *args, **kwargs):\n self._open = True\n self._stat.websocket_stream_open += 1\n # Create subscription for the stream\n url = self.request.uri\n self._logger.info(\"Websocket connection %s %s\", url, self)\n\n async_future = asyncio.async(\n self.netconf_subscribe(\n self.request.uri,\n self.request.headers.get(\"Authorization\")), \n loop=self._asyncio_loop)\n yield tornado.platform.asyncio.to_tornado_future(async_future)", "async def _open_connection(self, conn_name):\n path = WS_ENDPOINT_REMOTE_CONTROL if conn_name == WS_REMOTE else WS_ENDPOINT_APP_CONTROL\n token = (await self._store.get(ATTR_TOKEN)) if conn_name == WS_REMOTE else None\n url = format_websocket_url(self.host, path, self.name, token)\n _LOGGER.debug(f\"{conn_name}: Attempting connection to {url}\")\n try:\n self._connected = False\n self._is_connecting = True\n async with websockets.connect(url, ssl=INSECURE_SSL_CTX) as ws:\n setattr(self, f\"_ws_{conn_name}\", ws)\n async for msg in ws:\n try:\n await self._handle_message(conn_name, msg)\n except AuthorizationError:\n _LOGGER.error(f\"{conn_name}: Authorization refused\")\n break\n except Exception as exc:\n _LOGGER.error(f\"Error while handling message: {exc}\", exc_info=True)\n except (websockets.WebSocketException, asyncio.CancelledError, ConnectionError) as exc:\n _LOGGER.debug(f\"{conn_name}: {exc}\", exc_info=True)\n except Exception as exc:\n _LOGGER.error(f\"{conn_name}: {exc}\", exc_info=True)\n finally:\n _LOGGER.debug(f\"{conn_name}: disconnected\")\n setattr(self, f\"_ws_{conn_name}\", None)\n self._connected = False\n self._is_connecting = False\n self._current_app = None\n self._installed_apps = {}", "def websocket(self) -> Websocket:\n self.__http_client.data_snapshot()\n host_uri = f'ws://{self.__http_client.host_ip}/api/v1/data/stream'\n subprotocols = [f'SessionToken_{self.__http_client.session_token}', \"object\"]\n return Websocket(host_uri, subprotocols, timeout=self.__http_client.request_timeout)", "def create_connection(url, timeout=None, **options):\r\n websock = WebSocket()\r\n websock.settimeout(timeout != None and timeout or default_timeout)\r\n websock.connect(url, **options)\r\n return websock", "async def connect(self):\n ssl = True if self._uri.startswith(\"wss\") else False\n async for websocket in websockets.connect(\n self._uri, ssl=ssl\n ) if ssl else websockets.connect(self._uri):\n # Try-except-continue used for automatic reconnection with exponential backoff\n try:\n self._connection = websocket\n 
async for message in self._connection:\n json_obj = json.loads(message.decode())\n item = Item(\n json_obj[\"type\"], json_obj[\"manufacturer\"], json_obj[\"model\"]\n )\n request = Request(self._connection, item)\n await self.on_message_handler(request)\n except websockets.ConnectionClosed:\n continue", "def open(self, pysession_id):\n self.id = id(self)\n self.funcserver = self.application.funcserver\n self.pysession_id = pysession_id\n\n # register this connection with node\n self.state = self.funcserver.websocks[self.id] = {\"id\": self.id, \"sock\": self}", "def websocket_init(self, payload, *args, **kwargs):\n data = json.loads(str(payload, \"utf-8\"))\n self.is_connecting = False\n if url := data.get(\"url\"):\n self.gateway = f\"{url}/?v={DISCORD_API_VERSION}&encoding=json\".encode(\"utf-8\")\n useragent = kwargs.pop(\"useragent\", DISCORD_USER_AGENT)\n headers = kwargs.pop(\n \"headers\",\n {\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n },\n )\n\n logger.log_info(\"Connecting to Discord Gateway...\")\n WebSocketClientFactory.__init__(\n self, url, *args, headers=headers, useragent=useragent, **kwargs\n )\n self.start()\n else:\n logger.log_err(\"Discord did not return a websocket URL; connection cancelled.\")", "def add_ws(self):\n def on_message(ws, message):\n print(message)\n\n def on_error(ws, error):\n pass\n\n def on_close(ws):\n pass\n\n def on_open(ws):\n thread.start_new_thread(self.run, ())\n\n ws = websocket.WebSocketApp(self.url + '/',\n on_message = on_message,\n on_error = on_error,\n on_close = on_close)\n\n ws.on_open = on_open\n self.ws = ws", "def on_websocket_open(self) -> None:\n raise NotImplementedError() # pragma: no cover", "async def ws_connect(self):\n await self._client.connect()", "async def connect(self):\n\n self.socket = await self._session.ws_connect(str(self._url))\n self._create_task(self.__handle_connection())", "async def ws_connect(self, client):\n if self.ws is None:\n team_ws_build = GuildedWebSocket.build(client, loop=client.loop, teamId=self.id)\n self.ws = await asyncio.wait_for(team_ws_build, timeout=60)", "async def websocket_client(self):\n return await websocket(CLIENT, \"/websocket\")", "def connect_to_websocket(self):\n conn = yield websocket_connect(\"wss://api.bitfinex.com/ws\")\n\n req = {\n \"event\": \"subscribe\",\n \"channel\": \"book\",\n \"pair\": \"BTCUSD\",\n \"freq\": \"F0\",\n }\n conn.write_message(json.dumps(req))\n while True:\n msg = yield conn.read_message()\n response = json.loads(msg)\n if response:\n if self.snapshot_received:\n # Perform update in database\n # and emit update to client\n self.perform_update(response)\n\n if isinstance(response, list) and not self.snapshot_received: # If true, store snapshot in database\n\n for data in response[1]: # here data is of form [price, count, amount]\n item_type = \"bid\" if data[2] > 0 else \"ask\" # bid if amt > 0, else ask\n item = self.add_new_bitfinex_item(item_type, data[0], data[1])\n self.session.add(item)\n self.session.commit()\n print(\"Bitfinex Snapshot Received\")\n self.snapshot_received = True # Set flag\n else:\n break", "def _on_open(self):\n\n print(\"WebSocket successfully connected for \" + self.session_name + \"!\")\n self.web_socket_open = True\n self._send_login_request(sts_token, False)", "def connect(self):\n response = self._login(self._username, self._password)\n ssid = response.cookies[\"ssid\"]\n self._set_session_cookies()\n self._websocket_client = WebsocketClient(self.wss_url, 
self._on_message_callback)\n\n websocket_thread = threading.Thread(target=self.websocket.run_forever)\n websocket_thread.daemon = True\n websocket_thread.start()\n\n time.sleep(5)\n\n self.set_ssid(ssid)", "def __init__(self, websocket_ip, port=9090):\n print(\"Connecting to websocket: {}:{}\".format(websocket_ip, port))\n self.ws = websocket.create_connection(\n 'ws://' + websocket_ip + ':' + str(port))\n self._advertise_dict = {}", "def open(self):\n APP.clients.append(self)\n # self.send_status()\n log(\"WebSocket opened. {0} child(s) connected\".\n format(len(APP.clients)))", "def _connect(self):\n\t\tself.log.info(\"Trying to connect to OBS Websockets...\")\n\n\t\ttry:\n\t\t\t\tself.client = obswebsocket.obsws(self.host, self.port, self.password)\n\t\t\t\tself.client.connect()\n\t\t\t\tself.log.info(\"...Connected to OBS Websockets at {}:{}\".format(self.host, self.port))\n\t\texcept Exception as e:\n\t\t\tself.log.error(\"Could not initialize connection at {}:{} to OBS Websockets! Exception: {}\".format(self.host, self.port, e))\n\t\t\traise", "def connect(self):\n self.ws.connect()", "def on_open(self, request):\n\n # Find the right endpoint and create th connection\n dest = destinations[self.endpoint]\n\n name = self.session.handler.name if self.session.handler else '??'\n logger.info('New %s client for endpoint %s on port %s' %\n (name, self.endpoint, dest[1]))\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n self.endpoint_stream = iostream.IOStream(s)\n self.endpoint_stream.connect(dest, self.on_endpoint_connected)", "def _connect(self, reconnecting=False):\n # The lock is used to ensure only a single connection can be made\n with (yield self._ws_connecting_lock.acquire()):\n self._disconnect_issued = False\n websocket_url = (yield self.get_sitemap())['websocket']\n if not self.is_connected:\n self._logger.debug(\n \"Connecting to websocket %s\", websocket_url)\n try:\n if self._heart_beat_timer.is_running():\n self._heart_beat_timer.stop()\n self._ws = yield websocket_connect(\n websocket_url,\n on_message_callback=self._websocket_message,\n connect_timeout=WS_CONNECT_TIMEOUT)\n if reconnecting:\n yield self._resend_subscriptions_and_strategies()\n self._logger.info(\"Reconnected :)\")\n self._heart_beat_timer.start()\n except Exception:\n self._logger.exception(\n 'Could not connect websocket to %s',\n websocket_url)\n if reconnecting:\n self._logger.info(\n 'Retrying connection in %s seconds...', WS_RECONNECT_INTERVAL)\n self._connect_later(WS_RECONNECT_INTERVAL)\n if not self.is_connected and not reconnecting:\n self._logger.error(\"Failed to connect!\")", "async def _open_orders_websocket(self):\n orders_path = '/v1/order/events'\n headers = self._create_headers(orders_path, encoding=\"utf-8\")\n # Filter order events so that only events from this key are sent.\n creds = self._api_credentials\n order_events_url = self._wss_url_base + orders_path + \\\n f'?heartbeat=true&apiSessionFilter={creds.api_key}'\n\n # Uncommented until we have the orders websocket working correctly.\n self._orders_sock_info.ws = await websockets.client.connect(\n order_events_url, extra_headers=headers)\n self._orders_sock_info.connected_event.set()", "def ws_request(self, ws_url):\n url = \"wss://stream.binance.com:9443/ws/%s\" % (ws_url)\n\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(url,\n on_error=self.ws_on_error,\n on_close=self.ws_on_close)\n\n return ws", "def initSocket(self):\n \n # Check WebSocket support\n if self.nodejs:\n try:\n WebSocket = require('ws')\n 
except Exception:\n # Better error message\n raise \"FAIL: you need to 'npm install -g ws' (or 'websocket').\"\n else:\n WebSocket = window.WebSocket\n if (WebSocket is undefined):\n window.document.body.innerHTML = 'Browser does not support WebSockets'\n raise \"FAIL: need websocket\"\n # Open web socket in binary mode\n self.ws = ws = WebSocket(window.flexx.ws_url)\n #ws.binaryType = \"arraybuffer\" # would need utf-decoding -> slow\n \n def on_ws_open(evt):\n window.console.info('Socket connected')\n ws.send('hiflexx ' + flexx_session_id)\n def on_ws_message(evt):\n window.flexx.last_msg = msg = evt.data or evt\n #msg = window.flexx.decodeUtf8(msg)\n window.flexx.command(msg)\n def on_ws_close(evt):\n self.ws = None\n msg = 'Lost connection with server'\n if evt and evt.reason: # nodejs-ws does not have it?\n msg += ': %s (%i)' % (evt.reason, evt.code)\n if (not window.flexx.is_notebook) and (not self.nodejs):\n window.document.body.innerHTML = msg\n else:\n window.console.info(msg)\n def on_ws_error(self, evt):\n self.ws = None\n window.console.error('Socket error')\n \n # Connect\n if self.nodejs:\n ws.on('open', on_ws_open)\n ws.on('message', on_ws_message)\n ws.on('close', on_ws_close)\n ws.on('error', on_ws_error)\n else:\n ws.onopen = on_ws_open\n ws.onmessage = on_ws_message\n ws.onclose = on_ws_close\n ws.onerror = on_ws_error", "def setup_websocket(ws_url, service_account_file, audience, router_password, source_port, dest_ip, dest_port):\n def on_message(ws, message):\n \"\"\"Handle a message\"\"\"\n handle_message(ws, message, router_password, source_port, dest_ip, dest_port)\n\n def on_error(ws, error):\n \"\"\"Handle an error by exiting or closing if it is a KeyboardInterrupt (Ctrl+C)\"\"\"\n if type(error) is KeyboardInterrupt:\n logger.info('Cancel requested (Ctrl+C), closing connection.')\n ws.close()\n else:\n logger.error(\"The following error occurred:\\n{error}\".format(error=error))\n sys.exit(1)\n\n def on_close(ws):\n \"\"\"Handle the WebSocket close\"\"\"\n logger.info('WebSocket closed.')\n\n def on_open(ws):\n \"\"\"Handle the WebSocket opening\"\"\"\n logger.info('WebSocket open, sending authentication.')\n authenticate(ws, service_account_file, audience)\n ws.send(STATUS_COMMAND_FORMAT.format(status_payload=json.dumps(get_status(router_password, source_port, dest_ip, dest_port))))\n\n return websocket.WebSocketApp(ws_url,\n on_open=on_open,\n on_message=on_message,\n on_error=on_error,\n on_close=on_close)" ]
[ "0.7578339", "0.7381387", "0.73584044", "0.727567", "0.7079712", "0.6967358", "0.69503075", "0.6941937", "0.69125676", "0.6901161", "0.68965214", "0.6851598", "0.68342113", "0.68273735", "0.6776803", "0.6750949", "0.6748201", "0.6728414", "0.67280823", "0.6713812", "0.66815436", "0.66737914", "0.6556752", "0.6553084", "0.65497094", "0.6535536", "0.6525553", "0.6516232", "0.65153223", "0.64958584" ]
0.764353
0
Receives the level 2 snapshot and the subsequent updates, and updates the orderbook accordingly
def update(self, message): try: if message['type'] == 'l2update': if self.snapshot_received: self.l2update(message) else: self.backlog += message['changes'] elif message['type'] == 'snapshot': self.snapshot(message) except Exception as e: raise Exception("Error processing {} OrderBook update: Message -> {}".format(message['product_id'], e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _snapshot(self, msg) -> None:\n\n product_id = msg[\"product_id\"]\n logging.debug(\"Received snapshot for {}\".format(product_id))\n price_precision = \"%.{}f\".format(self.products[product_id])\n\n self.book[product_id] = {}\n\n for book_side in [BOOK_BIDS, BOOK_ASKS]:\n self.book[product_id][book_side] = \\\n {(price_precision % float(level[0])): float(level[1]) for level in msg[book_side]}", "def slot_fulldepth(self, dummy_sender, data):\r\n (depth) = data\r\n self.debug(\"### got full depth, updating orderbook...\")\r\n self.bids = []\r\n self.asks = []\r\n self.total_ask = 0\r\n self.total_bid = 0\r\n if \"error\" in depth:\r\n self.debug(\"### \", depth[\"error\"])\r\n return\r\n for order in depth[\"data\"][\"asks\"]:\r\n price = int(order[\"price_int\"])\r\n volume = int(order[\"amount_int\"])\r\n self._update_total_ask(volume)\r\n self.asks.append(Level(price, volume))\r\n for order in depth[\"data\"][\"bids\"]:\r\n price = int(order[\"price_int\"])\r\n volume = int(order[\"amount_int\"])\r\n self._update_total_bid(volume, price)\r\n self.bids.insert(0, Level(price, volume))\r\n\r\n # update own volume cache\r\n for order in self.owns:\r\n self._update_level_own_volume(\r\n order.typ, order.price, self.get_own_volume_at(order.price, order.typ))\r\n\r\n if len(self.bids):\r\n self.bid = self.bids[0].price\r\n if len(self.asks):\r\n self.ask = self.asks[0].price\r\n\r\n self._valid_ask_cache = -1\r\n self._valid_bid_cache = -1\r\n self.ready_depth = True\r\n self.signal_fulldepth_processed(self, None)\r\n self.signal_changed(self, None)", "def update_orderbook(self, existing_orderbook_obj, instrument, market_place, market_segment, market_capability, \\\n tick_size_list, round_lot, day_count, orderbook_name, tiering_level, orderbook_curr=None):\n logger.DLOG(\"Updating orderbook...\") \n clone_obj = existing_orderbook_obj.Clone()\n clone_obj.Instrument = instrument\n if orderbook_curr:\n clone_obj.Currency = orderbook_curr\n else:\n clone_obj.Currency = instrument.Currency()\n clone_obj.Quotation = instrument.Quotation()\n clone_obj.MarketPlace = market_place\n clone_obj.RoundLot = self.get_round_lot(instrument, round_lot)\n #clone_obj.PhysicalMarketSegment(market_segment)\n clone_obj.Name = orderbook_name\n clone_obj.QuoteFactor = 1\n clone_obj.TickSizeList = self.get_tick_size_list(tick_size_list, market_capability)\n if str(tiering_level):\n clone_obj.ExternalType = tiering_level\n clone_obj.ExternalId = orderbook_name\n\n try: \n existing_orderbook_obj.Apply(clone_obj)\n existing_orderbook_obj.Commit() \n \n #group_map = self.get_list_leaf(clone_obj,market_segment) \n #if group_map and clone_obj.GroupMaps().IndexOf(group_map) <0 :\n # clone_obj.GroupMaps().Add(group_map) \n # clone_obj.GroupMaps().Commit() \n \n logger.LOG(\"**Successfully** updated orderbook information: <%s> for instrument <%s>\"%(orderbook_name, instrument.Name()))\n except Exception as e:\n logger.ELOG('**Error** while updating OrderBook %s : %s'%(orderbook_name, e))", "def update_order():", "def update_order():", "def _send_order_book_snapshot(state, client, symbol):\n state.lock.acquire()\n\n # Try to find order book corresponding to symbol\n try:\n lob = state.get_current_lob_state(symbol)\n except KeyError as exc:\n state.lock.release()\n # TODO: create error message\n return\n\n messages = []\n # Send sell orders\n if (lob.asks is not None) and (len(lob.asks) > 0):\n for price, order_list in reversed(lob.asks.price_map.items()):\n head_order = order_list.get_head_order()\n for _ in range(0, 
order_list.length):\n messages.append(_create_add_message_from_order(head_order))\n head_order = head_order.next_order\n\n # Send buy orders\n if (lob.bids is not None) and (len(lob.bids) > 0):\n for price, order_list in reversed(lob.bids.price_map.items()):\n head_order = order_list.get_head_order()\n for _ in range(0, order_list.length):\n messages.append(_create_add_message_from_order(head_order))\n head_order = head_order.next_order\n\n for message in messages:\n message = json.dumps(message)\n messaging.send_data(client.socket, message, client.encoding)\n time.sleep(0.0001)\n\n client.snapshot_sent = True\n\n state.lock.release()", "def order_book_builder(self, data, timestamp, datetime, symbol):\n if isinstance(data[1], list):\n data = data[1]\n # Price, Count, Amount\n bids = {\n str(level[0]): [str(level[1]), str(level[2])]\n for level in data if level[2] > 0\n }\n asks = {\n str(level[0]): [str(level[1]), str(abs(level[2]))]\n for level in data if level[2] < 0\n }\n self.orderbooks[symbol].update({'bids': bids})\n self.orderbooks[symbol].update({'asks': asks})\n self.orderbooks[symbol].update({'timestamp': timestamp})\n self.orderbooks[symbol].update({'datetime': datetime})\n\n else:\n # Example update message structure [1765.2, 0, 1] where we have [price, count, amount].\n # Update algorithm pseudocode from Bitfinex documentation:\n # 1. - When count > 0 then you have to add or update the price level.\n # 1.1- If amount > 0 then add/update bids.\n # 1.2- If amount < 0 then add/update asks.\n # 2. - When count = 0 then you have to delete the price level.\n # 2.1- If amount = 1 then remove from bids\n # 2.2- If amount = -1 then remove from asks\n data = data[1:]\n data = [str(data[0]), str(data[1]), str(data[2])]\n if int(data[1]) > 0: # 1.\n\n if float(data[2]) > 0: # 1.1\n self.orderbooks[symbol]['bids'].update({data[0]: [data[1], data[2]]})\n\n elif float(data[2]) < 0: # 1.2\n self.orderbooks[symbol]['asks'].update({data[0]: [data[1], str(abs(float(data[2])))]})\n\n elif data[1] == '0': # 2.\n\n if data[2] == '1': # 2.1\n if self.orderbooks[symbol]['bids'].get(data[0]):\n del self.orderbooks[symbol]['bids'][data[0]]\n\n elif data[2] == '-1': # 2.2\n if self.orderbooks[symbol]['asks'].get(data[0]):\n del self.orderbooks[symbol]['asks'][data[0]]", "def sync(self):\n\n new_book = {}\n update_list = [self.book[WAIT_OPEN], self.book[OPEN]]\n\n for status, booklet in self.book.items():\n new_book[status] = {}\n\n for status, booklet in self.book.items():\n for pos_id, position in booklet.items():\n\n position.update()\n new_status = position.status\n\n if status == new_status:\n new_book[status][pos_id] = position\n else:\n new_book[new_status][pos_id] = position\n\n self.book = new_book", "async def _book(self, msg: dict, timestamp: float):\n # PERF perf_start(self.id, 'book_msg')\n\n delta = {BID: [], ASK: []}\n # if we reset the book, force a full update\n forced = False\n pair = self.exchange_symbol_to_std_symbol(msg['data'][0]['symbol'])\n if not self.partial_received[pair]:\n # per bitmex documentation messages received before partial\n # should be discarded\n if msg['action'] != 'partial':\n return\n self.partial_received[pair] = True\n forced = True\n\n if msg['action'] == 'partial':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n price = Decimal(data['price'])\n size = Decimal(data['size'])\n order_id = data['id']\n\n self._l2_book[pair][side][price] = size\n self.order_id[pair][side][order_id] = price\n elif msg['action'] == 'insert':\n for data 
in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n price = Decimal(data['price'])\n size = Decimal(data['size'])\n order_id = data['id']\n\n self._l2_book[pair][side][price] = size\n self.order_id[pair][side][order_id] = price\n delta[side].append((price, size))\n elif msg['action'] == 'update':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n update_size = Decimal(data['size'])\n order_id = data['id']\n\n price = self.order_id[pair][side][order_id]\n\n self._l2_book[pair][side][price] = update_size\n self.order_id[pair][side][order_id] = price\n delta[side].append((price, update_size))\n elif msg['action'] == 'delete':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n order_id = data['id']\n\n delete_price = self.order_id[pair][side][order_id]\n del self.order_id[pair][side][order_id]\n del self._l2_book[pair][side][delete_price]\n delta[side].append((delete_price, 0))\n\n else:\n LOG.warning(\"%s: Unexpected l2 Book message %s\", self.id, msg)\n return\n # PERF perf_end(self.id, 'book_msg')\n # PERF perf_log(self.id, 'book_msg')\n\n await self.book_callback(self._l2_book[pair], L2_BOOK, pair, forced, delta, timestamp, timestamp)", "def _update_level_own_volume(self, typ, price, own_volume):\r\n\r\n if price == 0:\r\n # market orders have price == 0, we don't add them\r\n # to the orderbook, own_volume is meant for limit orders.\r\n # Also a price level of 0 makes no sense anyways, this\r\n # would only insert empty rows at price=0 into the book\r\n return\r\n\r\n (index, level) = self._find_level_or_insert_new(typ, price)\r\n if level.volume == 0 and own_volume == 0:\r\n if typ == \"ask\":\r\n self.asks.pop(index)\r\n else:\r\n self.bids.pop(index)\r\n else:\r\n level.own_volume = own_volume", "def update_OpenOrders(self, market):\n mid = self.marketid(market)\n o_orders = self.Request.fetch('marketorders',params={'marketid':mid})\n ##check the form of o_orders\n \n print o_orders\n #self.OpenOrders[self.Pairs[mid]] = \n return 0", "def bin_book_update(binfile, book):\n trade_update_fmt = \"II\"\n trade_update_data = [0, 0]\n order_book_level_fmt = \"IIIIII\"\n levels = [\n (book.bid[-(i+1)].price * DECIMAL_CONVERT,\n book.bid[-(i+1)].qty,\n book.bid[-(i+1)].order_count,\n book.offer[i].price * DECIMAL_CONVERT,\n book.offer[i].qty,\n book.offer[i].order_count) for i in range(5)]\n order_book_level_data = []\n for data in levels:\n order_book_level_data += list(data)\n order_book_level_data = [int(v) for v in order_book_level_data]\n valids_fmt = \"I\"\n valids_data = [2]\n the_data = [now_nanos(), book.security] + \\\n trade_update_data + order_book_level_data + valids_data\n data = struct.pack(\"<QI\" + trade_update_fmt + order_book_level_fmt * 5 + valids_fmt,\n *the_data)\n binfile.write(data)", "def slot_orderbook_changed(self, _sender, _data):\r\n self.change_type = TYPE_ORDERBOOK\r\n self.do_paint()\r\n self.change_type = None", "def _update_book(self, typ, price, total_vol):\r\n (lst, index, level) = self._find_level(typ, price)\r\n if total_vol == 0:\r\n if level == None:\r\n return False\r\n else:\r\n voldiff = -level.volume\r\n lst.pop(index)\r\n else:\r\n if level == None:\r\n voldiff = total_vol\r\n level = Level(price, total_vol)\r\n lst.insert(index, level)\r\n else:\r\n voldiff = total_vol - level.volume\r\n if voldiff == 0:\r\n return False\r\n level.volume = total_vol\r\n\r\n # now keep all the other stuff in sync with it\r\n self.last_change_type = typ\r\n self.last_change_price = price\r\n 
self.last_change_volume = voldiff\r\n if typ == \"ask\":\r\n self._update_total_ask(voldiff)\r\n if len(self.asks):\r\n self.ask = self.asks[0].price\r\n self._valid_ask_cache = min(self._valid_ask_cache, index - 1)\r\n else:\r\n self._update_total_bid(voldiff, price)\r\n if len(self.bids):\r\n self.bid = self.bids[0].price\r\n self._valid_bid_cache = min(self._valid_bid_cache, index - 1)\r\n\r\n return True", "async def _check_order_update(self, *args, **kwargs):\n order_nos = list(self._orders.keys())\n if not order_nos:\n return\n for order_no in order_nos:\n success, error = await self._rest_api.get_order_status(order_no)\n if error:\n return\n await self._update_order(success[\"data\"][0])", "def test_updating_the_po(self):\n print '\\n'\n logger.debug('Updating PO')\n print '\\n'\n \n #Verifying po in database\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n self.assertEqual(self.po.grand_total, Decimal('129.58'))\n self.assertEqual(timezone('Asia/Bangkok').normalize(self.po.order_date).date(), datetime.datetime.now().date())\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.quantity, 10)\n self.assertEqual(item.total, Decimal('121.1'))\n \n modified_po_data = copy.deepcopy(base_purchase_order)\n del modified_po_data['items'][1]\n modified_po_data['id'] = 1\n modified_po_data['items'][0]['id'] = 1\n modified_po_data['items'][0]['comments'] = 'test change'\n modified_po_data['items'][0]['quantity'] = 3\n modified_po_data['items'][0]['description'] = 'test description change'\n\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po_data)\n \n #Verify the response\n self.assertEqual(resp.status_code, 200, msg=resp)\n po = resp.data\n self.assertEqual(po['id'], 1)\n self.assertEqual(po['supplier']['id'], 1)\n self.assertEqual(po['vat'], 7)\n self.assertEqual(Decimal(po['grand_total']), Decimal('38.87'))\n self.assertEqual(po['discount'], 0)\n self.assertEqual(po['revision'], 1)\n self.assertEqual(len(po['items']), 1)\n #self.assertEqual(po['status'], 'PAID')\n #Check the new pdf\n #webbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(po['pdf']['url'])\n \n item2 = po['items'][0]\n \n self.assertEqual(item2['id'], 1)\n self.assertEqual(item2['quantity'], Decimal('3.0000000000'))\n self.assertEqual(item2['comments'], 'test change')\n self.assertEqual(item2['description'], 'test description change')\n self.assertEqual(Decimal(item2['unit_cost']), Decimal('12.11'))\n self.assertEqual(Decimal(item2['total']), Decimal('36.33'))\n \n #Verify database record\n po = PurchaseOrder.objects.get(pk=1)\n \n self.assertEqual(po.supplier.id, 1)\n #self.assertEqual(timezone('Asia/Bangkok').normalize(po.order_date), datetime.datetime.now().date())\n self.assertEqual(po.vat, 7)\n self.assertEqual(po.grand_total, Decimal('38.87'))\n self.assertEqual(po.items.count(), 1)\n \n item2 = po.items.all().order_by('id')[0]\n self.assertEqual(item2.id, 1)\n self.assertEqual(item2.description, 'test description change')\n self.assertEqual(item2.comments, 'test change')\n self.assertEqual(item2.quantity, 3)\n self.assertEqual(item2.unit_cost, Decimal('12.11'))\n self.assertEqual(item2.total, Decimal('36.33'))", "def put(self, order_id):\n body = request.get_json()\n order = db.session.query(models.Order).filter_by(id=order_id).first()\n if order is None:\n return 'Order id not found', 400\n borrower = body.get('borrower')\n borrower = query_user_by_name(borrower)\n if borrower is None:\n return 
'User does not exit in the system', 404\n # if invalid_user(borrower.username):\n # return 'Unauthorized user, please login as a user/borrower', 401\n copy_id = body.get('copy_id')\n print(body)\n print(copy_id)\n copy = db.session.query(models.Copy).filter_by(id=copy_id).first()\n if copy is None:\n return 'Copy ID {} not found in system'.format(copy_id), 409\n elif copy.id != copy_id and copy.status == BOOK_COPY_STATUS_UNAVAILABLE:\n return 'The copy of the book is not available', 400\n copy_owner = body.get('copy_owner')\n owner = query_user_by_name(copy_owner)\n if owner is None:\n return 'Copy owner not found in the system'.format(copy_owner), 409\n # return_date = body.get('return_date')\n # if datetime.strptime(return_date, \"%y%m%d\") < datetime.strptime(datetime.utcnow().strftime(\"%Y-%m-%d\"), \"%y%m%d\"):\n # return 'Return date should be later than today', 400\n status = body.get('order_status')\n if status is not None and status < 0 or status > 4:\n return 'Status should between 0-4', 400\n order.parse_body_status(body)\n copy = db.session.query(models.Copy).filter_by(id=order.copy).first()\n if order.status == ORDER_STATUS_COMPLETED or order.status == ORDER_STATUS_DECLINED:\n copy.status = BOOK_COPY_STATUS_AVAILABLE\n else:\n copy.status = BOOK_COPY_STATUS_UNAVAILABLE\n db.session.commit()\n return order.serialize(), 200", "def update_book(self):\n while self.lowest_sell is not None and self.highest_buy is not None and self.lowest_sell <= self.highest_buy:\n sell = self.sell_levels[self.lowest_sell].head_order\n buy = self.buy_levels[self.highest_buy].head_order\n self.execute_trade(sell, buy)", "def test_updating_to_receive_items(self):\n modified_po = copy.deepcopy(base_purchase_order)\n del modified_po['items'][1]\n modified_po['items'][0]['id'] = 1\n modified_po['items'][0]['status'] = 'RECEIVED'\n modified_po['status'] = 'RECEIVED'\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.put('/api/v1/purchase-order/1/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 200, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['id'], 1)\n self.assertEqual(po_data['status'], 'RECEIVED')\n \n item1 = po_data['items'][0]\n self.assertEqual(item1['id'], 1)\n self.assertEqual(item1['status'], 'RECEIVED')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=1)\n self.assertEqual(po.id, 1)\n self.assertEqual(po.status, 'RECEIVED')\n for item in po.items.all():\n self.assertEqual(item.status, \"RECEIVED\")\n \n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 20)\n log = Log.objects.all().order_by('-id')[0]\n self.assertEqual(log.action, \"ADD\")\n self.assertEqual(log.quantity, 10)\n self.assertEqual(log.supplier.id, 1)\n self.assertEqual(log.message, \"Received 10m of Pattern: Maxx, Col: Blue from Zipper World\")", "def test_updating_the_supply_price(self):\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.unit_cost, Decimal('12.11'))\n self.assertEqual(Log.objects.all().count(), 0)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['items'][0]['unit_cost'] = Decimal('10.05')\n modified_po['items'][0]['id'] = 1\n modified_po['status'] = 'PROCESSED'\n del modified_po['items'][1]\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n self.assertEqual(resp.status_code, 200, msg=resp)\n resp_obj = resp.data\n 
self.assertEqual(resp_obj['revision'], 1)\n #Check the new pdf\n #webbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(resp_obj['pdf']['url'])\n \n self.assertEqual(resp_obj['id'], 1)\n self.assertEqual(resp_obj['supplier']['id'], 1)\n self.assertEqual(resp_obj['vat'], 7)\n self.assertEqual(resp_obj['discount'], 0)\n self.assertEqual(resp_obj['revision'], 1)\n self.assertEqual(Decimal(resp_obj['grand_total']), Decimal('107.54'))\n self.assertEqual(len(resp_obj['items']), 1)\n item1 = resp_obj['items'][0]\n self.assertEqual(item1['id'], 1)\n self.assertEqual(item1['quantity'], Decimal('10.0000000000'))\n self.assertEqual(Decimal(item1['unit_cost']), Decimal('10.05'))\n self.assertEqual(Decimal(item1['total']), Decimal('100.50'))\n \n #Confirm cost change for item and supply in the database\n po = PurchaseOrder.objects.get(pk=1)\n self.assertEqual(po.grand_total, Decimal('107.54'))\n item1 = po.items.order_by('id').all()[0]\n self.assertEqual(item1.id, 1)\n self.assertEqual(item1.quantity, 10)\n self.assertEqual(item1.unit_cost, Decimal('10.05'))\n supply = item1.supply\n supply.supplier = po.supplier\n self.assertEqual(supply.cost, Decimal('10.05'))\n \n self.assertEqual(Log.objects.all().count(), 1)\n log = Log.objects.all()[0]\n self.assertEqual(log.cost, Decimal('10.05'))\n self.assertEqual(log.supply, supply)\n self.assertEqual(log.supplier, po.supplier)\n self.assertEqual(log.message, \"Price change from 12.11USD to 10.05USD for Pattern: Maxx, Col: Blue [Supplier: Zipper World]\")\n\n # Confirm that there is still only one product for this supply and supplier\n # in the database\n products = Product.objects.filter(supply=supply, supplier=po.supplier)\n self.assertEqual(len(products), 1)", "def order_book(self, order_details):\n order_date = datetime.date.today()\n self.cursor.execute(\"INSERT INTO orderlog (loginID, orderDate) VALUES (%s, %s)\",\n (order_details['loginID'], order_date))\n order_id = self.cursor.lastrowid\n for i in range(len(order_details['ISBN'])):\n self.cursor.execute(\"INSERT INTO productof Values (%s, %s, %s)\",\n (order_details['ISBN'][i], order_id, order_details['quantity'][i]))\n self.cursor.execute(\"UPDATE book SET stock=stock-%s WHERE ISBN=%s\",\n (order_details['quantity'][i], order_details['ISBN'][i]))\n self.db.commit()\n return order_id", "def subscribe_order_book(self, symbol, update_handler=None, **kwargs):\n pass", "async def _order_book_snapshot_router(self):\n while True:\n try:\n ob_message: OrderBookMessage = await self._order_book_snapshot_stream.get()\n trading_pair: str = ob_message.trading_pair\n if trading_pair not in self._tracking_message_queues:\n continue\n message_queue: asyncio.Queue = self._tracking_message_queues[trading_pair]\n await message_queue.put(ob_message)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unknown error. 
Retrying after 5 seconds.\", exc_info=True)\n await asyncio.sleep(5.0)", "async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"ORDER_TRADE_UPDATE\": # Order update.\n self._update_order(msg[\"o\"])", "def add_order(self, order):\n if order.is_bid:\n if order.price in self.buy_levels:\n limit = self.buy_levels[order.price]\n if limit.size == 0:\n self.buy_tree.size += 1\n limit.add(order)\n self.buy_map[order.uid] = order\n order.parent_limit = limit\n else:\n limit = Limit(order.price)\n limit.add(order)\n self.buy_map[order.uid] = order\n self.buy_tree.insert(limit)\n self.buy_tree.size += 1\n self.buy_levels[order.price] = limit\n order.parent_limit = self.buy_levels[order.price]\n if self.highest_buy is None or order.price > self.highest_buy:\n self.highest_buy = order.price\n else:\n if order.price in self.sell_levels:\n limit = self.sell_levels[order.price]\n if limit.size == 0:\n self.sell_tree.size += 1\n limit.add(order)\n self.sell_map[order.uid] = order\n order.parent_limit = self.sell_levels[order.price]\n else:\n limit = Limit(order.price)\n limit.add(order)\n self.sell_map[order.uid] = order\n self.sell_tree.insert(limit)\n self.sell_tree.size += 1\n self.sell_levels[order.price] = limit\n order.parent_limit = self.sell_levels[order.price]\n if self.lowest_sell is None or order.price < self.lowest_sell:\n self.lowest_sell = order.price\n self.update_book()", "def update(self, context, data):\n self.context = context\n self.data = data\n\n dt = get_datetime()\n\n for tkt, bo in self._d_orders['trades'].items():\n price = self.data[bo.symbol].price\n bo.update(price, dt)", "def m_ps_FieldsUpdated(self, sender, e):\r\n if e.Error == None:\r\n # Make sure that there is a valid bid\r\n if e.Fields.GetBestBidPriceField().HasValidValue:\r\n if self.m_orderKey == \"\":\r\n # If there is no order working, submit one through the first valid order feed.\r\n # You should use the order feed that is valid for your purposes.\r\n op = ttapi.OrderProfile(e.Fields.Instrument.GetValidOrderFeeds()[0], e.Fields.Instrument)\r\n op.BuySell = ttapi.BuySell.Buy\r\n op.AccountName = \"12345678\"\r\n op.AccountType = ttapi.AccountType.A1\r\n op.OrderQuantity = ttapi.Quantity.FromInt(e.Fields.Instrument, 1)\r\n op.OrderType = ttapi.OrderType.Limit\r\n op.LimitPrice = e.Fields.GetBestBidPriceField().Value\r\n if not self.m_ts.SendOrder(op):\r\n print(\"Send new order failed. {0}\".format(op.RoutingStatus.Message))\r\n self.Dispose()\r\n else:\r\n self.m_orderKey = op.SiteOrderKey\r\n print(\"Send new order succeeded.\")\r\n elif self.m_ts.Orders.ContainsKey(self.m_orderKey) and self.m_ts.Orders[self.m_orderKey].LimitPrice != e.Fields.GetBestBidPriceField().Value:\r\n # If there is a working order, reprice it if its price is not the same as the bid\r\n op = self.m_ts.Orders[self.m_orderKey].GetOrderProfile()\r\n op.LimitPrice = e.Fields.GetBestBidPriceField().Value\r\n op.Action = ttapi.OrderAction.Change\r\n if not self.m_ts.SendOrder(op):\r\n print(\"Send change order failed. 
{0}\".format(op.RoutingStatus.Message))\r\n else:\r\n print(\"Send change order succeeded.\")\r\n else:\r\n if e.Error.IsRecoverableError == False:\r\n print(\"Unrecoverable price subscription error: {0}\".format(e.Error.Message))\r\n self.Dispose()", "def _add_own(self, order):\r\n if not self.have_own_oid(order.oid):\r\n self.owns.append(order)\r\n\r\n # update own volume in that level:\r\n self._update_level_own_volume(\r\n order.typ,\r\n order.price,\r\n self.get_own_volume_at(order.price, order.typ)\r\n )", "async def on_order_updated(self, order: MetatraderOrder):\n for i in range(len(self._orders)):\n if self._orders[i]['id'] == order['id']:\n self._orders[i] = order\n break\n else:\n self._orders.append(order)", "def subscribe_user_orders(self, update_handler):\n pass" ]
[ "0.6120915", "0.60521984", "0.60405105", "0.5957573", "0.5957573", "0.5955088", "0.59534407", "0.57923406", "0.5773802", "0.574019", "0.56705797", "0.56300354", "0.5538148", "0.55151826", "0.5482091", "0.5431966", "0.5430278", "0.54130715", "0.5404102", "0.5397799", "0.5380575", "0.5376238", "0.53519374", "0.5331502", "0.5329531", "0.53237534", "0.53153956", "0.52909106", "0.52894837", "0.5273043" ]
0.69247013
0
This is the function to send the robot the new target. We publish the information of the target position of the robot to topic /move_base/goal. Message type is MoveBaseActionGoal.
def send_destination(self):
    print('send the target to the robot')
    move_base_action_goal=MoveBaseActionGoal()
    move_base_action_goal.goal.target_pose.header.frame_id="map"
    move_base_action_goal.goal.target_pose.pose.orientation.w=1
    move_base_action_goal.goal.target_pose.pose.position.x=self.x_des
    move_base_action_goal.goal.target_pose.pose.position.y=self.y_des
    print('des_x='+str(self.x_des))
    print('des_y='+str(self.y_des))
    self.des_pub.publish(move_base_action_goal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _go_to_goal(self, new_goal):\n\n # Waits until the action server has started up and started\n # listening for goals.\n rospy.loginfo(\"Waiting for move_base to come up . . . \")\n self._MoveBaseClient.wait_for_server()\n rospy.loginfo(\"move_base is UP!\")\n\n # Wait for tf_listener to be ready.\n # NOTE: I'm not sure this is required anymore or not\n # If you call self.tf_listener too soon it has no data in the listener buffer!\n # http://answers.ros.org/question/164911/move_base-and-extrapolation-errors-into-the-future/\n # We could put a static delay in here, but this is faster.\n rospy.loginfo(\"Waiting for tf_listener to be ready . . . \")\n tf_listener_ready = False\n # http://wiki.ros.org/tf2/Tutorials/Writing%20a%20tf2%20listener%20%28Python%29\n while not tf_listener_ready:\n try:\n self.tf_Buffer.lookup_transform(\"map\", \"base_link\", rospy.Time())\n tf_listener_ready = True\n except tf2_ros.ExtrapolationException:\n rospy.loginfo(\"tf_listener not ready . . . \")\n rospy.sleep(0.1)\n rospy.loginfo(\"tf_listener READY!\")\n\n # Create a variable to hold our goal\n goal = move_base_msgs.msg.MoveBaseGoal()\n # Note that move_base will not go to an all zero target.\n\n # we'll create a goal to send to move_base\n # If you are just sending commands to the robot with no map use base_link\n # goal.target_pose.header.frame_id = \"base_link\"\n # But if you have SLAM or Localization active and are using a map, you need to use the map!\n goal.target_pose.header.frame_id = \"map\"\n\n goal.target_pose.pose = new_goal.pose\n\n rospy.loginfo(\"Populated goal.\")\n\n #######################################\n # This is the part that sends the goal!#\n #######################################\n\n rospy.loginfo(\"Sending goal\")\n # Sends the goal to the action server.\n result = -1\n timeoutSeconds = 60 # TODO: Should this be sent as part of the call?\n if not rospy.is_shutdown():\n self._MoveBaseClient.cancel_goals_at_and_before_time(rospy.Time.now())\n # NOTE: Do not use cancel_all_goals here as it can cancel future goals sometimes!\n goal.target_pose.header.stamp = rospy.Time.now()\n if not rospy.is_shutdown():\n self._MoveBaseClient.send_goal(goal)\n count = 0\n finished = False\n # Wait for action to finish or timeout to run out\n # Use double timeout, but cancel if timeout is met\n while (\n count < (timeoutSeconds * 2)\n and not finished\n and not rospy.is_shutdown()\n ):\n if count > timeoutSeconds:\n finished = True\n rospy.loginfo(\n \"Time-out reached while attempting to reach goal, canceling!\"\n )\n # NOTE: If the robot tends to get stuck without moving at all,\n # 1. Subscribe to cmd_vel\n # 2. Increment a timer.\n # 3. Zero it out whenever cmd_vel is updated.\n # 4. Cancel this if the timer gets too high.\n count += 1\n rospy.sleep(\n 1\n ) # Set this delay as you see fit. 
If the robot is extremely fast this could be slowing you down!\n result = self._MoveBaseClient.get_state()\n resultText = \"\"\n # http://docs.ros.org/indigo/api/actionlib_msgs/html/msg/GoalStatus.html\n if result == GoalStatus.PENDING:\n resultText = \"PENDING\"\n if result == GoalStatus.ACTIVE:\n resultText = \"ACTIVE\"\n if result == GoalStatus.PREEMPTED:\n finished = True\n resultText = \"PREEMPTED\"\n if result == GoalStatus.SUCCEEDED:\n finished = True\n resultText = \"SUCCEEDED\"\n if result == GoalStatus.ABORTED:\n finished = True\n resultText = \"ABORTED\"\n if result == GoalStatus.REJECTED:\n finished = True\n resultText = \"REJECTED\"\n if result == GoalStatus.PREEMPTING:\n resultText = \"PREEMPTING\"\n if result == GoalStatus.RECALLING:\n resultText = \"RECALLING\"\n if result == GoalStatus.RECALLED:\n finished = True\n resultText = \"RECALLED\"\n if result == GoalStatus.LOST:\n finished = True\n resultText = \"LOST\"\n rospy.loginfo(\n \"Pending result:\"\n + str(result)\n + \" \"\n + resultText\n + \" Time-out in :\"\n + str(timeoutSeconds - count)\n )\n # If it was determined that we are \"finished\" then cancel\n # any pending goals right now, because the loop will not\n # repeat.\n if finished:\n # NOTE: Do not use cancel_all_goals here as it can cancel future goals sometimes!\n self._MoveBaseClient.cancel_goal()\n\n trans = self.tf_Buffer.lookup_transform(\"map\", \"base_link\", rospy.Time())\n\n rospy.loginfo(\"New Position: \")\n rospy.loginfo(str(trans.transform.translation))\n rospy.loginfo(\" New Orientation: \")\n rospy.loginfo(str(trans.transform.rotation))\n\n if result == GoalStatus.SUCCEEDED:\n return True\n else:\n return False", "def Run(self):\n # Waits until the action server has started up and started\n # listening for goals.\n self._MoveBaseClient.wait_for_server()\n rospy.loginfo(\"move_base is UP!\")\n\n goal = move_base_msgs.msg.MoveBaseGoal()\n # print(\"Empty goal:\")\n # print(goal)\n # Note that move_base will not go to an all zero target.\n\n # Grab a static copy of the current pose to work with\n # Otherwise it might change under our feet!\n \"\"\"\n Note, the actual pose on the map is not the same as this,\n but there is not map based pose.\n What there is the odometry based pose, and then a transform\n from the odometry to the map.\n Retriving the transform, combining it with the odom pose\n and making use of it is a future exercise.\n \"\"\"\n current_odom = self.currentOdom\n # print(\"Current odom:\")\n # print(current_odom)\n print(\"current_odom.pose:\")\n print(current_odom.pose)\n # rospy.Subscriber(\"cmd_vel\", Twist, self._HandleVelocityCommand)\n\n rosNow = rospy.Time.now()\n # we'll create a goal to send to move_base\n goal.target_pose.header.frame_id = \"base_link\"\n goal.target_pose.header.stamp = rosNow\n\n # This will move forward 1 meter from 0\n # goal.target_pose.pose.position.x = 0.0\n # goal.target_pose.pose.orientation.w = 1.0\n\n # Set new pose to same as current pose\n \"\"\"\n You have to set .position and .orientation,\n not .pose because the current_odom.pose\n includes covariance, the other cannot take\n \"\"\"\n goal.target_pose.pose.position = current_odom.pose.pose.position\n goal.target_pose.pose.orientation = current_odom.pose.pose.orientation\n \"\"\"\n If the odometry, which is tied to /base_link, was identical\n to the map location, this would tell it to go nowhere,\n but what we actually end up doing here is telling move_base\n to move the robot the difference between the odom (/base_link)\n and the map. 
:)\n \"\"\"\n \"\"\"\n a quick and easy way to get the transform from the /map to /base_link is to use the command-line tool:\n rosrun tf tf_echo /map /base_link\n So how do I combine this myself?\n \"\"\"\n\n # Rotate currentPose by 90 degrees\n quaternion_difference = tf2_ros.transformations.quaternion_about_axis(\n 0.123, (1, 0, 0)\n )\n # print(\"quaternion_difference:\")\n # print(quaternion_difference)\n\n print(\"Populated goal:\")\n print(goal.target_pose.pose)\n\n rospy.loginfo(\"Sending goal\")\n # Sends the goal to the action server.\n self._MoveBaseClient.send_goal(goal)\n\n rospy.loginfo(\"Waiting for response . . .\")\n # Waits for the server to finish performing the action.\n self._MoveBaseClient.wait_for_result()\n # This could wait a VERY long time,\n # if the move_base doesn't have a timeout it will never come back,\n # in most cases it does, but it seems in some cases it will retry forever.\n # http://docs.ros.org/api/actionlib/html/classactionlib_1_1simple__action__client_1_1SimpleActionClient.html#a460c9f52fd650f918cb287765f169445\n\n result = self._MoveBaseClient.get_result()\n # rospy.loginfo(result)\n result = self._MoveBaseClient.get_state()\n # rospy.loginfo(result)\n\n current_odom = self.currentOdom\n print(\"New odom:\")\n print(current_odom.pose)\n\n rospy.loginfo(\"Ok, now what?\")", "def publish_goal(self, pose):\n # type: (Pose) -> None\n # Elias way\n # client = actionlib.SimpleActionClient('move_base', MoveBaseAction)\n # client.wait_for_server()\n # self.goal = MoveBaseGoal()\n # self.goal.target_pose.header.frame_id = \"map\"\n # self.goal.target_pose.header.stamp = rospy.Time.now()\n # self.goal.target_pose.pose = pose\n\n # client.send_goal(self.goal)\n # wait = client.wait_for_result()\n # if not wait: \n # rospy.logerr(\"Action server not available!\")\n # rospy.signal_shutdown(\"Action server not available!\")\n\n \n\n\n # arena-rosnav way\n print(\"test\")\n self._global_path = Path()\n self._old_global_path_timestamp = self._global_path.header.stamp\n goal = PoseStamped()\n goal.header.stamp = rospy.Time.now()\n goal.header.frame_id = \"map\"\n goal.pose = pose\n self._goal_pub.publish(goal)\n # added by Elias for communication with move_base\n #self.pub_mvb_goal.publish(goal)", "def set_goal(self, robot_id, task, pub_msg): \n pub_names = self.goal_pubs.keys()\n pub_objs = self.goal_pubs.values()\n for i in range(len(pub_names)):\n if robot_id == int(pub_names[i]):\n Goal = MoveBaseActionGoal()\n Goal.header.stamp = rospy.Time.now()\n Goal.header.frame_id = ''\n Goal.goal_id.stamp = rospy.Time.now()\n Goal.goal_id.id = str(int(task[0]))\n Goal.goal.target_pose.header.stamp = rospy.Time.now()\n Goal.goal.target_pose.header.frame_id = 'map'\n Goal.goal.target_pose.pose.position.x = task[1]\n Goal.goal.target_pose.pose.position.y = task[2]\n z_rot_rad = task[3] * np.pi / 180\n q = quaternion_from_euler(0, 0, z_rot_rad)\n Goal.goal.target_pose.pose.orientation.z = q[2]\n Goal.goal.target_pose.pose.orientation.w = q[3]\n pub_obj = pub_objs[i]\n pub_obj.publish(Goal)\n print(\"Goal set for robot \" + str(robot_id) + \". Task id: \" + str(int(task[0])) + \".\")\n msg_str = \"Goal set for robot \" + str(robot_id) + \". Task id: \" + str(int(task[0])) + \". 
Time: %s\" % rospy.Time.now().to_sec()\n pub_msg.publish(msg_str)\n break\n else:\n pass", "def set_goal(self,pos):\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.header.stamp = rospy.Time.now()\n mygoal = Pose(Point(pos[0],pos[1],0),Quaternion(0,0,0,1))\n goal.target_pose.pose = mygoal\n self.move_base.send_goal(goal)", "def call_move_base(self, x, y):\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.pose.position.x = x\n goal.target_pose.pose.position.y = y\n goal.target_pose.pose.position.z = 0\n goal.target_pose.pose.orientation.z = 1\n self.moveBaseClient.send_goal(goal)\n rospy.loginfo(self.name + \": driving\")", "def on_goal(self, goal):\n\n # Python2 way to enforce type checking\n if not isinstance(goal, TeleopGoal):\n result = TeleopResult()\n rospy.logerr(\"Unknown goal received\")\n self._server.set_aborted(result) # Returns failure\n return\n\n self.executing_goal = True\n\n msg = Twist()\n\n operation = goal.operation\n duration = goal.duration\n\n rospy.loginfo(\n \"new action: %s, duration: %f\",\n operation,\n duration,\n )\n\n if operation == TeleopGoal.MOVE_FORWARD_OPERATION:\n msg.linear.x = 1\n self.wheel_instructions.publish(msg)\n\n elif operation == TeleopGoal.MOVE_BACKWARD_OPERATION:\n msg.linear.x = -1\n self.wheel_instructions.publish(msg)\n\n elif operation == TeleopGoal.ROTATE_LEFT_OPERATION:\n msg.angular.z = 1\n self.wheel_instructions.publish(msg)\n\n elif operation == TeleopGoal.ROTATE_RIGHT_OPERATION:\n msg.angular.z = -1\n self.wheel_instructions.publish(msg)\n\n elif operation == TeleopGoal.RAISE_FRONT_ARM_OPERATION:\n self.front_arm_instructions.publish(1.0)\n\n elif operation == TeleopGoal.LOWER_FRONT_ARM_OPERATION:\n self.front_arm_instructions.publish(-1.0)\n\n elif operation == TeleopGoal.RAISE_BACK_ARM_OPERATION:\n self.back_arm_instructions.publish(1.0)\n\n elif operation == TeleopGoal.LOWER_BACK_ARM_OPERATION:\n self.back_arm_instructions.publish(-1.0)\n\n elif operation == TeleopGoal.DIG_FRONT_DRUM_OPERATION:\n self.front_drum_instructions.publish(1.0)\n\n elif operation == TeleopGoal.DUMP_FRONT_DRUM_OPERATION:\n self.front_drum_instructions.publish(-1.0)\n\n elif operation == TeleopGoal.DIG_BACK_DRUM_OPERATION:\n self.back_drum_instructions.publish(1.0)\n\n elif operation == TeleopGoal.DUMP_BACK_DRUM_OPERATION:\n self.back_drum_instructions.publish(-1.0)\n\n else:\n # Otherwise, we must be stopping\n self.wheel_instructions.publish(ALL_STOP)\n self.front_arm_instructions.publish(0.0)\n self.back_arm_instructions.publish(0.0)\n self.front_drum_instructions.publish(0.0)\n self.back_drum_instructions.publish(0.0)\n\n # Time counter\n self._counter = 0\n\n # Start new timer for operation\n rospy.Timer(\n rospy.Duration(duration),\n self.execution_timer_callback,\n oneshot=True,\n )\n t0 = time.time()\n\n # Feedback\n while not rospy.is_shutdown() and self.executing_goal:\n\n elapsed = time.time() - t0\n rospy.loginfo(\n \"operation: %s, duration: %f, elapsed: %f\",\n operation,\n duration,\n elapsed,\n )\n\n if self._server.is_preempt_requested():\n rospy.loginfo(\"Preempted\")\n self.executing_goal = False\n self._server.set_preempted(result)\n return\n\n feedback = TeleopFeedback()\n feedback.x = self.x\n feedback.y = self.y\n feedback.heading = str(\"{} degrees\".format(self.heading))\n try:\n self._server.publish_feedback(feedback)\n except rospy.ROSException:\n self._server.set_aborted(\n None, text=\"Unable to publish feedback. 
Has ROS stopped?\"\n )\n return\n\n try:\n # Stop the robot after every action\n self.wheel_instructions.publish(ALL_STOP)\n self.front_arm_instructions.publish(0.0)\n self.back_arm_instructions.publish(0.0)\n self.front_drum_instructions.publish(0.0)\n self.back_drum_instructions.publish(0.0)\n except rospy.ROSException:\n self._server.set_aborted(\n None, text=\"Unable to publish all stop. Has ROS stopped?\"\n )\n return\n\n # Result\n result = TeleopResult()\n result.x = self.x\n result.y = self.y\n\n # Return success result to the client\n rospy.loginfo(\"Success\")\n self._server.set_succeeded(result)", "def move_base(self, x, y, z):\n # fill ROS message\n goal = control_msgs.msg.FollowJointTrajectoryGoal()\n traj = trajectory_msgs.msg.JointTrajectory()\n traj.joint_names = [\"odom_x\", \"odom_y\", \"odom_t\"]\n p = trajectory_msgs.msg.JointTrajectoryPoint()\n p.positions = [x, y, z]\n p.velocities = [0, 0, 0]\n p.time_from_start = rospy.Time(15)\n traj.points = [p]\n goal.trajectory = traj\n\n # send message to the action server\n self.cli.send_goal(goal)\n\n # wait for the action server to complete the order\n self.cli.wait_for_result()\n return self._running", "def make_goal(self):\n\n # TODO: prevent calculating positions too close to a wall\n # TODO: visualize this process better\n # TODO: publish goal in map frame to prevent errors if the robot moves in time\n global ms\n rospy.loginfo('Calculating navigation goal')\n\n if self.objective == 'discovery':\n dest = ms.dis_pt()\n elif self.objective == 'delivery':\n dest = ms.del_pt()\n\n # Transform can location into base_link\n pt = PointStamped(header=Header(stamp=rospy.Time(0), frame_id='map'), point=dest)\n self.destination = tf_listener.transformPoint(\"base_link\", pt).point # w.r.t self\n\n x, y = self.destination.x, self.destination.y\n theta = math.atan2(y, x)\n\n if self.objective == 'discovery':\n r = 1.0 # 1m back from target position\n elif self.objective == 'delivery':\n r = 0.5 # 0.5m back from target position, i.e. 
\"not eternally far away that it seems like a failure\"\n\n x -= r * math.cos(theta)\n y -= r * math.sin(theta)\n\n angle = Quaternion(0, 0, math.sin(theta / 2), math.cos(theta / 2))\n\n dest = PoseStamped(\n header=Header(frame_id='base_link'),\n pose=Pose(position=Point(x=x, y=y, z=0), orientation=angle))\n\n goal = MoveBaseGoal(target_pose=dest)\n\n return goal", "def send_feedback(self, goal):\n fb = MoveRobotFeedback(actionID=goal.actionID,\n current_depth=self.curr_depth,\n current_heading=self.curr_heading)\n self._as.publish_feedback(fb)", "def apply_action(self, action):\n robot_state = self.get_state('turtlebot3_waffle_pi','world')\n robot_x = robot_state.pose.position.x\n robot_y = robot_state.pose.position.y\n # Set the distance moved in an action such that it is at least as large as the\n # minimum distance that would let a robot in the middle of the goal go to either side\n #self.move_dist = max(((C.GOAL_TOP + C.GOAL_BOTTOM) / 2) / C.NUM_POS_SENDS, 0.5)\n if action == Learn.MOVE_LEFT:\n print(\"Move left\")\n self.set_robot(robot_x, robot_y+self.move_dist)\n elif action == Learn.MOVE_RIGHT:\n print(\"Move right\")\n self.set_robot(robot_x, robot_y-self.move_dist)\n else:\n print(\"Stay put\")", "def _do_mc_action(self):\n goal = self._current_mc_goal\n self._position_control_client.send_goal(\n goal,\n done_cb = self._motion_control_callback\n )", "def move(self, agent, action):\n\t\tpass", "def __move(self):\n if self.goal is None:\n if self.tasks:\n self.goal = self.tasks.pop()\n self.goal_history.append(self.goal)\n self.logger.log(\n f\"Crewmate {self.agent_id} set as goal: {self.goal.name} in\" +\n f\" {self.game_map.room_names[self.goal.room_id]}\",\n Logger.LOG | Logger.PRINT_VISUAL)\n else:\n self.room = self.game_map.move_random(self)\n self.location_history.append(self.room)\n return\n\n if self.room is not self.goal.room_id:\n self.room = self.game_map.next_toward(self, self.goal.room_id)\n\n # Log the current room we are in: Either the room we moved to, or the room that happens to be the goal room\n self.location_history.append(self.room)", "def move2goal(self):\n vel_msg = Twist()\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = 0.4 # m/s\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = 1.5 # rad/s\n\n # Starting point reference\n goal_x = 1.0 \n goal_y = 1.0\n x_ref = 1.0\n y_ref = 1.0\n\n # Previous Reference\n x_prev_ref = 0.0\n y_prev_ref = 0.0\n theta_prev_ref = self.theta\n vrefA = 0.5\n wrefA = 0.0\n \n i = 0\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n\n x_prev_ref = tPx[0]\n y_prev_ref = tPy[0]\n theta_prev_ref = tPTheta[0]\n\n print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n print(f'ACTUAL THETA: {self.theta}')\n\n while not rospy.is_shutdown():\n \n if i >= 8:\n i = 0\n\n x_ref = goal_x\n y_ref = goal_y\n\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n # inputRef = ControllerInput(\n # xref=x_ref,\n # yref=y_ref,\n # RstateX=self.x_position,\n # RstateY=self.y_position,\n # RstateTheta=self.theta,\n # RstateVelocity=vel_msg.linear.x,\n # RstateW=vel_msg.angular.z,\n # xrefA=x_prev_ref,\n # yrefA=y_prev_ref,\n # thetarefA=theta_prev_ref,\n # vrefA=vrefA,\n # wrefA=wrefA\n # )\n\n # rospy.loginfo(f'X: {self.x_position} \\tY: 
{self.y_position}\\t Theta: {self.theta} ')\n # nmpc = NMPC_Controller(inputRef)\n # tPx, tPy, tPTheta = nmpc.test_create_mini_path()\n\n # print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n # print(f'ACTUAL THETA: {self.theta}')\n \n # new_v, new_w = nmpc.start_optmizer()\n # new_v = round(new_v, 4)\n # new_w = round(new_w, 4)\n\n # print(new_v, new_w)\n # rospy.loginfo(\n # f'X: {self.x_position}, Y: {self.y_position}, THETA: {self.theta}')\n \n # self.velocity_publisher.publish(vel_msg)\n # x_prev_ref = self.x_position\n # y_prev_ref = self.y_position\n # theta_prev_ref = self.theta\n # vrefA = vel_msg.linear.x\n # wrefA = vel_msg.angular.z\n \n\n # theta_prev_ref = self.theta\n # vel_msg.angular.z = 0.0\n\n\n '''Update the linear & angular velocity'''\n # vel_msg.linear.x = new_v\n # vel_msg.angular.z = new_w\n\n if i < 8:\n inputRef = ControllerInput(\n xref = tPx[i],\n yref = tPy[i],\n RstateX = self.x_position,\n RstateY = self.y_position,\n RstateTheta = self.theta,\n RstateVelocity = vel_msg.linear.x,\n RstateW = vel_msg.angular.z,\n xrefA = x_prev_ref,\n yrefA = y_prev_ref,\n thetarefA = theta_prev_ref,\n vrefA = vrefA,\n wrefA = wrefA\n )\n\n nmpc = NMPC_Controller(inputRef)\n new_v, new_w = nmpc.start_optmizer()\n new_v = round(new_v, 4)\n new_w = round(new_w, 4)\n\n print(f'(actual) X: {self.x_position}, Y: {self.x_position}, THETA: {self.theta}')\n print(f'(desired) X: {tPx[i]}, Y: {tPy[i]}')\n print(f'V: {vel_msg.linear.x}\\tW: {vel_msg.angular.z}')\n\n x_prev_ref = tPx[i-1]\n y_prev_ref = tPy[i-1]\n theta_prev_ref = tPTheta[i-1]\n vrefA = vel_msg.linear.x\n wrefA = vel_msg.angular.z\n\n vel_msg.linear.x = new_v\n vel_msg.angular.z = new_w\n # vel_msg.angular.z = 0.0\n\n print(f'index: {i}')\n\n distance = math.sqrt((self.x_position - tPx[i])**2 + (self.y_position - tPy[i])**2)\n if distance < 0.3:\n print(f'Distance: {distance}')\n i+=1\n\n\n self.velocity_publisher.publish(vel_msg)\n self.rate.sleep()\n\n rospy.spin()", "def _target_callback(self, msg):\n self.target_pose = np.asarray(msg.pos)[np.newaxis].T\n self.target_vel = np.asarray(msg.vel)[np.newaxis].T\n self.target_acc = np.asarray(msg.acc)[np.newaxis].T\n\n print(\"\\nGoing to:\")\n print(\"Pos: \\n\" + str(self.target_pose))\n print(\"Vel: \\n\" + str(self.target_vel))\n print(\"Acc: \\n\" + str(self.target_acc))", "def update_motor_target(data):\n print('sending new motor target')\n slider_target = json.dumps({\"id\" : \"Motor1\", \"target\": data})\n SERIAL_PARENT.send(slider_target)\n OUTGOING.append(slider_target)", "def step(self, action):\n # print(\"############################\")\n # print(\"action: {}\".format(action))\n\n self.movement_complete.data = False\n\n # 1) Read last joint positions by getting the observation before acting\n old_observation = self.get_obs()\n\n # 2) Get the new joint positions according to chosen action (actions here are the joint increments)\n if self._joint_increment is None:\n next_action_position = action\n else:\n next_action_position = self.get_action_to_position(action, old_observation[1:7])\n\n # 3) Move to position and wait for moveit to complete the execution\n self.publisher_to_moveit_object.pub_joints_to_moveit(next_action_position)\n # rospy.wait_for_message(\"/pickbot/movement_complete\", Bool)\n while not self.movement_complete.data:\n pass\n\n start_ros_time = rospy.Time.now()\n while True:\n # Check collision:\n # invalid_collision = self.get_collisions()\n # if invalid_collision:\n # print(\">>>>>>>>>> Collision: RESET 
<<<<<<<<<<<<<<<\")\n # observation = self.get_obs()\n # reward = UMath.compute_reward(observation, -200, True)\n # observation = self.get_obs()\n # print(\"Test Joint: {}\".format(np.around(observation[1:7], decimals=3)))\n # return U.get_state(observation), reward, True, {}\n\n elapsed_time = rospy.Time.now() - start_ros_time\n if np.isclose(next_action_position, self.joints_state.position, rtol=0.0, atol=0.01).all():\n break\n elif elapsed_time > rospy.Duration(2): # time out\n break\n # time.sleep(s\n\n \"\"\"\n #execute action as long as the current position is close to the target position and there is no invalid collision and time spend in the while loop is below 1.2 seconds to avoid beeing stuck touching the object and not beeing able to go to the desired position \n time1=time.time()\n while np.linalg.norm(np.asarray(self.joints_state.position)-np.asarray(next_action_position))>0.1 and self.get_collisions()==False and time.time()-time1<0.1: \n rospy.loginfo(\"Not yet reached target position and no collision\")\n \"\"\"\n # 4) Get new observation and update min_distance after performing the action\n new_observation = self.get_obs()\n if new_observation[0] < self.min_distace:\n self.min_distace = new_observation[0]\n # print(\"observ: {}\".format( np.around(new_observation[1:7], decimals=3)))\n\n # 5) Convert Observations into state\n state = U.get_state(new_observation)\n\n # 6) Check if its done, calculate done_reward\n done, done_reward, invalid_contact = self.is_done(new_observation)\n\n # 7) Calculate reward based on Observatin and done_reward and update the accumulated Episode Reward\n reward = UMath.compute_reward(new_observation, done_reward, invalid_contact)\n\n ### TEST ###\n if done:\n joint_pos = self.joints_state.position\n print(\"Joint in step (done): {}\".format(np.around(joint_pos, decimals=3)))\n ### END of TEST ###\n\n self.accumulated_episode_reward += reward\n\n self.episode_steps += 1\n\n return state, reward, done, {}", "def go_to(self, x_map, y_map, yaw_map):\r\n loginfo(\"Going to pose x = %s, y = %s, yaw = %s.\" %\r\n (x_map, y_map, yaw_map))\r\n goal = MoveBaseGoal()\r\n goal.target_pose.header = Header(stamp=Time.now(), frame_id = '/map')\r\n goal.target_pose.pose = self._x_y_yaw_to_pose(x_map, y_map, yaw_map)\r\n self.move_base_ac.send_goal(goal)\r\n loginfo(\"Send goal to move base. 
Waiting for result.\")\r\n self.move_base_ac.wait_for_result()\r\n #loginfo(\"Got result: %s\" % self.move_base_ac.get_result())\r\n #loginfo(\"Pose: %s, %s, %s\" %\r\n # (self.get_x_map(), self.get_y_map(), self.get_yaw_map()))\r\n sleep(1)\r\n loginfo(\"At Goal: %i\", self._at_goal)\r\n return self._at_goal", "def update_goal(self):\n pass", "def _rviz_nav_goal_cb(self, msg):\n goal = Pose2D(x=msg.pose.position.x, y=msg.pose.position.y)\n tolerance = 0.0\n\n self.drive_to(goal, tolerance, avoid_targets=True, avoid_home=False)", "def goal_position(self, value):\n self._write(MX_GOAL_POSITION, value)", "def execute(self, userdata):\n\n global x_home\n global y_home\n\n rospy.loginfo(rospy.get_caller_id() + 'Executing state SLEEP ')\n ## Setting the goal home position\n goal = exp_assignment2.msg.PlanningGoal()\n goal.target_pose_robot.pose.position.x = x_home\n goal.target_pose_robot.pose.position.y = y_home\n rospy.loginfo(rospy.get_caller_id() + 'Back home x: %d y: %d',x_home,y_home)\n client.send_goal(goal) \n client.wait_for_result()\n rospy.loginfo('i m arrived, now i will take a nap')\n time.sleep(3)\n self.rate.sleep()\n return 'GoToNormal'", "def _step(self, action: np.ndarray):\n # TODO: How do deal with goal changing?\n denormalize = False if self.use_raw_actions else True\n current_pos = self.sim.data.mocap_pos.copy()\n meanval = (self.mocap_pos_clip_upper + self.mocap_pos_clip_lower)/2.0\n rng = (self.mocap_pos_clip_upper - self.mocap_pos_clip_lower)/2.0\n new_pos = action[:3]*rng + meanval #current_pos + action[:3]*self.range\n # new_pos = current_pos + action[:3]*self.range\n new_pos = np.clip(new_pos, self.mocap_pos_clip_lower, self.mocap_pos_clip_upper)\n self.sim.data.mocap_pos[:] = new_pos.copy()\n self.robot.step({\n 'gripper': action[-2:]\n }, denormalize)", "def send_robot_cmd(self, command, *args):\n \n \n if self.robot_commander is None:\n self.start_robot_publisher()\n time.sleep(.5)\n\n # choose which platform\n #if GlobalSettings.USE_TEGA:\n msg = TegaBehaviors.get_msg_from_behavior(command, args)\n #else:\n # msg = JiboBehaviors.get_msg_from_behavior(command, args)\n\n # add header\n self.robot_commander.publish(msg) # would be nice to guarantee message performance here\n #rospy.loginfo(msg)", "def do_move(self, rel=True):\n cmd = self.MGMSG_MOT_MOVE_ABSOLUTE\n if rel:\n cmd = self.MGMSG_MOT_MOVE_RELATIVE\n self.__send_short(cmd, self.__chan, 0x00)", "def sendInteractionPos(self, targetX, targetY):\r\n\r\n print \"SEND & WAIT: InteractionPos\"\r\n waitForFullExec(self, self.sender.sendInteractionPos(targetX, targetY))", "def move_robot(request):\n\n phase_id = request.phase\n print \"phase_id is {}\".format(phase_id)\n if phase_id == 0:\n success = move_to_marshmallow()\n elif phase_id == 1:\n success = move_to_mouth()\n elif phase_id == 2:\n success = release_marshmallow()\n elif phase_id == 3:\n success = grip_marshmallow()\n elif phase_id == 4:\n success = move_to_start_state()\n elif phase_id == 5:\n success = perform_full_sequence()\n message = \"placeholder\"\n\n return TriggerPhaseResponse(success, message)", "def execute(self):\n self._odom_msg.header.stamp = rospy.Time.now()\n # query base state from robot and store in odom msg\n position, orientation, linear_velocity, angular_velocity = self._robot.get_base_state()\n [self._odom_msg.pose.pose.position.x,\n self._odom_msg.pose.pose.position.y,\n self._odom_msg.pose.pose.position.z] = position\n [self._odom_msg.pose.pose.orientation.x,\n self._odom_msg.pose.pose.orientation.y,\n 
self._odom_msg.pose.pose.orientation.z,\n self._odom_msg.pose.pose.orientation.w] = orientation\n [self._odom_msg.twist.twist.linear.x,\n self._odom_msg.twist.twist.linear.y,\n self._odom_msg.twist.twist.linear.z] = linear_velocity\n [self._odom_msg.twist.twist.angular.x,\n self._odom_msg.twist.twist.angular.y,\n self._odom_msg.twist.twist.angular.z] = angular_velocity\n self._publisher.publish(self._odom_msg)\n\n tf_msg = TransformStamped()\n tf_msg.header.frame_id = self._odom_msg.header.frame_id\n tf_msg.child_frame_id = self._odom_msg.child_frame_id\n tf_msg.transform.translation = self._odom_msg.pose.pose.position\n tf_msg.transform.rotation = self._odom_msg.pose.pose.orientation\n tf_msg.header.stamp = rospy.Time.now()\n self._br.sendTransform(tf_msg)", "def act(self):\n if self.goal and self.room is self.goal.room_id:\n self.logger.log(\n f\"Crewmate {self.agent_id} completed their goal: {self.goal.name} in \" +\n f\"{self.game_map.room_names[self.goal.room_id]}\",\n Logger.PRINT_VISUAL | Logger.LOG\n )\n\n # If we perform an action in a room, we can only see the room in which the action is performed.\n self.location_history.append(self.room)\n evt = RoomEvent((EventType.TASK_VISUAL if self.goal.is_visual else EventType.TASK), self.agent_id, self.goal.name)\n self.game_map.add_room_event(self.room, evt)\n self.goal = None\n else:\n self.__move()" ]
[ "0.7590081", "0.7262525", "0.69072", "0.68744373", "0.6722525", "0.66449314", "0.65029967", "0.64658207", "0.6431669", "0.62456536", "0.6217606", "0.61394477", "0.6081033", "0.6040425", "0.601179", "0.5964906", "0.5939005", "0.5862318", "0.5845824", "0.5809169", "0.57735795", "0.569537", "0.56854606", "0.56776893", "0.56529164", "0.5648275", "0.56389326", "0.5630719", "0.56119835", "0.5606232" ]
0.77526456
0
Perform maximum a posteriori fit
def fit(args):
    config_file = args.setupfn
    conf_base = os.path.basename(config_file).split('.')[0]
    print("Performing maximum a posteriori fitting for {}".format(conf_base))
    P, post = radvel.utils.initialize_posterior(config_file, decorr=args.decorr)
    post = radvel.fitting.maxlike_fitting(post, verbose=True)
    postfile = os.path.join(args.outputdir, '{}_post_obj.pkl'.format(conf_base))
    post.writeto(postfile)
    savestate = {'run': True, 'postfile': os.path.relpath(postfile)}
    save_status(os.path.join(args.outputdir, '{}_radvel.stat'.format(conf_base)), 'fit', savestate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maxfit(self, *args, **kwargs):\n return _image.image_maxfit(self, *args, **kwargs)", "def predict_max(self, x):\n y_ = self.predict(x)\n amax = torch.argmax(y_, dim=1).detach()\n t = torch.zeros_like(y_)\n t[torch.arange(y_.shape[0]),amax] = 1\n return t", "def posterior(self, val, **kwargs) -> float:\n\n data = self.data\n\n # override val with parameters specified via kwargs\n val = copy.deepcopy(val)\n for key, value in kwargs.items():\n setattr(val, key, value)\n\n # extract parameters\n gain = val.gain\n states = val.states\n pi = val.transitions\n pi_conc = val.transitions_conc\n mu_flor = val.mu_flor\n mu_flor_mean = val.mu_flor_mean\n mu_flor_shape = val.mu_flor_shape\n mu_back = val.mu_back\n mu_back_mean = val.mu_back_mean\n mu_back_shape = val.mu_back_shape\n load_weight = val.load_weight\n num_rois = val.num_rois\n num_load = val.num_load\n num_data = val.num_data\n num_states = val.num_states\n\n # calculate shape parameters\n idx = mu_flor_mean > 0\n mu_flor_scale = np.zeros(mu_flor_mean.shape)\n mu_flor_scale[idx] = mu_flor_mean[idx] / mu_flor_shape[idx]\n mu_back_scale = mu_back_mean / mu_back_shape\n # calculate effective pi for collapsed state space when weight on load is taken into account\n pi_eff = pi.copy()\n pi_eff[-1, :] *= load_weight\n pi_eff[-1, -1] = 1 - load_weight\n\n # probability from likelihood\n brightness = np.zeros(shape=data.shape)\n for r in range(num_rois):\n brightness[r, :] = mu_flor @ states_to_pops(states[r, :, :], num_states) + mu_back[r]\n lhood = np.sum(stats.gamma.logpdf(data, a=brightness, scale=gain))\n\n # probability from phototrajectory\n kinetic = 0\n for i in range(num_states):\n if pi_eff[-1, i] > 0:\n kinetic += np.sum(states[:, :, 0] == i) * np.log(pi_eff[-1, i])\n for j in range(num_states):\n if pi_eff[i, j] > 0:\n kinetic += np.sum((states[:, :, :-1] == i) * (states[:, :, 1:] == j)) * np.log(pi_eff[i, j])\n\n # probability from prior\n prior = (\n # prior on fluorophore brightness (ignore dark states)\n np.sum(stats.gamma.logpdf(mu_flor[idx], a=mu_flor_shape[idx], scale=mu_flor_scale[idx]))\n # prior on background brightness\n + np.sum(stats.gamma.logpdf(mu_back, a=mu_back_shape, scale=mu_back_scale))\n # prior on transitions\n + np.sum(Dirichlet.logpdf(pi, pi_conc))\n )\n\n prob = lhood + kinetic + prior\n\n return prob", "def fit(self, X):", "def auxmaxf1(x):\n \n# Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmax_f1_part_i(x,m_ind) \n \n return f", "def best_params(self):\n return self.X[np.argmax(self.y.numpy())]", "def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )", "def fit(self, Y, STATUS, ntop=100, nrecent=100, nmax=400, ntopmu=100, ntopvar=100, nkmeans=300, nkeamnsdata=5000,\n lam=1e-6):\n X = self.X\n untested = [i for i in range(self.n) if STATUS[i] == 0]\n tested = [i for i in range(self.n) if STATUS[i] == 2]\n ytested = Y[tested].reshape(-1)\n self.y_max = np.max(ytested)\n # each 10 fits we update the hyperparameters, otherwise we just update the data which is a lot faster\n if np.mod(self.update_counter, self.updates_per_big_fit) == 0:\n print('fitting hyperparameters')\n # how many training points are there\n ntested = len(tested)\n # if more than nmax we will subsample and use the subsample to fit hyperparametesr\n if ntested > nmax:\n # subsample is uniion of 100 best points, 100 most recent points and then random points \n top = list(np.argsort(ytested)[-ntop:])\n recent = list(range(ntested - nrecent, ntested))\n 
topandrecent = list(set(top + recent))\n rand = list(\n np.random.choice([i for i in range(ntested) if i not in topandrecent], nmax - len(topandrecent),\n False))\n testedtrain = topandrecent + rand\n ytrain = ytested[testedtrain]\n train = [tested[i] for i in testedtrain]\n else:\n train = tested\n ytrain = ytested\n \n # use GPy code to fit hyperparameters to minimize NLL on train data\n mfy = GPy.mappings.Constant(input_dim=self.d, output_dim=1) # fit dense GPy model to this data\n ky = GPy.kern.RBF(self.d, ARD=True, lengthscale=np.ones(self.d))\n self.GP = GPy.models.GPRegression(X[train], ytrain.reshape(-1, 1), kernel=ky, mean_function=mfy)\n self.GP.optimize('bfgs')\n # strip out fitted hyperparameters from GPy model, because cant do high(ish) dim sparse inference\n self.mu = self.GP.flattened_parameters[0]\n self.a = self.GP.flattened_parameters[1]\n self.l = self.GP.flattened_parameters[2]\n self.b = self.GP.flattened_parameters[3]\n # selecting inducing points for sparse inference \n print('selecting inducing points')\n # get prediction from GPy model \n self.py = self.GP.predict(X)\n # points with 100 highest means\n topmu = [untested[i] for i in np.argsort(self.py[0][untested].reshape(-1))[-ntopmu:]]\n # points with 100 highest uncertatinty\n topvar = [untested[i] for i in np.argsort(self.py[1][untested].reshape(-1))[-ntopvar:]]\n # combine with train set above to give nystrom inducing points (inducing points that are also actual trainingdata points) \n nystrom = topmu + topvar + train\n # also get some inducing points spread throughout domain by using kmeans\n # kmeans is very slow on full dataset so choose random subset \n # also scale using length scales l so that kmeans uses approproate distance measure\n kms = KMeans(n_clusters=nkmeans, max_iter=5).fit(\n np.divide(X[list(np.random.choice(untested, nkeamnsdata))], self.l))\n # matrix of inducing points \n self.M = np.vstack((X[nystrom], np.multiply(kms.cluster_centers_, self.l)))\n # dragons...\n # email [email protected] if this bit goes wrong!\n print('fitting sparse model')\n DXM = euclidean_distances(np.divide(X, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_XM = self.a * np.exp(-DXM / 2)\n DMM = euclidean_distances(np.divide(self.M, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_MM = self.a * np.exp(-DMM / 2) + np.identity(self.M.shape[0]) * lam * self.a\n self.B = self.a + self.b - np.sum(np.multiply(np.linalg.solve(self.SIG_MM, self.SIG_XM.T), self.SIG_XM.T),0)\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + np.matmul(K, np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n else:\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + np.matmul(K, np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n self.update_counter += 1\n \"\"\" \n key attributes updated by fit \n \n self.SIG_XM : prior covarience matrix between data and inducing points\n self.SIG_MM : prior covarience matrix at inducing points\n \n self.SIG_MM_pos : posterior covarience matrix at inducing points\n self.mu_M_pos : posterior mean at inducing 
points \n \n \"\"\"", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def softmax(y):\n maxy = np.amax(y)\n e = np.exp(y - maxy)\n return e / np.sum(e)", "def fit(self, X, y):\n if self.kernel is None: # Use an RBF kernel as default\n self.kernel_ = C(1.0, constant_value_bounds=\"fixed\") \\\n * RBF(1.0, length_scale_bounds=\"fixed\")\n else:\n self.kernel_ = clone(self.kernel)\n\n self._rng = check_random_state(self.random_state)\n\n X, y = check_X_y(X, y, multi_output=True, y_numeric=True)\n print(X)\n n, d = X.shape\n self.n_ = n\n self.d_ = d\n P = np.append(X, np.ones([n,1]),1)\n\n # Normalize target value\n # if self.normalize_y:\n # self._y_train_mean = np.mean(y, axis=0)\n # # demean y\n # y = y - self._y_train_mean\n # else:\n # self._y_train_mean = np.zeros(1)\n\n if np.iterable(self.alpha) \\\n and self.alpha.shape[0] != y.shape[0]:\n if self.alpha.shape[0] == 1:\n self.alpha = self.alpha[0]\n else:\n raise ValueError(\"alpha must be a scalar or an array\"\n \" with same number of entries as y.(%d != %d)\"\n % (self.alpha.shape[0], y.shape[0]))\n\n self.X_train_ = np.copy(X) if self.copy_X_train else X\n self.y_train_ = np.copy(y) if self.copy_X_train else y\n self.P_train_ = np.copy(P) if self.copy_X_train else P\n\n if self.optimizer is not None and self.kernel_.n_dims > 0:\n # Choose hyperparameters based on maximizing the log-marginal\n # likelihood (potentially starting from several initial values)\n def obj_func(theta, eval_gradient=True):\n if eval_gradient:\n lml, grad = self.log_marginal_likelihood(\n theta, eval_gradient=True)\n return -lml, -grad\n else:\n return -self.log_marginal_likelihood(theta)\n\n # First optimize starting from theta specified in kernel\n optima = [(self._constrained_optimization(obj_func,\n self.kernel_.theta,\n self.kernel_.bounds))]\n\n # Additional runs are performed from log-uniform chosen initial\n # theta\n if self.n_restarts_optimizer > 0:\n if not np.isfinite(self.kernel_.bounds).all():\n raise ValueError(\n \"Multiple optimizer restarts (n_restarts_optimizer>0) \"\n \"requires that all bounds are finite.\")\n bounds = self.kernel_.bounds\n for iteration in range(self.n_restarts_optimizer):\n theta_initial = \\\n self._rng.uniform(bounds[:, 0], bounds[:, 1])\n optima.append(\n self._constrained_optimization(obj_func, theta_initial,\n bounds))\n # Select result from run with minimal (negative) log-marginal\n # likelihood\n lml_values = list(map(itemgetter(1), optima))\n self.kernel_.theta = optima[np.argmin(lml_values)][0]\n self.log_marginal_likelihood_value_ = -np.min(lml_values)\n else:\n self.log_marginal_likelihood_value_ = \\\n self.log_marginal_likelihood(self.kernel_.theta)\n\n # Precompute quantities required for predictions which are independent\n # of actual query points\n K = self.kernel_(self.X_train_)\n K[np.diag_indices_from(K)] += self.alpha\n\n try:\n self.L_ = cholesky(K, lower=True) # Line 2\n # self.L_ changed, self._K_inv needs to be recomputed\n self._K_inv = None\n Q = np.dot(np.transpose(self.P_train_), cho_solve((self.L_, True), self.P_train_))\n\n self.M_ = cholesky(Q, lower=True)\n except np.linalg.LinAlgError as exc:\n exc.args = (\"The kernel, %s, is not returning a \"\n \"positive definite matrix. 
Try gradually \"\n \"increasing the 'alpha' parameter of your \"\n \"GaussianProcessRegressor estimator.\"\n % self.kernel_,) + exc.args\n raise\n\n self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3\n self.beta = cho_solve((self.M_, True), np.dot(np.transpose(self.P_train_),self.alpha_) )\n\n return self", "def posterior_distr(self, y, **args):\n raise NotImplementedError", "def computePosterior(self):\n # in their log form, posterior = prior + beta * datalikelihood\n # make a copy of prior at first\n self.posterior.copy(self.prior)\n # add the data likelihood\n altar.blas.daxpy(self.beta, self.data, self.posterior)\n # all done\n return self", "def predict(probs):\n # Your code here.\n return np.argmax(probs, axis=1)", "def auxmaxf2(x):\n # Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmaxrho1(x,m_ind) + auxmaxrho2(x,m_ind) \n \n return f", "def get_optimal_postprocess(loaders=None, runner=None, logdir: str = \"\"):\n loaders[\"infer\"] = loaders[\"valid\"]\n\n runner.infer(\n model=runner.model,\n loaders=loaders,\n callbacks=[\n CheckpointCallback(resume=f\"{logdir}/checkpoints/best.pth\"),\n InferCallback(),\n ],\n )\n valid_masks = []\n probabilities = np.zeros((2220, 350, 525))\n for i, (batch, output) in enumerate(\n zip(loaders[\"infer\"].dataset, runner.callbacks[0].predictions[\"logits\"])\n ):\n image, mask = batch\n for m in mask:\n if m.shape != (350, 525):\n m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)\n valid_masks.append(m)\n\n for j, probability in enumerate(output):\n if probability.shape != (350, 525):\n probability = cv2.resize(\n probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR\n )\n probabilities[i * 4 + j, :, :] = probability\n\n class_params = {}\n for class_id in range(4):\n print(class_id)\n attempts = []\n for t in range(0, 100, 10):\n t /= 100\n for ms in [\n 0,\n 100,\n 1000,\n 5000,\n 10000,\n 11000,\n 14000,\n 15000,\n 16000,\n 18000,\n 19000,\n 20000,\n 21000,\n 23000,\n 25000,\n 27000,\n 30000,\n 50000,\n ]:\n masks = []\n for i in range(class_id, len(probabilities), 4):\n probability = probabilities[i]\n predict, num_predict = post_process(sigmoid(probability), t, ms)\n masks.append(predict)\n\n d = []\n for i, j in zip(masks, valid_masks[class_id::4]):\n if (i.sum() == 0) & (j.sum() == 0):\n d.append(1)\n else:\n d.append(dice(i, j))\n\n attempts.append((t, ms, np.mean(d)))\n\n attempts_df = pd.DataFrame(attempts, columns=[\"threshold\", \"size\", \"dice\"])\n\n attempts_df = attempts_df.sort_values(\"dice\", ascending=False)\n print(attempts_df.head())\n best_threshold = attempts_df[\"threshold\"].values[0]\n best_size = attempts_df[\"size\"].values[0]\n\n class_params[class_id] = (best_threshold, int(best_size))\n\n print(class_params)\n return class_params", "def fit(self, x, y): \n # *** START CODE HERE ***\n y = y.reshape(y.shape[0], 1)\n y_0 = (1 - y).reshape(y.shape)\n m = y.shape[0]\n m_0 = np.asscalar(np.sum(y_0))\n m_1 = np.asscalar(np.sum(y))\n # Find phi, mu_0, mu_1, and sigma\n phi = np.sum(y) / m\n mu_0 = (np.sum(np.multiply(y_0, x), axis = 0, keepdims = True) / m_0) #.reshape(y.shape)\n mu_1 = np.sum(np.multiply(y, x), axis = 0, keepdims=True) / m_1\n sigma = getsigma(x, mu_0, mu_1, m, y, y_0)\n # Write theta in terms of the parameters\n sigma_inv = np.linalg.inv(sigma)\n log_phi = np.log(np.exp(-1 * np.log(phi)) - 1)\n theta_0 = (np.dot(np.dot(mu_0, sigma_inv), mu_0.T) - np.dot(np.dot(mu_1, sigma_inv), mu_1.T)) / 2 - log_phi\n self.theta = 
np.concatenate((theta_0, np.dot(sigma_inv, (mu_1 - mu_0).T)))\n # Compute cost\n x_0 = np.zeros((x.shape[0], 1)) + 1\n x_train = np.concatenate((x_0.T, x.T))\n h_theta = sigmoid(np.dot(self.theta.T, x_train)).T\n cost = - np.sum(np.dot(y.T, np.log(h_theta - (h_theta - 0.5) * self.eps)) + (np.dot(y_0.T, np.log(1 - h_theta + (h_theta - 0.5) * self.eps)))) / m\n if self.verbose:\n print(\"Cost: \" + str(cost))\n # *** END CODE HERE ***", "def fit(self, X, y):\n proportions = y.value_counts()/y.value_counts().sum()\n self.labels = proportions.index.values.astype(bool)\n self.guess = np.argmax(proportions)\n self.fitted = True", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ..." ]
[ "0.6777389", "0.62072074", "0.6111423", "0.5956245", "0.592218", "0.5921827", "0.59028506", "0.58424306", "0.5815447", "0.5815447", "0.5815447", "0.57727796", "0.57099736", "0.56954104", "0.5691916", "0.56553304", "0.5617285", "0.559461", "0.55689347", "0.5566601", "0.55576885", "0.55576885", "0.55576885", "0.55576885", "0.55576885", "0.55576885", "0.55576885", "0.55576885", "0.55576885", "0.55576885" ]
0.71464545
0
The manner of delivery and planting of the root system of the tree or plant.
def wortelAanplant(self):
    return self._wortelAanplant.get_waarde()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mezclar_bolsa(self):", "def standings():\n pass", "def plant(self):\n\t\ttic=time.clock()\n\t\tcommands=[]\n\t\tt=self.m.times\n\t\tauto=self.m.automatic\n\t\tpHeads=self.plantHeads\n\t\t#gather information about the soil at site\n\t\tdeepest=0\n\t\tdeepestPos=None\n\t\tfor h in pHeads:\n\t\t\tdepth=self.G.terrain.humusLayer.getDepth(h.getPos())\n\t\t\tassert depth>=0\n\t\t\tif depth>deepest:\n\t\t\t\tdeepest=depth\n\t\t\t\tdeepestPos=h.getPos()\n\t\tdepth=deepest\n\t\tdigTime=self.m.getDigTime(deepestPos)\n\t\tself.sim.stats['humus depths'].append(depth)\n\t\tif self.m.inverting: #determine the time. Dependent on digTime\n\t\t\tif self.m.invertingMethod=='KO':\n\t\t\t\tinvertTime=self.G.simParam['tCWhenInvKO']\n\t\t\telif self.m.invertingMethod=='Excavator':\n\t\t\t\tinvertTime=self.G.simParam['tInvExcavator']-digTime\n\t\t\telse:\n\t\t\t\traise Exception('cannot identify inverting method %s'%self.m.invertingMethod)\n\t\tfor pH in pHeads:\n\t\t\tpH.reset()\n\t\t\tmoundBould=[]\n\t\t\torig=pH.getPos()#middle of plantinghead\n\t\t\tboul=self.G.terrain.GetBoulders(orig, R=pH.radius)\n\t\t\troots=self.G.terrain.GetRoots(orig,R=pH.radius)\n\t\t\tdirect=self.m.direction-pi/2.+self.posCyl[1] #same dir as from machine to point\n\t\t\tsumA=0\n\t\t\timmobile=self.G.simParam['critStoneSize']\n\t\t\tdibbleDisturb=0.001\n\t\t\tself.m.stopControl()\n\t\t\tself.sim.stats['mound attempts']+=1\n\t\t\tfor r in roots: #determine if a root is hit in the critical area.\n\t\t\t\tif pH.rootCollide(r): #root is within area..\n\t\t\t\t\tprint \"striked a root..\"\n\t\t\t\t\tangle=abs(r.direction-direct)\n\t\t\t\t\tray1=[orig,fun.getCartesian([0,1],fromLocalCart=True, origin=orig, direction=r.direction)]\n\t\t\t\t\tray2=[orig,fun.getCartesian([0,1],fromLocalCart=True, origin=orig, direction=direct)]\n\t\t\t\t\tangle=fun.getAngle(ray1, ray2) #angle between root and planting head\n\t\t\t\t\tpH.strikedImmobile=True\n\t\t\t\t\tself.cmnd(commands, t['haltTime'],auto['haltMound'])\n\t\t\t\t\tfor head in pHeads: head.timeConsumption['halting']+=t['haltTime']\n\t\t\t\t\tif self.G.simParam['noRemound'] or angle>self.m.rootDegreesOK:\n\t\t\t\t\t\tself.debugPrint('pos: %s collided with root. angle was too much %s'%(str(orig), str(angle*180.0/pi)))\n\t\t\t\t\t\tpH.abort=True\n\t\t\t\t\t\tpH.done=True\n\t\t\t\t\telse: #remound\n\t\t\t\t\t\tprint \"remounds\"\n\t\t\t\t\t\tself.cmnd(commands, t['haltTime'],auto['haltMound'])\n\t\t\t\t\t\ttimeTmp=digTime+t['heapTime']\n\t\t\t\t\t\tself.cmnd(commands, timeTmp, auto['mound'])\n\t\t\t\t\t\tfor pH in pHeads:\n\t\t\t\t\t\t\tpH.timeConsumption['halting']+=t['haltTime'] #that's for both, if 2h\n\t\t\t\t\t\t\tpH.remounded=True\n\t\t\t\t\t\t\tpH.timeConsumption['mounding']+=timeTmp\n\t\t\t\t\t\t\n\n\t\t\t\t\t\n\t\t\tif not (pH.abort or pH.strikedImmobile):\n\t\t\t\tfor b in boul:\n\t\t\t\t\t#check if we are inside the scoop. 
It's the middle of the stone that matters\n\t\t\t\t\t#get local xy-coordinates\n\t\t\t\t\tcylPos=self.m.getCylindrical(b.pos,origin=orig, direction=direct)\n\t\t\t\t\ttwoDdist=self.m.getCartesian(cylPos, origin=orig, direction=direct, local=True)#not really optimal, could be improved\n\t\t\t\t\tinside=False #just to skip a really long if-statement\n\t\t\t\t\tif self.G.simParam['rectangular']:\n\t\t\t\t\t\tif b.radius+b.z>-pH.depth and collide(pH, b, o1pos=orig):\n\t\t\t\t\t\t\tinside=True\n\t\t\t\t\telif b.z**2+twoDdist[1]**2<(b.radius+pH.depth)**2 and collide(pH, b, o1pos=orig): #the first check is for the cylinder, through pythagoras with 2D[1] since cylinder and not sphere\n\t\t\t\t\t\tinside=True\n\t\t\t\t\tif inside: \n \t\t\t\t\t\t#old one: abs(bpos[0])<pH.width/2. and abs(bpos[1])<pH.length/2.:\n\t\t\t\t\t\tmoundBould.append(b)\n\t\t\t\t\t\tsumA+=b.area\n\t\t\t\t\t\tlocalPos=-twoDdist[1], b.z #2D position with z as y-axis\n\t\t\t\t\t\t#now, look how much it occuppies vertically.\n\t\t\t\t\t\ttwoDdist=self.m.getCartesian(cylPos, origin=orig, direction=direct, local=True)#not really optimal, could be improved\n\t\t\t\t\t\tif self.G.simParam['rectangular']:\n\t\t\t\t\t\t\tnodes=[(-pH.length*0.5,0), (-pH.length*0.5, -pH.depth), (pH.length*0.5, -pH.depth), (pH.length*0.5, 0)]\n\t\t\t\t\t\t\tlast=None\n\t\t\t\t\t\t\tpoints=[]\n\t\t\t\t\t\t\tfor node in nodes:#loop over the rectangle edges.\n\t\t\t\t\t\t\t\tif last:\n\t\t\t\t\t\t\t\t\tray=(last,node)\n\t\t\t\t\t\t\t\t\ttmp=col.intersectRaySphere(np.array(ray),b.radius,localPos, additionalInfo=True)\n\t\t\t\t\t\t\t\t\tif type(tmp)!=bool:\n\t\t\t\t\t\t\t\t\t\tfor point in tmp[1:]:\n\t\t\t\t\t\t\t\t\t\t\tpoints.append(list(point))\n\t\t\t\t\t\t\t\tlast=node\n\t\t\t\t\t\t\tassert len(points)!=1 #would be tangent but..\n\t\t\t\t\t\t\tupper=(-twoDdist[1], b.z+b.radius)\n\t\t\t\t\t\t\tlower=(-twoDdist[1], b.z-b.radius)\n\t\t\t\t\t\t\tif not col.pointInPolygon(upper, nodes):\n\t\t\t\t\t\t\t\tif len(points)==0: #it passed through the easy check above...\n\t\t\t\t\t\t\t\t\tupper=-pH.depth\n\t\t\t\t\t\t\t\t\tmoundBould.remove(b)\n\t\t\t\t\t\t\t\t\tsumA-=b.area\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tupper=max([p[1] for p in points])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tupper=upper[1]\n\t\t\t\t\t\t\tif not col.pointInPolygon(lower, nodes):\n\t\t\t\t\t\t\t\tif len(points)==0:\n\t\t\t\t\t\t\t\t\tlower=-pH.depth\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tlower=min([p[1] for p in points])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlower=lower[1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tr=b.radius\n\t\t\t\t\t\t\t#look how much of the stone that is within the scoop.\n\t\t\n\t\t\t\t\t\t\tpoints=col.circlesIntersectPoints((0,0), localPos, pH.depth, b.radius)\n\t\t\t\t\t\t\tassert points != False # we know that these circles collide.\n\t\t\t\t\t\t\tif points== True: #all of the stone inside or huge stone\n\t\t\t\t\t\t\t\tupper=b.z+b.radius\n\t\t\t\t\t\t\t\tlower=b.z-b.radius\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tupper=max(points[0][1], points[1][1])\n\t\t\t\t\t\t\t\tif col.pointInCircle((-twoDdist[1], b.z+b.radius), (0,0), pH.depth):\n\t\t\t\t\t\t\t\t\tassert b.z+b.radius>=upper\n\t\t\t\t\t\t\t\t\tupper=b.z+b.radius\n\t\t\t\t\t\t\t\tlower=min(points[0][1], points[1][1])\n\t\t\t\t\t\t\t\tif col.pointInCircle((-twoDdist[1], b.z-b.radius), (0,0), pH.depth):\n\t\t\t\t\t\t\t\t\tassert b.z-b.radius<=lower\n\t\t\t\t\t\t\t\t\tlower=b.z-b.radius\n\t\t\t\t\t\thInside=upper-lower\n\t\t\t\t\t\tassert 
hInside>=0\n\t\t\t\t\t\tratio=hInside/float(pH.depth)\n\t\t\t\t\t\tpH.strikedImmobile=True\n\t\t\t\t\t\tself.sim.stats['immobile boulder struck']+=1\n\t\t\t\t\t\tself.sim.stats['immobile vol sum']+=b.volume\n\t\t\t\t\t\tif ratio>self.m.immobilePercent:\n\t\t\t\t\t\t\tself.debugPrint(\"ABORTS %s percent is vertically occupided by an imobile boulder\"%str(ratio))\n\t\t\t\t\t\t\tpH.abort=True\n\t\t\t\t\t\t\tpH.done=True\n\t\t\t\t\t\t\tcommands=self.cmnd(commands, t['haltTime'],auto['haltMound'])\n\t\t\t\t\t\t\tfor head in pHeads:\n\t\t\t\t\t\t\t\thead.timeConsumption['halting']+=t['haltTime'] #that's for both, if 2h\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif b.radius>pH.biggestBlock:\n\t\t\t\t\t\t\tpH.biggestBlock=b.radius*2\n\t\t\t\tpH.moundSumA=sumA\t\t\n\t\t\tpH.moundObst=moundBould\n\t\t\th=Hole(orig,terrain=self.G.terrain,z=pH.depth, nodes=pH.getNodes(orig) , isSpherical=False)\n\t\t#time to mound and heap. With the Excavator inverting method, we don't take time for heaping now.\n\t\tif not self.m.inverting:\n\t\t\ttimeTmp=digTime+t['heapTime']\n\t\t\tcommands=self.cmnd(commands, timeTmp,auto['mound'])\n\t\telif self.m.inverting and self.m.invertingMethod=='KO': #heap first..\n\t\t\ttimeTmp=digTime+t['heapTime']\n\t\t\tcommands=self.cmnd(commands, timeTmp,auto['mound'])\n\t\telif self.m.inverting and self.m.invertingMethod=='Excavator': #don't heap..\n\t\t\ttimeTmp=digTime\n\t\t\tcommands=self.cmnd(commands, timeTmp,auto['mound'])\n\t\telse:\n\t\t\traise Exception('Logical error. If we are inverting, we need to use methods KO or Excavator, not %s'%self.invertingMethod)\n\t\tfor pH in pHeads:\n\t\t\tpH.timeConsumption['mounding']+=timeTmp\n\t\t#mounding failures\n\t\tfor h in self.plantHeads:\n\t\t\tif random.uniform(0,1)<self.m.moundingFailureProb and not h.remounded: #failure..\n\t\t\t\t\t\n\t\t\t\tif self.G.simParam['noRemound']:\n\t\t\t\t\th.debugPrint('failed mounding')\n\t\t\t\t\th.abort=True\n\t\t\t\telse:\n\t\t\t\t\th.debugPrint('Failed mounding.. the other heads have to wait')\n\t\t\t\t\tcommands=self.cmnd(commands, digTime+t['heapTime'],auto['mound'])\n\t\t\t\t\tfor pH in self.plantHeads:\n\t\t\t\t\t\tself.sim.stats['remound attempts']+=1\n\t\t\t\t\t\tpH.timeConsumption['mounding']+=digTime+t['heapTime']\n\t\t\t\t\t\tpH.remounded=True\n\t\t#it's time to invert\n\t\tif self.m.inverting:\n\t\t\tcommands=self.cmnd([], invertTime, auto=False)\n\t\t\treinverted=False\n\t\t\treinvertTime=digTime+t['heapTime'] #same for KO and Excv\n\t\t\tfor h in self.plantHeads:\n\t\t\t\tif pH.abort: continue\n\t\t\t\tself.sim.stats['inverting attempts']+=1\n\t\t\t\th.timeConsumption['inverting']+=invertTime\n\t\t\t\tif random.uniform(0,1)<self.m.invertFailureProb: #failure..\n\t\t\t\t\tself.debugPrint('reinverts')\n\t\t\t\t\tif self.G.simParam['noRemound']:\n\t\t\t\t\t\th.debugPrint('failed inverting')\n\t\t\t\t\t\th.abort=True\n\t\t\t\t\telif not reinverted:\n\t\t\t\t\t\treinverted=True\n\t\t\t\t\t\th.debugPrint('Failed mounding.. 
the other heads have to wait')\n\t\t\t\t\t\tcommands=self.cmnd(commands,reinvertTime,auto['mound'])\n\t\t\t\t\t\tfor pH in self.plantHeads:\n\t\t\t\t\t\t\tself.sim.stats['reinverting attempts']+=1\n\t\t\t\t\t\t\th.timeConsumption['inverting']+=reinvertTime\n\t\tself.plantSignals=0\n\t\tself.pHeadsUsed=0\n\t\tev=[]\n\t\tfor pH in pHeads:\n\t\t\tif not pH.abort: \n\t\t\t\tself.pHeadsUsed+=1\n\t\t\t\tpH.cause=\"plant\"\n\t\t\t\tpH.debugPrint(\"instructs pH to plant %s\")\n\t\t\t\tev.append(pH)\n\t\tif self.pHeadsUsed>0:\n\t\t\tcommands.append((\"interrupt\", ev)) #will later be recognized in run and self.interupt(pH) will be invoked. \n\t\t\tcommands.append((waituntil, self, self.plantingFinished)) #waits for one or both events.\n\t\tPlantingDevice.timesProf[1]+=time.clock()-tic\n\t\treturn commands", "def grande_partie(joueur1: object, joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list,\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur, joueur2.canonniere,\n joueur2.destroyer)\n verif_bateau(joueur1, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur, joueur2.canonniere,\n joueur2.destroyer)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur, joueur1.canonniere,\n joueur1.destroyer)\n verif_bateau(joueur2, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur, joueur1.canonniere,\n joueur1.destroyer)", "def grow(self):\n if self.water_level == 0:\n self.plant += FOOD_GROWTH * random()\n if self.plant > LEVEL_MAX:\n self.plant = LEVEL_MAX", "def aanleg(self):\n return self._aanleg.get_waarde()", "def jagen(self,locatieheld):\n\t\tself.xposheld, self.yposheld = locatieheld\n\t\txafstand = abs(self.xpos - self.xposheld)\n\t\tyafstand = abs(self.ypos - self.yposheld)\n\t\tif self.xpos > self.xposheld:\n\t\t\tbewegingx = \"links\"\n\t\telse:\n\t\t\tbewegingx = \"rechts\"\n\t\tif self.ypos > self.yposheld:\n\t\t\tbewegingy = \"omhoog\"\n\t\telse:\n\t\t\tbewegingy = \"omlaag\"\n\t\tif xafstand > yafstand:\n\t\t\tif bewegingx == \"links\":\n\t\t\t\tself.xpos = self.xpos -1\n\t\t\telse:\n\t\t\t\tself.xpos = self.xpos +1\n\t\telse:\n\t\t\tif bewegingy == \"omhoog\":\n\t\t\t\tself.ypos = self.ypos -1\n\t\t\telse:\n\t\t\t\tself.ypos = self.ypos +1\n\t\t\t\t\n\t\tif self.xpos < 1:\n\t\t\tself.xpos = 5\n\t\tif self.xpos > 5:\n\t\t\tself.xpos = 1\n\t\tif self.ypos < 1:\n\t\t\tself.ypos = 5\n\t\tif self.ypos > 5:\n\t\t\tself.ypos = 1\n\t\t\n\t\tself.updatepositie()", "def explode(self):\n fire_potential = self.flannability * self.weight\n if fire_potential < 10:\n return '...fizzle'\n elif fire_potential < 50:\n return '...boom!'\n else:\n return '...BABOOM!!'\n\n # part 3 sublass", "def boleta_expulsada(self):\n self._info(\"boleta_expulsada\")\n return None", "def test_partie(joueur1: object,\n joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion)\n verif_bateau(joueur1, joueur2.porte_avion)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion)\n verif_bateau(joueur2, joueur1.porte_avion)", "def 
calculateBGresourcesPlant(self):\n ids = np.array(range(0, self.no_plants))\n self._water_avail = np.zeros(self.no_plants)\n self._water_absorb = np.zeros(self.no_plants)\n self._water_exchanged_plants = np.zeros(self.no_plants)\n for gID in set(self._gIDs):\n # get plant indices of group members\n members = ids[np.where(self._gIDs == gID)]\n # make a graph dictionary of the group\n graph_dict_group = {i: self.graph_dict[i] for i in members}\n # make a list with indices of connected plants of the group\n link_list_group = np.array(\n self.getLinkList(graph_dict=graph_dict_group))\n if len(link_list_group) == 0:\n ## if the plant is not grafted water_absorbed and\n # water_available corresponds to SimpleBettina water uptake\n # and water_exchange is 0\n self._water_absorb[members] = self.getBGresourcesIndividual(\n psi_top=self._psi_top[members],\n psi_osmo=self._psi_osmo[members],\n ag_resistance=self._above_graft_resistance[members],\n bg_resistance=self._below_graft_resistance[members])\n self._water_avail[members] = self._water_absorb[members]\n else:\n self.calculateBGresourcesGroup(members=members,\n link_list_group=link_list_group)", "def __init__(\n self,\n height=20,\n width=20,\n initial_sheep=100,\n initial_wolves=50,\n sheep_reproduce=0.04,\n wolf_reproduce=0.05,\n wolf_gain_from_food=20,\n grass=False,\n grass_regrowth_time=30,\n sheep_gain_from_food=4,\n trees_carrots_ratio=0.5,\n YEAR=20,\n nb_of_hunters=0,\n ):\n super().__init__()\n # Set parameters\n self.height = height\n self.width = width\n self.initial_sheep = initial_sheep\n self.initial_wolves = initial_wolves\n self.sheep_reproduce = sheep_reproduce\n self.wolf_reproduce = wolf_reproduce\n self.wolf_gain_from_food = wolf_gain_from_food\n self.grass = grass\n self.grass_regrowth_time = grass_regrowth_time\n self.sheep_gain_from_food = sheep_gain_from_food\n self.trees_carrots_ratio = trees_carrots_ratio\n self.YEAR = YEAR\n self.nb_of_hunters = nb_of_hunters\n\n self.schedule = RandomActivationByBreed(self) # classe contenant un dictionnaire des types d'agents et agents existants par type, avec une ordre d'activation possible\n self.grid = MultiGrid(self.height, self.width, torus=True)\n self.datacollector = DataCollector(\n {\n \"Fox\": lambda m: m.schedule.get_breed_count(Predator),\n \"Rabbit\": lambda m: m.schedule.get_breed_count(Prey),\n }\n )\n\n # Create sheep:\n for i in range(self.initial_sheep):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n age = self.random.randrange(3*self.YEAR)\n energy = self.random.randrange( int(self.sheep_gain_from_food/2), 2 * self.sheep_gain_from_food)\n sheep = Prey(self.next_id(), (x, y), self, True, energy, age)\n self.grid.place_agent(sheep, (x, y))\n self.schedule.add(sheep)\n\n # Create wolves\n for i in range(self.initial_wolves):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n age = self.random.randrange(4*self.YEAR)\n energy = self.random.randrange(int(self.wolf_gain_from_food/2), 2 * self.wolf_gain_from_food)\n wolf = Predator(self.next_id(), (x, y), self, True, energy, age)\n self.grid.place_agent(wolf, (x, y))\n self.schedule.add(wolf)\n\n # Create grass patches\n if self.grass:\n for agent, x, y in self.grid.coord_iter():\n if self.trees_carrots_ratio < self.random.random(): # aléatoire du nombre d'arbres et de carottes\n fully_grown = self.random.choice([True, False])\n if fully_grown: # carottes ou pousses de carotes\n countdown = self.grass_regrowth_time\n else:\n countdown = 
self.random.randrange(self.grass_regrowth_time)\n plant = Plant(self.next_id(), (x, y), self, fully_grown, countdown)\n else:\n plant = Tree(self.next_id(), (x, y), self)\n self.grid.place_agent(plant, (x, y))\n self.schedule.add(plant)\n\n # create hunters\n for i in range(self.nb_of_hunters):\n x = self.random.randrange(self.width-13, self.width-7) # HUNTERMODIF\n y = self.random.randrange(self.height-13, self.height-7) # HUNTERMODIF\n hunter = Hunter(self.next_id(), (x, y), self)\n self.grid.place_agent(hunter, (x, y))\n self.schedule.add(hunter)\n\n self.running = True\n self.datacollector.collect(self)", "def craft(self, items):\n\n if items[0].looted and items[1].looted and items[2].looted:\n print(\"Seringue fabriquée ! Vous pouvez endormir le garde.\")\n self.stuff = [\"seringue\"]", "def battle_resting(self):\n pass", "def __str__(self):\n return self.designation + ' ' +self.winery + ' wine'", "def stalTijd(begintijd):\r\n starttijd = datetime.strptime(begintijd, '%d-%m-%Y %H:%M') # begintijd in juiste format\r\n huidigeTijd = datetime.strptime(time.strftime('%d-%m-%Y %H:%M'), '%d-%m-%Y %H:%M') # huidige tijd opvragen\r\n dagVerschil = (huidigeTijd - starttijd).days # verschil in dagen\r\n secondeVerschil = (huidigeTijd - starttijd).seconds # verschil in seconden\r\n dagMin = dagVerschil * 1440 # dagen naar minuten omrekenen\r\n secMin = secondeVerschil / 60 # seconden naar minuten omrekenen\r\n minuten = int(dagMin + secMin) # totaal aantal minuten\r\n uurMin = str(timedelta(minutes=minuten))[:-3]\r\n if 'day' not in uurMin: # wanneer tijdsduur minder dan een dag is\r\n tijdSplit = uurMin.split(':')\r\n uren = tijdSplit[0]\r\n if tijdSplit[1] != '00':\r\n minuten = tijdSplit[1].lstrip('0') # overbodige eerste '0' verwijderen\r\n else:\r\n minuten = tijdSplit[1][:1]\r\n if uren == '0': # wanneer aantal uren 0 is uren niet printen\r\n uurTekst = ''\r\n uren = ''\r\n else:\r\n uurTekst = ' uur en '\r\n if minuten == '1': # 'minuut' printen in plaats van 'minuten' bij 1 minuut\r\n tijdsDuur = 'Je fiets is ' + uren + uurTekst + minuten + ' minuut gestald'\r\n else:\r\n tijdsDuur = 'Je fiets is ' + uren + uurTekst + minuten + ' minuten gestald'\r\n return tijdsDuur\r\n else:\r\n tijdSplit = uurMin.split(' ')\r\n urenMinuten = tijdSplit[2].split(':')\r\n dagen = tijdSplit[0]\r\n uren = urenMinuten[0]\r\n if urenMinuten[1] != '00':\r\n minuten = urenMinuten[1].lstrip('0') # overbodige eerste '0' verwijderen\r\n else:\r\n minuten = urenMinuten[1][:1]\r\n if uren == '0': # wanneer aantal uren 0 is uren niet printen\r\n uurTekst = ''\r\n uren = ''\r\n else:\r\n uurTekst = ' uur en '\r\n if 'days' in uurMin: # 'dagen' printen in plaats van 'dag' bij 1 dag\r\n if minuten == '1':\r\n tijdsDuur = 'Je fiets is ' + dagen + ' dagen, ' + uren + uurTekst + minuten + ' minuut gestald'\r\n else:\r\n tijdsDuur = 'Je fiets is ' + dagen + ' dagen, ' + uren + uurTekst + minuten + ' minuten gestald'\r\n return tijdsDuur\r\n else:\r\n if minuten == '1':\r\n tijdsDuur = 'Je fiets is ' + dagen + ' dag, ' + uren + uurTekst + minuten + ' minuut gestald'\r\n else:\r\n tijdsDuur = 'Je fiets is ' + dagen + ' dag, ' + uren + uurTekst + minuten + ' minuten gestald'\r\n return tijdsDuur", "def petite_partie(joueur1: object,\n joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur)\n 
verif_bateau(joueur1, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur)\n verif_bateau(joueur2, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur)", "def BoatEarMoon():\n D=1\n alpha=math.radians(83)\n beta=math.radians(42) \n phi=math.radians(70) \n mu=math.radians(10) \n omega=math.radians(30) \n A=25, a=12, b=20, L=0, P=0, W1=0, W2=0, N=0\n \n resMode()", "def start_episode(self, world: wws.WumpusWorld):\n\n world_info = {k: [] for k in ('Hunter', 'Pits', 'Wumpus', 'Gold', 'Exits')}\n world_info['Size'] = (world.size.x, world.size.y)\n world_info['Blocks'] = [(c.x, c.y) for c in world.blocks]\n\n for obj in world.objects:\n if isinstance(obj, wws.Hunter):\n world_info['Hunter'].append((obj.location.x, obj.location.y))\n elif isinstance(obj, wws.Pit):\n world_info['Pits'].append((obj.location.x, obj.location.y))\n elif isinstance(obj, wws.Wumpus):\n world_info['Wumpus'].append((obj.location.x, obj.location.y))\n elif isinstance(obj, wws.Exit):\n world_info['Exits'].append((obj.location.x, obj.location.y))\n elif isinstance(obj, wws.Gold):\n world_info['Gold'].append((obj.location.x, obj.location.y))\n\n print('World details:')\n for k in ('Size', 'Pits', 'Wumpus', 'Gold', 'Exits', 'Blocks'):\n print(' {}: {}'.format(k, world_info.get(k, None)))\n\n self.counter = 0\n self.total_counter = 0\n self.reward = 0\n self.result_sequence_actions = []\n self.result_reward = 0\n hunt_wumpus_problem = HuntWumpusProblem(world, wws.Hunter.Actions)\n\n result = self.iterative_deepening_search(hunt_wumpus_problem)\n \n if not result.sequence_actions:\n self.result_reward = -1\n self.result_sequence_actions = [wws.Hunter.Actions.CLIMB]\n else:\n self.result_reward = result.total_reward\n self.result_sequence_actions = result.sequence_actions\n\n if result.sequence_actions == []:\n print(\"There is no solution for the problem\")\n return\n \n print(\"\")\n print(\"\".join([\"*\" for i in range(100)]))\n print(f\"The counter of nodes explored is {self.total_counter} (counting from LIMIT = 0)\\n\")\n print(f\"Action sequence: \\n{list(map(lambda x: x.name, self.result_sequence_actions))}\\n\")\n print(f\"The total reward of the search is {self.result_reward}\")\n print(\"\".join([\"*\" for i in range(100)]))\n print(\"\")", "def finTour(self):\n print(\"fin du tour\")\n self.etat = \"Fin\"\n if self.joueurActif.nbRessource + self.joueurActif.getNbRessourceTour() <= self.joueurActif.nbMaxRessource :\n self.joueurActif.nbRessource += self.joueurActif.getNbRessourceTour()\n else:\n self.joueurActif.nbRessource = self.joueurActif.nbMaxRessource\n print(self.joueurActif.nbRessource)\n if self.joueurActif == self.joueur1:\n self.joueurActif = self.joueur2\n print(\"Au joueur 2 de jouer\")\n else:\n self.joueurActif = self.joueur1\n print(\"Au joueur 1 de jouer\")\n for iEntite in self.joueurActif.entiteResetDeplacement:\n iEntite.setMoove(True)\n for iEntite in self.joueurActif.entiteResetCombat:\n iEntite.setCanAttack(True)\n \n if self.joueur1.nbRessource >= 2000:\n print(\"FIN DE LA PARTIE LE JOUEUR 1 A GAGNER\")\n if self.joueur2.nbRessource >= 2000:\n print(\"FIN DE LA PARTIE LE JOUEUR 2 A GAGNER\") \n \n self.etat = \"En jeu\"", "def lancement_partie(joueur1: object, joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list,\n number_of_ships: int):\n # Victoire devient True 
quand un joueur détruit tout les bateaux adverse\n victoire = False\n # PHASE 1 PLACEMENT BATEAU\n\n positionner_bateau(joueur1, number_of_ships)\n positionner_bateau(joueur2, number_of_ships)\n\n # PHASE 2 VERIFICATION DE L ETAT DES BATEAUX\n while not victoire:\n\n if number_of_ships == 3:\n petite_partie(joueur1, joueur2, tableau_invisible_joueur1, tableau_invisible_joueur2)\n\n elif number_of_ships == 1:\n test_partie(joueur1, joueur2, tableau_invisible_joueur1, tableau_invisible_joueur2)\n\n elif number_of_ships == 5:\n grande_partie(joueur1,\n joueur2,\n tableau_invisible_joueur1, tableau_invisible_joueur2,\n )\n\n if verif_win(joueur2, number_of_ships):\n victoire = True\n print(\"le joueur 1 a gagné\")\n\n if verif_win(joueur1, number_of_ships):\n victoire = True\n print(\"le joueur 2 a gagné\")\n\n envoi_score(joueur1, joueur2)\n afficher_score(joueur1, joueur2)", "def bomb_planted(event_var):\r\n debug.write(\"[SourceRPG] Handling bomb_planted\", 1)\r\n if isFairForTeam(event_var['es_userteam']) or not int(unfairAdvantage):\r\n if es.isbot(event_var['userid']) and not int(botsGetXpOnEvents):\r\n return\r\n player = players[event_var['userid']]\r\n player.addXp( int(bombPlantXp) * player['level'], 'planting the bomb' )\r\n debug.write(\"[SourceRPG] bomb_planted handled\", 1)", "def theRoof(pos, blockTypeMain = wool , mainColor=wPurple, replaceGlass = wGlass):\n \n # try again the same trick to add the roof\n # Middle part\n for i in range(0,12,1):\n iy = i\n if i >= 6:\n iy=11-i\n #print i, iy\n mc.setBlocks(pos.x-4+i, pos.y+10+iy, pos.z+4,\n pos.x-4+i, pos.y+10+iy, pos.z+29, blockTypeMain, mainColor)\n\n # RIGHT SIDE of the house\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+5+ii,\n pos.x-13+ii, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+8,\n pos.x-11+ii, pos.y+9+ii, pos.z+26-ii, material)\n \n # and LEFT side of the house\n xAdjust = 21\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5-ii+xAdjust, pos.y+9+ii, pos.z+5+ii,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-7-ii+xAdjust, pos.y+9+ii, pos.z+8,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+26-ii, material)", "def main():\n BATTLEFIELD_CONF = {\n 'field_height': 10,\n 'field_width': 10,\n }\n\n FLEET_PROPERTIES = {\n 'submarine': {'length': 1, 'direction': None, },\n 'destroyer': {'length': 2, 'direction': 'horizontal', },\n 'cruiser': {'length': 3, 'direction': 'horizontal', },\n 'carrier': {'length': 4, 'direction': 'vertical', },\n }\n\n battle_field = FieldDesigner()\n\n battle_field.design_field(\n height=BATTLEFIELD_CONF['field_height'],\n width=BATTLEFIELD_CONF['field_width'],\n )\n\n submarine = BattleShip(\n ship_length=FLEET_PROPERTIES['submarine']['length'],\n direction=FLEET_PROPERTIES['submarine']['direction'],\n field=battle_field,\n max_field_grid=BATTLEFIELD_CONF['field_width'],\n type='submarine',\n )\n\n cruiser = BattleShip(\n ship_length=FLEET_PROPERTIES['cruiser']['length'],\n direction=FLEET_PROPERTIES['cruiser']['direction'],\n field=battle_field,\n max_field_grid=BATTLEFIELD_CONF['field_width'],\n type='cruiser',\n )\n\n destroyer = BattleShip(\n ship_length=FLEET_PROPERTIES['destroyer']['length'],\n direction=FLEET_PROPERTIES['destroyer']['direction'],\n field=battle_field,\n 
max_field_grid=BATTLEFIELD_CONF['field_width'],\n type='destroyer',\n )\n\n carrier = BattleShip(\n ship_length=FLEET_PROPERTIES['carrier']['length'],\n direction=FLEET_PROPERTIES['carrier']['direction'],\n field=battle_field,\n max_field_grid=BATTLEFIELD_CONF['field_width'],\n type='carrier',\n )\n\n submarine01 = submarine.place_ship()\n cruiser01 = cruiser.place_ship()\n destroyer01 = destroyer.place_ship()\n carrier01 = carrier.place_ship()\n\n fleet_inventory = Fleet(\n [\n submarine01,\n cruiser01,\n destroyer01,\n carrier01,\n ]\n )\n\n print(fleet_inventory)\n print(battle_field)", "def verteileKarten(anzahlSpieler):\n pass", "def winkel(self, *args, **kwargs):\n\t\t\n if kwargs.get('h'):\n print(\"\\nWinkel der sphärischen Geraden mit einer anderen sphärischen\")\n print(\"Geraden im gemeinsamen Schnittpunkt\\n\")\t\t\n print(\"Aufruf sgerade . winkel( sgerade1 )\\n\")\t\t \n print(\" sgerade sphärische Gerade\\n\")\n return\n\t\t\t\t\n try:\t\t\t\t\n if len(args) != 1:\n raise AglaError('sphärische Gerade angeben')\n gg = args[0]\t\t \n if not isinstance(gg, sGerade):\t\t\t \n raise AglaError(\"sphärische Gerade angeben\")\n except AglaError as e:\n print('agla:', str(e))\n return\t\n\t\t\n if gg == self:\t\t\n return 0\t\t\t\n P1 = self.pol\t\t\t\n P2 = gg.pol\n wi1 = P1[0].e.winkel(P2[0].e)\n wi2 = P1[0].e.winkel(P2[1].e)\n return wi1, wi2", "def Dragon_Blade(self):\t\t\n\t\tprint(self.name.Title() + \" Dragon blade!\")", "def mover_bm_derecha(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1,\n self.casilla[1]],\n [self.vertice_2[0] + self.velocidad ,\n self.vertice_2[1]],\n [self.vertice_1[0] + 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1,\n self.casilla[1] + 1],\n [self.vertice_4[0] + self.velocidad,\n self.vertice_4[1]],\n self.vertice_1)\n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x += self.velocidad * (self.x <= 655)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] + self.nueva_posicion_posible_parte_superior[1], self.casilla[1]]\n self.redefinir_vertices()", "def water_uptake_campbell(self, soil):\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n root_hydr_cond = np.zeros(soil.total_layers)\r\n shoot_hydr_cond = np.zeros(soil.total_layers)\r\n plant_hydr_cond = np.zeros(soil.total_layers)\r\n root_activity = np.zeros(soil.total_layers)\r\n root_cond_adj = np.zeros(soil.total_layers)\r\n tot_root_cond_adj = 0\r\n salinity_factor = np.zeros(soil.total_layers)\r\n soil_water_pot_avg = 0\r\n WAT_POT_FIELD_CAP = -33\r\n\r\n # Transpiration\r\n self.pot_transp = daily_ref_evap_transp * self.light_intercpt\r\n self.max_pot_transp = (self.campbell_max_daily_transp *\r\n self.light_intercpt)\r\n self.expect_transp = min(self.pot_transp, self.max_pot_transp) # mm/day\r\n\r\n # Plant hydraulic conductance (kg s m-4)\r\n tot_plant_hydr_cond = (self.max_pot_transp /\r\n (WAT_POT_FIELD_CAP -\r\n self.leaf_water_pot_stress_onset))\r\n # assumption of 2/3 of plant hydraulic conductance is from roots\r\n tot_root_hydr_cond = tot_plant_hydr_cond / 0.65\r\n # assumption of 1/3 of plant hydraulic conductivity is from shoots\r\n tot_shoot_hydr_cond = tot_plant_hydr_cond / 0.35\r\n\r\n for lyr in soil.layers:\r\n root_activity[lyr] = 1\r\n salinity_factor[lyr] = 1\r\n root_cond_adj[lyr] = (root_activity[lyr] * 
self.root_fraction[lyr]\r\n * salinity_factor[lyr])\r\n root_hydr_cond[lyr] = tot_root_hydr_cond * root_cond_adj[lyr]\r\n tot_root_cond_adj += root_cond_adj[lyr]\r\n\r\n # Root, shoot and plant hydraulic conductance(kg s m-4)\r\n for lyr in soil.layers:\r\n if root_cond_adj[lyr] > 0:\r\n shoot_hydr_cond[lyr] = (tot_shoot_hydr_cond *\r\n root_cond_adj[lyr] / tot_root_cond_adj)\r\n plant_hydr_cond[lyr] = (root_hydr_cond[lyr] *\r\n shoot_hydr_cond[lyr] /\r\n (root_hydr_cond[lyr] +\r\n shoot_hydr_cond[lyr]))\r\n else:\r\n plant_hydr_cond[lyr] = 0\r\n\r\n tot_root_hydr_cond *= tot_root_cond_adj\r\n tot_plant_hydr_cond = ((tot_root_hydr_cond * tot_shoot_hydr_cond) /\r\n (tot_root_hydr_cond + tot_shoot_hydr_cond))\r\n\r\n if tot_plant_hydr_cond > 0:\r\n for lyr in soil.layers:\r\n soil_water_pot_avg += (soil.water_potential[lyr] *\r\n root_cond_adj[lyr])\r\n leaf_water_pot = (soil_water_pot_avg - self.expect_transp /\r\n tot_plant_hydr_cond)\r\n if leaf_water_pot < self.leaf_water_pot_stress_onset:\r\n leaf_water_pot = ((tot_plant_hydr_cond * soil_water_pot_avg *\r\n (self.leaf_water_pot_stress_onset -\r\n self.leaf_water_pot_wilt_point) +\r\n self.leaf_water_pot_wilt_point *\r\n self.expect_transp)\r\n / (tot_plant_hydr_cond *\r\n (self.leaf_water_pot_stress_onset -\r\n self.leaf_water_pot_wilt_point) +\r\n self.expect_transp))\r\n if leaf_water_pot < self.leaf_water_pot_wilt_point:\r\n leaf_water_pot = self.leaf_water_pot_wilt_point\r\n self.att_transp = 0\r\n transp_ratio = self.att_transp / self.expect_transp\r\n\r\n elif leaf_water_pot < self.leaf_water_pot_stress_onset:\r\n self.att_transp = (self.expect_transp * (leaf_water_pot -\r\n self.leaf_water_pot_wilt_point) / (\r\n self.leaf_water_pot_stress_onset -\r\n self.leaf_water_pot_wilt_point))\r\n transp_ratio = self.att_transp / self.expect_transp\r\n\r\n else:\r\n self.att_transp = self.expect_transp\r\n transp_ratio = 1\r\n # crop water uptake (kg/m2/d = mm/d)\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = (plant_hydr_cond[lyr] *\r\n (soil.water_potential[lyr] -\r\n leaf_water_pot) * transp_ratio)\r\n if self.water_uptake[lyr] < 0:\r\n self.water_uptake[lyr] = 0\r\n self.crop_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.crop_transp\r\n self.cum_pot_transp += self.expect_transp\r\n self.transp_ratio = self.crop_transp / self.expect_transp", "def fit_house_in_diamond(houses_copy, batteries):\n\n # Output huis die overgebleven is\n output_missing_house = houses_copy[0].get_output()\n\n # Sorteer batterijen resterend capaciteit hoog > laag, en selecteer meest_resterende batterij\n batteries.sort(key=lambda battery: battery.get_remaining(), reverse=True)\n copy_batteries = deepcopy(batteries)\n copy_batteries.pop(0)\n battery_with_most_capacity = batteries[0]\n\n # Blijf loopen, en huizen efficient verwisselen, tot het huis in de batterij past\n for index, battery in enumerate(copy_batteries):\n second_most_battery_capacity = batteries[index + 1]\n capacity_battery_second = second_most_battery_capacity.get_remaining()\n\n # Kijk naar de huizen in de eerst batterij en pak hoogste output\n houses_first_battery = battery_with_most_capacity.get_houses()\n house_most_output = max(houses_first_battery, key=lambda house: house.get_output())\n house_first_index = houses_first_battery.index(house_most_output)\n output_house_most = house_most_output.get_output()\n\n # Kijk naar de huizen in de tweede batterij\n houses_second_battery = second_most_battery_capacity.get_houses()\n second_index_house = None\n 
second_house_output = 100\n least_remaining_battery_2 = 100\n\n # Pak het huis met een output dat ervoor zorgt dat missende huis past\n for index, house in enumerate(houses_second_battery):\n output_house = house.get_output()\n remaining_battery_2 = capacity_battery_second - (output_house_most - output_house)\n\n # Let hierop dat de eerste en tweede batterij de wissel aankunnen en dat er in één wissel genoeg ruimte is voor het missende huis\n if remaining_battery_2 < least_remaining_battery_2 and remaining_battery_2 >= 0:\n second_house_output = output_house\n second_index_house = index\n least_remaining_battery_2 = remaining_battery_2\n\n houses_first_battery.pop(house_first_index)\n second_house = houses_second_battery.pop(second_index_house)\n houses_first_battery.append(second_house)\n houses_second_battery.append(house_most_output)\n\n if output_missing_house < battery_with_most_capacity.get_remaining():\n houses_first_battery.append(houses_copy.pop(0))\n break" ]
[ "0.6290834", "0.6014581", "0.59381443", "0.5893181", "0.5864082", "0.5799145", "0.5793714", "0.5658449", "0.5597196", "0.55904484", "0.5589598", "0.5574663", "0.556646", "0.55553424", "0.5544154", "0.5526583", "0.5516136", "0.5506473", "0.5501297", "0.5497164", "0.54813373", "0.5476595", "0.5472306", "0.54705244", "0.5465644", "0.5464727", "0.54616815", "0.54614395", "0.544259", "0.54206085" ]
0.6271675
1
parsing the html node for template definitions
def parse_html_template_node(htmlNode): from BeautifulSoup import NavigableString node = HtmlTemplateDefinition() node.tag = htmlNode.name for name, value in htmlNode.attrs: if name != "define": node.attrs[name] = value else: node.defines = value for subnode in htmlNode: if isinstance(subnode, NavigableString): node.children.append(HtmlTextNode(str(subnode))) continue sub_def = parse_html_template_node(subnode) node.sub_definitions[sub_def.defines] = sub_def return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_starting_template(checker):\n contents = labeled.contents(label=\"template\")\n _ = tomllib.loads(contents)", "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)", "def parse(force=False):\r\n\tfrom htmltemplate import WWW_DIR, TEMPLATE_FILE, TEMPLATE_PY\r\n\t# pylint: disable=duplicate-string-formatting-argument\r\n\tprint(\"Parse html template\")\r\n\tlines = open(WWW_DIR+TEMPLATE_FILE).readlines()\r\n\tpyClassFile = open(TEMPLATE_PY,\"w\")\r\n\tpyClassFile.write(\"''' File automatically generated with template.html content '''\\nfrom htmltemplate.template import Template \\n\")\r\n\tstack = []\r\n\tfor line in lines:\r\n\t\tif \"<!--\" in line:\r\n\t\t\tspl = line.split(\"<!--\")\r\n\t\t\tif \":begin-->\" in line:\r\n\t\t\t\tclassname = spl[1].split(\":begin-->\")[0]\r\n\t\t\t\tstack.append([classname,\"\",\"\"])\r\n\t\t\telif \":end-->\" in line:\r\n\t\t\t\tclassname = spl[1].split(\":end-->\")[0]\r\n\t\t\t\tif classname != stack[-1][0]:\r\n\t\t\t\t\traise SyntaxError()\r\n\t\t\t\tclassname, text, comment = stack.pop()\r\n\t\t\t\tattributes, beginTag, endTag, beginFormat, endFormat = findall(r'\\%\\([A-Za-z_0-9]*\\)s',text)\r\n\r\n\t\t\t\tprint(\"Html template update %s\"%classname)\r\n\t\t\t\tclassattributes = set()\r\n\t\t\t\tfor attribute in attributes:\r\n\t\t\t\t\tclassattributes.add(attribute)\r\n\r\n\t\t\t\tcomment = comment.rstrip()\r\n\r\n\t\t\t\tpyClassFile.write(\"\"\"\\n%s\\n\"\"\"%comment)\r\n\r\n\t\t\t\tif beginTag != \"\":\r\n\t\t\t\t\tpyClassFile.write(\"\"\"begTag%s = b'''%s'''\\n\"\"\"%(classname,beginTag))\r\n\t\t\t\tif endTag != \"\":\r\n\t\t\t\t\tpyClassFile.write(\"\"\"endTag%s = b'''%s'''\\n\"\"\"%(classname,endTag))\r\n\t\t\t\tpyClassFile.write(\"\"\"def %s(*args, **params):\\n\"\"\"%classname)\r\n\r\n\t\t\t\tpyClassFile.write(\"\"\"\\tself = Template(*((\"%s\",) + args), **params)\\n\\n\"\"\"%classname)\r\n\r\n\t\t\t\tpyClassFile.write(\"\"\"\\tdef getBegin(self):\\n\"\"\")\r\n\t\t\t\tif beginFormat == \"\":\r\n\t\t\t\t\tif beginTag != \"\":\r\n\t\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\tglobal begTag%s\\n\"\"\"%classname)\r\n\t\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\treturn begTag%s\\n\"\"\"%(classname))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\treturn b''\\n\"\"\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\tglobal begTag%s\\n\"\"\"%classname)\r\n\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\treturn begTag%s%s(%s)\\n\"\"\"%(classname, \"\\x25\",beginFormat[:-1]))\r\n\t\t\t\tpyClassFile.write(\"\"\"\\tself.getBegin = getBegin\\n\\n\"\"\")\r\n\r\n\t\t\t\tpyClassFile.write(\"\"\"\\tdef getEnd(self):\\n\"\"\")\r\n\t\t\t\tif endFormat == \"\":\r\n\t\t\t\t\tif endTag != \"\":\r\n\t\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\tglobal endTag%s\\n\"\"\"%classname)\r\n\t\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\treturn endTag%s\\n\"\"\"%(classname))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\treturn b''\\n\"\"\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\tglobal 
endTag%s\\n\"\"\"%classname)\r\n\t\t\t\t\tpyClassFile.write(\"\"\"\\t\\treturn endTag%s%s(%s)\\n\"\"\"%(classname, \"\\x25\", endFormat[:-1]))\r\n\t\t\t\tpyClassFile.write(\"\"\"\\tself.getEnd = getEnd\\n\\n\"\"\")\r\n\r\n\t\t\t\tfor attribute in classattributes:\r\n\t\t\t\t\tif attribute in [\"pattern\"]:\r\n\t\t\t\t\t\tpyClassFile.write('\\tself.{:<12} = params.get(\"{}\", b\"*\")\\n'.format(attribute,attribute))\r\n\t\t\t\t\telif attribute in [\"id\",\"name\"]:\r\n\t\t\t\t\t\tpyClassFile.write('\\tself.{:<12} = params.get(\"{}\", b\"%d\"%id(self))\\n'.format(attribute,attribute))\r\n\t\t\t\t\telif attribute in [\"disabled\",\"active\"]:\r\n\t\t\t\t\t\tpyClassFile.write('\\tself.{:<12} = params.get(\"{}\", False)\\n'.format(attribute,attribute))\r\n\t\t\t\t\telif attribute in [\"checked\"]:\r\n\t\t\t\t\t\tpyClassFile.write('\\tself.{:<12} = params.get(\"{}\", True)\\n'.format(attribute,attribute))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpyClassFile.write('\\tself.{:<12} = params.get(\"{}\", b\"\")\\n'.format(attribute,attribute))\r\n\t\t\t\tpyClassFile.write('\\treturn self\\n')\r\n\t\t\telse:\r\n\t\t\t\traise SyntaxError()\r\n\t\telse:\r\n\t\t\tif line.strip() != \"\":\r\n\t\t\t\tif len(stack) >= 1:\r\n\t\t\t\t\tstack[-1][1] += line.strip()\r\n\t\t\t\t\tstack[-1][2] += \"# \" +line.lstrip()\r\n\r\n\tpyClassFile.close()", "def html_template_file(self):\n pass", "def node():\n return render_template('nodes.html')", "def __init__(self, template):\n\n self.template = template\n self.parsed_template = {}", "def _parse(\n self, source: str, name: t.Optional[str], filename: t.Optional[str]\n ) -> nodes.Template:\n return Parser(self, source, name, filename).parse()", "def _parse(self, template, fpos=0):\r\n # blank out comments\r\n # (So that its content does not collide with other syntax, and\r\n # because removing them completely would falsify the character-\r\n # position (\"match.start()\") of error-messages)\r\n template = self._reComment.sub(lambda match: self._comment_start+\" \"*len(match.group(1))+match.group(2), template)\r\n\r\n # init parser\r\n parsetree = []\r\n curr = 0 # current position (= end of previous block)\r\n block_type = None # block type: if,for,macro,raw,...\r\n block_indent = None # None: single-line, >=0: multi-line\r\n\r\n # find blocks\r\n for match in self._reBlock.finditer(template):\r\n start = match.start()\r\n # process template-part before this block\r\n if start > curr:\r\n self._parse_sub(parsetree, template[curr:start], fpos)\r\n\r\n # analyze block syntax (incl. 
error-checking and -messages)\r\n keyword = None\r\n block = match.groupdict()\r\n pos__ = fpos + start # shortcut\r\n if block[\"sKeyw\"] is not None: # single-line block tag\r\n block_indent = None\r\n keyword = block[\"sKeyw\"]\r\n param = block[\"sParam\"]\r\n content = block[\"sContent\"]\r\n if block[\"sSpace\"]: # restore spaces before start-tag\r\n if len(parsetree) > 0 and parsetree[-1][0] == \"str\":\r\n parsetree[-1] = (\"str\", parsetree[-1][1] + block[\"sSpace\"])\r\n else:\r\n parsetree.append((\"str\", block[\"sSpace\"]))\r\n pos_p = fpos + match.start(\"sParam\") # shortcuts\r\n pos_c = fpos + match.start(\"sContent\")\r\n elif block[\"mKeyw\"] is not None: # multi-line block tag\r\n block_indent = len(block[\"indent\"])\r\n keyword = block[\"mKeyw\"]\r\n param = block[\"mParam\"]\r\n content = block[\"mContent\"]\r\n pos_p = fpos + match.start(\"mParam\")\r\n pos_c = fpos + match.start(\"mContent\")\r\n ignored = block[\"mIgnored\"].strip()\r\n if ignored and ignored != self._comment_start:\r\n raise TemplateSyntaxError(\"No code allowed after block-tag.\", self._errpos(fpos+match.start(\"mIgnored\")))\r\n elif block[\"mEnd\"] is not None: # multi-line block end\r\n if block_type is None:\r\n raise TemplateSyntaxError(\"No block to end here/invalid indent.\", self._errpos(pos__) )\r\n if block_indent != len(block[\"mEnd\"]):\r\n raise TemplateSyntaxError(\"Invalid indent for end-tag.\", self._errpos(pos__) )\r\n ignored = block[\"meIgnored\"].strip()\r\n if ignored and ignored != self._comment_start:\r\n raise TemplateSyntaxError(\"No code allowed after end-tag.\", self._errpos(fpos+match.start(\"meIgnored\")))\r\n block_type = None\r\n elif block[\"sEnd\"] is not None: # single-line block end\r\n if block_type is None:\r\n raise TemplateSyntaxError(\"No block to end here/invalid indent.\", self._errpos(pos__))\r\n if block_indent is not None:\r\n raise TemplateSyntaxError(\"Invalid indent for end-tag.\", self._errpos(pos__))\r\n block_type = None\r\n else:\r\n raise TemplateException(\"FATAL: Block regexp error. Please contact the author. (%s)\" % match.group())\r\n\r\n # analyze block content (mainly error-checking and -messages)\r\n if keyword:\r\n keyword = keyword.lower()\r\n if 'for' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'for'\r\n cond = self._reForParam.match(param)\r\n if cond is None:\r\n raise TemplateSyntaxError(\"Invalid 'for ...' 
at '%s'.\" %(param), self._errpos(pos_p))\r\n names = tuple(n.strip() for n in cond.group(\"names\").split(\",\"))\r\n self._testexpr(cond.group(\"iter\"), pos_p+cond.start(\"iter\"))\r\n parsetree.append((\"for\", names, cond.group(\"iter\"), self._parse(content, pos_c)))\r\n elif 'if' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block at '%s'.\" %(match.group()), self._errpos(pos__))\r\n if not param:\r\n raise TemplateSyntaxError(\"Missing condition for 'if' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'if'\r\n self._testexpr(param, pos_p)\r\n parsetree.append((\"if\", param, self._parse(content, pos_c)))\r\n elif 'elif' == keyword:\r\n if block_type != 'if':\r\n raise TemplateSyntaxError(\"'elif' may only appear after 'if' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n if not param:\r\n raise TemplateSyntaxError(\"Missing condition for 'elif' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n self._testexpr(param, pos_p)\r\n parsetree.append((\"elif\", param, self._parse(content, pos_c)))\r\n elif 'else' == keyword:\r\n if block_type not in ('if', 'for'):\r\n raise TemplateSyntaxError(\"'else' may only appear after 'if' of 'for' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n if param:\r\n raise TemplateSyntaxError(\"'else' may not have parameters at '%s'.\" %(match.group()), self._errpos(pos__))\r\n parsetree.append((\"else\", self._parse(content, pos_c)))\r\n elif 'macro' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'macro'\r\n # make sure param is \"\\w+\" (instead of \".+\")\r\n if not param:\r\n raise TemplateSyntaxError(\"Missing name for 'macro' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n if not self._reMacroParam.match(param):\r\n raise TemplateSyntaxError(\"Invalid name for 'macro' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n #remove last newline\r\n if len(content) > 0 and content[-1] == '\\n':\r\n content = content[:-1]\r\n if len(content) > 0 and content[-1] == '\\r':\r\n content = content[:-1]\r\n parsetree.append((\"macro\", param, self._parse(content, pos_c)))\r\n\r\n # parser-commands\r\n elif 'raw' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block '%s'.\" %(match.group()), self._errpos(pos__))\r\n if param:\r\n raise TemplateSyntaxError(\"'raw' may not have parameters at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'raw'\r\n parsetree.append((\"str\", content))\r\n elif 'include' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block '%s'.\" %(match.group()), self._errpos(pos__))\r\n if param:\r\n raise TemplateSyntaxError(\"'include' may not have parameters at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'include'\r\n try:\r\n u = self._load(content.strip())\r\n except Exception,err:\r\n raise TemplateIncludeError(err, self._errpos(pos__))\r\n self._includestack.append((content.strip(), u)) # current filename/template for error-msg.\r\n p = self._parse(u)\r\n self._includestack.pop()\r\n parsetree.extend(p)\r\n elif 'set_escape' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block '%s'.\" %(match.group()), self._errpos(pos__))\r\n if param:\r\n raise TemplateSyntaxError(\"'set_escape' may not have 
parameters at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'set_escape'\r\n esc = content.strip().upper()\r\n if esc not in ESCAPE_SUPPORTED:\r\n raise TemplateSyntaxError(\"Unsupported escape '%s'.\" %(esc), self._errpos(pos__))\r\n self.escape = ESCAPE_SUPPORTED[esc]\r\n else:\r\n raise TemplateSyntaxError(\"Invalid keyword '%s'.\" %(keyword), self._errpos(pos__))\r\n curr = match.end()\r\n\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing end-tag.\", self._errpos(pos__))\r\n\r\n if len(template) > curr: # process template-part after last block\r\n self._parse_sub(parsetree, template[curr:], fpos)\r\n\r\n return parsetree", "def test_read_namespaced_template(self):\n pass", "def parse_template(data, template):\n img_html = \"\"\"<div class=\"thumb-wrap\"><div class=\"thumb-holder\"></div><a href=\"{{URL}}\" target=\"_top\"><div class=\"thumb-img\" style=\"background-image:url('{{IMG}}');\"></div></a></div>\"\"\"\n template = template.replace('{{URL}}', data['link'].replace('http:','https:'))\n template = template.replace('{{URLX}}', data['link'])\n template = template.replace('{{TITLE}}', data['title'])\n #template = template.replace('{{BLURB}}', data['summary'])\n img_html = img_html.replace('{{URL}}', data['link'].replace('http:','https:'))\n if hasattr(data, 'tags') and len(data['tags']) > 0:\n template = template.replace('{{SECTION}}', data['tags'][0]['term'])\n else:\n template = template.replace('<h2><a href=\"{{URL}}\" target=\"_top\">{{SECTION}}</a></h2>', '')\n if hasattr(data, 'media_content') and len(data['media_content']) > 0:\n template = template.replace('{{IMG}}', '%s?w=150' % data['media_content'][0]['url'].replace('http:','https:'))\n else:\n template = template.replace(img_html, '')\n\n return template", "def __init__(self, template_content, section_type):\n self.helpers = Documentation()\n self.template_content = template_content\n self.section_type = section_type\n self.stype = self.section_type\n self.children = []", "def define_content(self, html):\n self.html_template(html, lang=\"en\")\n self.add_language(\"en\")", "def handlebars_template(parser, token):\n \n template_library = global_handlebars_template_library()\n \n try:\n parts = token.split_contents()\n context_variable = None\n \n if len(parts) == 2:\n tag_name, template_name = parts\n elif len(parts) == 3:\n tag_name, template_name, context_variable = parts\n else:\n raise ValueError\n except ValueError:\n raise template.TemplateSyntaxError(\"%r tag invalid arguments\" % token.contents.split())\n \n for s in [ '\"', '\"' ]:\n if (template_name.startswith(s) and template_name.endswith(s)):\n template_name = template_name[len(s):-len(s)]\n break\n \n if (template_name not in template_library.templates):\n raise template.TemplateDoesNotExist(\"%r '%s' not found\" % (template_name, \n token.contents.split()[0]));\n \n return CustomTemplateNode(template_name, template_library, context_variable)", "def render_template(self, template_path, vars=None):\n\n content = {'plain':'', 'html':''}\n\n with open(template_path, 'r') as h:\n template = Template(h.read())\n html = template.render(vars)\n\n soup = BeautifulSoup(html, \"html.parser\")\n\n # Generating plain text source from html source\n # The style tag and .link_button are removed\n for style in soup.select('style,.link_button'):\n style.extract()\n\n # Only keep the text inside the tags\n plain = ''.join(soup.findAll(text=True)).strip()\n\n content['html'] = html\n content['plain'] = plain\n\n return content", "def 
run(self):\n\n for line in self.template:\n match = self._regex.match(line)\n if match:\n self._process(match)\n return self.parsed_template", "def clean_html_template(self):\r\n template = self.cleaned_data[\"html_template\"]\r\n self._validate_template(template)\r\n return template", "def parseSearchHtml(self):\n pass", "def parseSearchHtml(self):\n pass", "def htAccessTmplContent( self, pars, directory ):\n\n return None", "def _load_template(name: str) -> str:\n html_tpl = _read_text(name + '.html')\n import re\n\n # line breaks are not needed\n html_tpl = html_tpl.replace('\\n', '')\n # remove comments\n html_tpl = re.sub(r'<!--(.|\\s|\\n)*?-->', '', html_tpl)\n # remove space around special characters\n html_tpl = re.sub(r'\\s*([><])\\s*', r'\\1', html_tpl)\n return html_tpl", "def read_html_template(resume_template_file):\n\n # CREATE VARIABLE RESUME OUTPUT TO STORE HTML CODE\n resume_output = []\n\n # opens resume template html file\n with open(resume_template_file, \"r\") as fin:\n template = list(fin.readlines())\n\n # debugging\n # print(\"template:\", template)\n\n # strips the trailing spaces from each of the lines in template\n for line in template:\n line = line.replace('\\n', '')\n # and saves to the variable resume output\n resume_output.append(line)\n\n # debugging\n # print(\"resume output after read_html:\", resume_output)\n\n # returns output code\n return resume_output", "def _parse_template(self):\n with open(\"./common/sagemaker_rl/orchestrator/cloudformation.yaml\") as template_fileobj:\n template_data = template_fileobj.read()\n self.cf_client.validate_template(TemplateBody=template_data)\n return template_data", "def get_wrapper_template(self, declaration):\n pass", "def test_register_template(self):\n pass", "def _extract_html(self, problemtree): # private\r\n if not isinstance(problemtree.tag, basestring):\r\n # Comment and ProcessingInstruction nodes are not Elements,\r\n # and we're ok leaving those behind.\r\n # BTW: etree gives us no good way to distinguish these things\r\n # other than to examine .tag to see if it's a string. 
:(\r\n return\r\n\r\n if (problemtree.tag == 'script' and problemtree.get('type')\r\n and 'javascript' in problemtree.get('type')):\r\n # leave javascript intact.\r\n return deepcopy(problemtree)\r\n\r\n if problemtree.tag in html_problem_semantics:\r\n return\r\n\r\n problemid = problemtree.get('id') # my ID\r\n\r\n if problemtree.tag in inputtypes.registry.registered_tags():\r\n # If this is an inputtype subtree, let it render itself.\r\n status = \"unsubmitted\"\r\n msg = ''\r\n hint = ''\r\n hintmode = None\r\n input_id = problemtree.get('id')\r\n if problemid in self.correct_map:\r\n pid = input_id\r\n status = self.correct_map.get_correctness(pid)\r\n msg = self.correct_map.get_msg(pid)\r\n hint = self.correct_map.get_hint(pid)\r\n hintmode = self.correct_map.get_hintmode(pid)\r\n\r\n value = \"\"\r\n if self.student_answers and problemid in self.student_answers:\r\n value = self.student_answers[problemid]\r\n\r\n if input_id not in self.input_state:\r\n self.input_state[input_id] = {}\r\n\r\n # do the rendering\r\n state = {\r\n 'value': value,\r\n 'status': status,\r\n 'id': input_id,\r\n 'input_state': self.input_state[input_id],\r\n 'feedback': {\r\n 'message': msg,\r\n 'hint': hint,\r\n 'hintmode': hintmode,\r\n }\r\n }\r\n\r\n input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag)\r\n # save the input type so that we can make ajax calls on it if we need to\r\n self.inputs[input_id] = input_type_cls(self.capa_system, problemtree, state)\r\n return self.inputs[input_id].get_html()\r\n\r\n # let each Response render itself\r\n if problemtree in self.responders:\r\n overall_msg = self.correct_map.get_overall_message()\r\n return self.responders[problemtree].render_html(\r\n self._extract_html, response_msg=overall_msg\r\n )\r\n\r\n # let each custom renderer render itself:\r\n if problemtree.tag in customrender.registry.registered_tags():\r\n renderer_class = customrender.registry.get_class_for_tag(problemtree.tag)\r\n renderer = renderer_class(self.capa_system, problemtree)\r\n return renderer.get_html()\r\n\r\n # otherwise, render children recursively, and copy over attributes\r\n tree = etree.Element(problemtree.tag)\r\n for item in problemtree:\r\n item_xhtml = self._extract_html(item)\r\n if item_xhtml is not None:\r\n tree.append(item_xhtml)\r\n\r\n if tree.tag in html_transforms:\r\n tree.tag = html_transforms[problemtree.tag]['tag']\r\n else:\r\n # copy attributes over if not innocufying\r\n for (key, value) in problemtree.items():\r\n tree.set(key, value)\r\n\r\n tree.text = problemtree.text\r\n tree.tail = problemtree.tail\r\n\r\n return tree", "def from_html(self, content):\r\n pass", "def test_get_root_html2(self):\n pass", "def parse_template(template):\n field_name = None\n field_value = []\n\n for line in template.strip().split('\\n') + ['end:']:\n if line.startswith('#'):\n continue\n match = RE_TEMPLATE_FIELD_LINE.match(line)\n if match:\n if field_name is not None:\n yield (field_name, '\\n'.join(field_value).strip())\n elif len(field_value) > 0:\n logging.warning('Ignoring lines: %r', field_value)\n\n field_name = match.group(1)\n field_value = [match.group(2)]\n else:\n field_value.append(line)", "def _html(self, text):\r\n html = URL_REGEX.sub(self._parse_urls, text)\r\n html = USERNAME_REGEX.sub(self._parse_users, html)\r\n html = LIST_REGEX.sub(self._parse_lists, html)\r\n return HASHTAG_REGEX.sub(self._parse_tags, html)", "def test_create_namespaced_processed_template(self):\n pass" ]
[ "0.6144188", "0.59667295", "0.5910988", "0.58417255", "0.5832843", "0.57946736", "0.5791633", "0.5513455", "0.5368274", "0.5321239", "0.5256728", "0.5246223", "0.52121377", "0.5194012", "0.51886094", "0.51714516", "0.5155424", "0.5155424", "0.5152998", "0.51181227", "0.5117904", "0.5112046", "0.5086778", "0.50792724", "0.5071816", "0.5060292", "0.50503427", "0.50254405", "0.5014689", "0.5007561" ]
0.72280455
0
Convert the number to the minimal representation. This strips the number of any valid separators and removes surrounding whitespace.
def compact(number): return clean(number, ' -').strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compact(number):\n return clean(number, ' -./,').strip()", "def clean(number):\n digits = [c for c in number if c.isdigit()]\n if len(digits) == 11 and digits[0] == \"1\":\n return ''.join(digits[1:])\n elif len(digits) != 10:\n return \"0000000000\"\n else:\n return ''.join(digits)", "def compact(number):\n return clean(number, ' -.').upper().strip()", "def strip_leading_chars(val):\n for i, c in enumerate(val):\n if c in \"0123456789.\":\n return val[i:]\n return \"\"", "def compact(number):\n number = clean(number, ' ').upper().strip()\n if number.startswith('AL'):\n number = number[2:]\n if number.startswith('(AL)'):\n number = number[4:]\n return number", "def compact(number):\n number = clean(number).strip().replace(' ', '-').split('-')\n if len(number) == 4:\n # zero pad the different sections if they are found\n lengths = (2, 4, 7, 3)\n return ''.join(n.zfill(l) for n, l in zip(number, lengths))\n else:\n # otherwise zero pad the account type\n number = ''.join(number)\n return number[:13] + number[13:].zfill(3)", "def strip_numbers(s):\n if s:\n s = u' '.join([x for x in s.split(' ') if not x.isdigit()])\n return s", "def compact(number):\n number = clean(number, ' ').upper().strip()\n for prefix in ('УНП', u'УНП', 'UNP', u'UNP'):\n if type(number) == type(prefix) and number.startswith(prefix):\n number = number[len(prefix):]\n # Replace Cyrillic letters with Latin letters\n cleaned = ''.join(_cyrillic_to_latin.get(x, x) for x in to_unicode(number))\n if type(cleaned) != type(number): # pragma: no cover (Python2 only)\n cleaned = cleaned.encode('utf-8')\n return cleaned", "def normalise_number(number, number_length):\n number = ''.join(c for c in number if c.isnumeric())\n if len(number) == number_length:\n return number\n return None", "def cleanInteger(number):\n \n number = str(number).replace(' ', '')\n \n test = number\n for i in range(10):\n test = test.replace(str(i), '')\n \n if test:\n return None\n \n return number", "def compact_number(value: int) -> str:\n value = float('{:.3g}'.format(value))\n magnitude = 0\n while abs(value) >= 1000:\n magnitude += 1\n value /= 1000.0\n return '{}{}'.format(\n '{:f}'.format(value).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'T'][magnitude]\n )", "def _remove_digit_blocks(self, text: str) -> str:\n return re.sub(r\"\\b\\d+\\b\", \" \", str(text))", "def strip(phone):\n return re.sub('\\D', '', Phone.normalize(phone))", "def leading_number(val):\n n=\"\"\n for c in val:\n if c not in \"0123456789.\":\n break\n n = n + c\n return n", "def keep_digits(x: str) -> str:\n return \"\".join([c for c in x if c.isdigit()]).strip()", "def _remove_digits(self, text: str) -> str:\n return re.sub(r\"\\d+\", \" \", str(text))", "def strip_non_digits(x: str) -> str:\n exp = re.compile(\"[^\\d]+\")\n return re.sub(exp, \"\", x)", "def clean_phone(number):\n numberlist = re.findall(\"\\d\",number)\n new_number = \"\".join(numberlist)\n if len(new_number) == 8:\n \tnew_number = \"010\" + new_number\n\tnew_number = new_number[-11:]\n\tif new_number.startswith('1'):\n\t\tnew_number = \"+86-\" + new_number\n\telse:\n\t\tnew_number = \"+86-10-\" + new_number[-8:]\n\treturn new_number", "def strip_non_num(phone):\n return ''.join([i for i in phone if i.isdigit()])", "def remove_free_digits(text):\n return RegexFilters.replace_free_digits(text, \" \")", "def clean_numbers(text):\n return regex.sub(\"\\d+\", ' NUM', text)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return 
text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def intspace(value):\n # http://softwaremaniacs.org/forum/django/19392/\n if value is None:\n return None\n orig = force_str(value)\n new = re.sub(r\"^(-?\\d+)(\\d{3})\", r\"\\g<1> \\g<2>\", orig)\n return new if orig == new else intspace(new)", "def strip_numbers(text):\n if text is np.nan:\n return text\n regex = re.compile(r\"-?\\d+\")\n return re.sub(regex, \"\", text)", "def _format_num(self, value) -> typing.Any:\n if type(value) == str and ((len(value.strip()) == 0) or (len(value.strip()) > 0 and not str_util.is_number(value))):\n if not self.required or self.allow_none:\n return None\n raise self.make_error(\"invalid\", input=value)\n return self.num_type(value)", "def _format_num(self, value) -> typing.Any:\n if type(value) == str and ((len(value.strip()) == 0) or (len(value.strip()) > 0 and not str_util.is_number(value))):\n if not self.required or self.allow_none:\n return None\n raise self.make_error(\"invalid\", input=value)\n return self.num_type(value)", "def remove_digits(self, text):\n return re.sub('\\d+', '', text)", "def intspace(value):\n orig = force_unicode(value)\n new = re.sub(\"^(-?\\d+)(\\d{3})\", '\\g<1> \\g<2>', orig)\n if orig == new:\n return new\n else:\n return intspace(new)" ]
[ "0.7245501", "0.656221", "0.64973676", "0.64160055", "0.6292922", "0.6228728", "0.6026842", "0.6006934", "0.5927505", "0.5873222", "0.5849065", "0.5830982", "0.5821903", "0.58107376", "0.58034885", "0.5800797", "0.5761181", "0.5749941", "0.5686296", "0.56762457", "0.5647582", "0.56390023", "0.56390023", "0.56390023", "0.56321967", "0.56294036", "0.5601005", "0.5601005", "0.55616635", "0.5558975" ]
0.70902634
1
Calculate the checksum. The checksum is only used for the 9 digits of the number and the result can either be 0 or 42.
def checksum(number): return sum(i * int(n) for i, n in enumerate(reversed(number), 1)) % 11
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __calculate_checksum(cls, number) -> str:\n # TODO in future stages, this function will use the Luhn algorithm to create checksum\n return str(sum(int(num) for num in str(number)) % 10)", "def calculate_checksum(code):\n\n sum_odd = reduce(sum_chars, code[::2])\n sum_even = reduce(sum_chars, code[1:-1:2])\n check = (sum_even + sum_odd * 3) % 10\n\n if check == 0:\n return 0\n else:\n return 10 - check", "def checksum(n):\n\n # Compute the sum of the non-check digits.\n s = sum(luhn_digits(n * 10))\n\n # Multiply by 9.\n result = s * 9\n\n # The units digit is the check digit\n check_digit = result % 10\n\n m = int(str(n) + str(check_digit))\n assert(verify(m))\n\n return check_digit", "def _get_checksum(code: str) -> int:\r\n total = 0\r\n\r\n for index, digit in enumerate(code):\r\n digit = int(digit)\r\n if (index + 1) % 2 != 0:\r\n digit *= 2\r\n if digit > 9:\r\n digit -= 9\r\n total += digit\r\n\r\n checksum = 10 - total % 10\r\n\r\n return checksum if checksum != 10 else 0", "def checksum(number):\n c = 0\n for i, item in enumerate(reversed(str(number))):\n c = dTable[c][pTable[i % 8][int(item)]]\n return c", "def calculate_checksum(self, data):\n\t\tdata = data[2:] # Ignore start tokens ($$)\n\t\tcrc16 = crcmod.predefined.mkCrcFun('crc-ccitt-false')\n\t\treturn hex(crc16(data))[2:].upper().zfill(4)", "def checksum(n):\n return zlib.crc32(n.to_bytes(int(math.log2(n)), \"big\"))", "def _checksum(cls, buff):\n checksum = 0\n\n while True:\n data = buff.read(cls.checksum_struct.size)\n\n if len(data) == 0:\n break\n if len(data) < 4:\n pad_count = len(data) % 4\n data = data + \"\\x00\" * pad_count\n raise ValueError(\"Checksum data length is not a multiple of 4. %d\" % len(data))\n print(data)\n c1, c2 = cls.checksum_struct.unpack(data)\n checksum += c1 + c2\n print(checksum, checksum % 65536) # novatel 32 bit crc\n return checksum % 65536", "def calc_checksum(self, segment: bytes) -> int:\n if len(segment) % 2 == 1: # padding\n segment += b'\\x00'\n strarr = array.array('H', segment) # split into 16-bit substrings\n cksum = sum(strarr) # sum\n cksum = (cksum >> 16) + (cksum & 0xffff) # carry\n cksum += (cksum >> 16) # carry in case of spill\n cksum = ~cksum & 0xffff # 1's complement\n return cksum", "def luhn_checksum(card_number):\n \n # Convert number into a list so we can edit each index value\n num = [int(x) for x in str(card_number)]\n \n # Step 1: multiply each odd index by 2 \n for i in range(0, 15, 2): # len(num) was falling one short so resorted to using int\n num[i] *= 2\n \n # Step 2: subtract 9 from any numbers greater than 9\n for i in range(0, 15):\n if num[i] > 9:\n num[i] -= 9\n else:\n continue\n \n # Step 3: total the 15 digits \n total = 0\n for i in range(0, 15):\n total += num[i]\n \n # Step 4: multiply total by 9 and take the last digit which is our checksum\n total_2 = total * 9\n string_total_2 = str(total_2)\n checksum = string_total_2[-1]\n \n return checksum", "def calculate_checksum(cls, barcode: Union[str, \"EAN13\", \"EAN8\", \"EAN14\"]) -> int:\n\n if isinstance(barcode, cls):\n barcode = barcode.code\n elif isinstance(barcode, str):\n pass\n else:\n raise TypeError(f\"Can't accept type {type(barcode)}\")\n\n if len(barcode) >= cls.BARCODE_LENGTH:\n barcode = barcode[:cls.BARCODE_LENGTH]\n # Here there is no check digit so it's calculated\n digits = list(map(int, list(barcode)))\n\n # Get even and odd indeces of the digits\n weighted_odd = digits[1::2]\n weighted_even = digits[::2]\n\n # Calculate the checksum\n checksum = sum(weighted_odd) * 
cls.WEIGHTS.ODD + sum(weighted_even) * cls.WEIGHTS.EVEN\n if checksum % 10 == 0:\n return 0\n\n # Find the closest multiple of 10, that is equal to\n # or higher than the checksum and return the difference\n closest10 = ((checksum // 10) * 10) + 10\n return closest10 % checksum\n\n raise IncorrectFormat(f\"Barcode should be at least {cls.BARCODE_LENGTH} digits long.\")", "def checksum(value):\n ch = _checksum([ord(c) for c in value])\n ch = ((ch % 0x0d) & 7) | ((ch/7) << 2)\n return _checksum(sum((_le_encode(ch-9*i) for i in xrange(20)), []))", "def checksum(data):\r\n # group the data by word, little-endian\r\n data_list = []\r\n for t in range(10):\r\n data_list.append( data[2*t] + (data[2*t+1]<<8) )\r\n \r\n # compute the checksum on 32 bits\r\n chk32 = 0\r\n for d in data_list:\r\n chk32 = (chk32 << 1) + d\r\n\r\n # return a value wrapped around on 15bits, and truncated to still fit into 15 bits\r\n checksum = (chk32 & 0x7FFF) + ( chk32 >> 15 ) # wrap around to fit into 15 bits\r\n checksum = checksum & 0x7FFF # truncate to 15 bits\r\n return int( checksum )", "def checksum(code):\n return sum(code) % 256", "def _get_checksum(self, arg):", "def luhn_checksum(card_number):\n def _double_and_sum_digits(d):\n s = d * 2\n result = s if s < 10 else (s - 9)\n return result\n\n mapped_digits = [\n d if index % 2 == 0 else _double_and_sum_digits(d)\n for index, d\n in enumerate(reversed(digits_of(card_number)))\n ]\n\n checksum = sum(mapped_digits) % 10\n return checksum", "def test_luhn_checksum(self):\n check_digit = calculate_luhn(\"7992739871\")\n assert check_digit == 3", "def doChecksum(line):\n return sum(map(int, filter(lambda c: c >= '0' and c <= '9', line[:-1].replace('-','1')))) % 10", "def _checksum(value):\n a, b, c = 0x9e3779b9, 0x9e3779b9, 0xe6359a60\n\n index = 0\n while index <= len(value)-12:\n a, b, c = _mix(\n a + _le_decode(value[index:index+4]),\n b + _le_decode(value[index+4:index+8]),\n c + _le_decode(value[index+8:index+12]))\n index += 12\n\n a, b, c = _mix(\n a + _le_decode(value[index:index+4]),\n b + _le_decode(value[index+4:index+8]),\n c + (_le_decode(value[index+8:])<<8) + len(value))\n\n return c", "def ahv_checksum(value):\n return str(\n (10 - sum((3 - 2 * (i % 2)) * int(n) for i, n in enumerate(reversed(value)))) % 10\n )", "def lv_checksum(value):\n multipliers = (1, 6, 3, 7, 9, 10, 5, 8, 4, 2)\n\n check = sum(mult * int(c) for mult, c in zip(multipliers, value))\n return ((1 - check) % 11) % 10", "def checksum (upc):\n\n # check type of input\n # raise TypeError if not string\n\n # xxxxxxxxxxx x\n # check length of string\n # raise ValueError if not 12\n\n # convert string to array\n # generate checksum using the first 11 digits provided\n # check against the the twelfth digit\n # result of first 11 digits must be consistent with the value of the 12th digit\n # value must be number\n\n # return True if they are equal, False otherwise\n num = []\n #\"123456\" --> \"1\" \"2\" \"3\" \"4\" \"5\" \"6\" --> num = [1,2,3,4,5,6] --> num[0] = 1, num[3] = 4\n if type(upc) is str:\n for i in range(0, len(upc)):\n try:\n num.append(int(upc[i]))\n except ValueError:\n raise ValueError(\"Not correct length\")\n # if upc[i] is not number checksum('1b2')\n else:\n raise TypeError(\"Invalid type passed as parameter\")\n #raiseError\n\n if len(num) != 12:\n raise ValueError(\"Not correct length\")\n\n\n odd, even = num[::2], num[1::2]\n result = 0\n for i in range(0,len(odd)):\n result = result + odd[i]\n\n result *= 3\n\n # This is to add even numbered digits\n for i in 
range(0, (len(even)-1)):\n result = result + even[i]\n\n result %= 10\n if result != 0:\n result = 10 - result\n\n if result == num[11]:\n return True\n\n return False", "def calcChecksum(self, data, length):\n checksum = 0\n\n for i in range(length//2):\n checksum = checksum ^ (data[i*2] | (data[i*2+1] << 8)) #xor-ing\n return 0xffff & (checksum ^ 0xffff) #inverting", "def calculate_checksum(self, message):\n s = 0\n for i in range(0, len(message)-1, 2):\n w = (message[i]) + (message[i + 1] << 8) << 8\n s = ((w + s) & 0xffff) + ((w + s) >> 16)\n return s", "def ean_checksum(eancode):\n if len(eancode) <> 13:\n return -1\n oddsum=0\n evensum=0\n total=0\n eanvalue=eancode\n reversevalue = eanvalue[::-1]\n finalean=reversevalue[1:]\n\n for i in range(len(finalean)):\n if i % 2 == 0:\n oddsum += int(finalean[i])\n else:\n evensum += int(finalean[i])\n total=(oddsum * 3) + evensum\n\n check = int(10 - math.ceil(total % 10.0)) %10\n return check", "def luhn_checksum(num: str) -> str:\n check = 0\n for i, s in enumerate(reversed(num)):\n sx = int(s)\n if i % 2 == 0:\n sx *= 2\n if sx > 9:\n sx -= 9\n check += sx\n return str(check * 9 % 10)", "def create_checksum(self, fifteen_digit):\n duplicate_odd_digits = [int(fifteen_digit[i - 1]) * 2 if i % 2 else\n int(fifteen_digit[i - 1]) for i in range(1, 16)]\n subtract_nine = [digit - 9 if digit > 9 else digit for digit in duplicate_odd_digits]\n sum_up = sum(subtract_nine)\n return (10 - sum_up % 10) % 10", "def _compute_checksum(packet):\n # checksum is the sum of the bytes\n # from device id to the end of the data\n # mod (%) 256 and bit negated (~) (1's compliment)\n # and (&) with 0xFF to make sure it is a byte.\n return ~(sum(packet[2:]) % 0x100) & 0xFF", "def calculate_checksum(self, message):\n return sum([int(x, 16) if type(x) == str else x for x in message]) & 0xFF", "def get_calculated_checksum(code):\n code.reverse()\n calculated_checksum = 0\n for index, number in enumerate(code):\n # +1 because index starts from 0\n calculated_checksum += int(number) * (index + 1)\n return calculated_checksum" ]
[ "0.78587705", "0.7552482", "0.7338071", "0.7285181", "0.72572434", "0.7091845", "0.6993215", "0.6969281", "0.68769675", "0.6869387", "0.686647", "0.686087", "0.6852495", "0.68352187", "0.6782381", "0.6770472", "0.67436624", "0.67336917", "0.6673327", "0.66170275", "0.66162884", "0.65972716", "0.6588952", "0.6528197", "0.6513477", "0.65078455", "0.6500301", "0.6498502", "0.64860046", "0.6464687" ]
0.76590395
1
Check if the number is valid. This checks the length and check digit.
def validate(number): number = compact(number) if not isdigits(number): raise InvalidFormat() if len(number) != 10: raise InvalidLength() if checksum(number) != 0: raise InvalidChecksum() return number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_number(self):\n digits = self.number\n _sum = 0\n alt = False\n ix = []\n for x in str(digits):\n ix.append(int(x))\n for d in reversed(ix):\n assert 0 <= d <= 9\n if alt:\n d *= 2\n if d > 9:\n d -= 9\n _sum += d\n alt = not alt\n return (_sum % 10) == 0", "def validate(number):\n number = compact(number)\n if len(number) != 9:\n raise InvalidLength()\n if not isdigits(number[2:]):\n raise InvalidFormat()\n if not isdigits(number[:2]) and not all(x in 'ABCEHKMOPT' for x in number[:2]):\n raise InvalidFormat()\n if number[0] not in '1234567ABCEHKM':\n raise InvalidComponent()\n if number[-1] != calc_check_digit(number):\n raise InvalidChecksum()\n return number", "def validate(number):\n number = compact(number)\n if len(number) != 11:\n raise InvalidLength()\n if not isdigits(number):\n raise InvalidFormat()\n if number.startswith('0'):\n raise InvalidFormat()\n # In the first 10 digits exactly one digit must be repeated two or\n # three times and other digits can appear only once.\n counter = defaultdict(int)\n for n in number[:10]:\n counter[n] += 1\n counts = [c for c in counter.values() if c > 1]\n if len(counts) != 1 or counts[0] not in (2, 3):\n raise InvalidFormat()\n return mod_11_10.validate(number)", "def validate(self):\n return (self.check_input_digits_count()\n and self.check_if_input_is_int()\n and self.check_if_input_digits_are_unique())", "def validate(number):\n number = compact(number)\n if not isdigits(number):\n raise InvalidFormat()\n if len(number) != 10:\n raise InvalidLength()\n # check if birth date is valid\n get_birth_date(number)\n # TODO: check that the birth date is not in the future\n # check the check digit\n if calc_check_digit(number[:-1]) != number[-1]:\n raise InvalidChecksum()\n return number", "def validate(number):\n number = compact(number)\n if len(number) != 10:\n raise InvalidLength()\n if not _nipt_re.match(number):\n raise InvalidFormat()\n return number", "def is_valid_number(self, text, widget):\n if len(text) > 2:\n return False\n for char in text:\n if not char.isdigit():\n return False\n if text != '' and int(text) == 0:\n return False\n return True", "def checknum(val):\n\n if len(val) == 0:\n return False\n\n for i in range(len(val)):\n if not val[i].isdigit():\n return False\n\n return True", "def phone_number_validator(phone_number):\n if len(phone_number) != 10:\n return False\n if phone_number[0] == '0':\n return False\n try:\n int(phone_number)\n except ValueError:\n return False\n return True", "def validate(number):\n number = compact(number)\n if not isdigits(number):\n raise InvalidFormat()\n if len(number) != 16:\n raise InvalidLength()\n if _calc_checksum(number) != 0:\n raise InvalidChecksum()\n i = info(number)\n if 'bank' not in i or 'branch' not in i:\n raise InvalidComponent()\n return number", "def validate_phoneNumber(number):\n return isinstance(number, int)", "def check_number(number):\n digits = str(number)\n if len(digits) != 6:\n return False\n\n double = False\n last = '0'\n for digit in digits:\n if digit < last:\n return False\n\n if digit == last:\n double = True\n\n last = digit\n\n return double", "def is_valid(self):\n return phonenumbers.is_valid_number(self)", "def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None", "def validate(number):\n # numbers only:\n try:\n long(number)\n except:\n return 0\n\n # must be at least 13 digits:\n if len(str(number)) < 13:\n return 0\n\n # can't be all zeros, even though this passes the check below\n if long(number) == 
0:\n return 0\n \n ### check the digits: ###########\n # see http://www.beachnet.com/~hstiles/cardtype.html\n\n # digits, from right to left...\n digits = list(str(number))\n digits.reverse()\n\n doubles = \"\"\n sum = 0\n # Step 1: Double the value of alternate digits of the primary\n # account number beginning with the second digit from the right\n # (the first right--hand digit is the check digit.)\n for i in range(len(digits)):\n if i % 2:\n # note that this does NOT fire for the rightmost digit,\n # because 0 % 2 is 0... :)\n doubles = doubles + str(int(digits[i]) * 2)\n\n # Step 2: Add the individual digits comprising the products\n # obtained in Step 1 to each of the unaffected digits in the\n # original number.\n else:\n sum = sum + int(digits[i])\n\n for ch in doubles:\n sum = sum + int(ch)\n\n # Step 3: The total obtained in Step 2 must be a number ending in\n # zero (30, 40, 50, etc.) for the account number to be validated.\n if (sum % 10) != 0:\n return 0\n\n return 1", "def value_error(number):\n try:\n nbr = int(number)\n except ValueError:\n print(\"You can't sum letters, please write a number\")\n verification = False\n else:\n verification = True\n return verification", "def verify_valid_num(self, user_num):\r\n if not self.range_between_0_and_9(user_num):\r\n print(\"\\033[1;31mJust what do you think you're doing, Dave? Choose a number between 0 and 8\\033[0m\")\r\n return False\r\n\r\n return True", "def verify(n):\n\n # Take the sum of all digits.\n sum_of_digits = sum(luhn_digits(n))\n\n # The number is valid iff the sum of digits modulo 10 is equal to 0\n return sum_of_digits % 10 == 0", "def validatePhoneNumber(self):\n ## Declaring a Flag to control a while loop\n phone_number_ok = False\n ## While loop to have user retry their input if they enter incorrectly\n while not phone_number_ok:\n ## Asking for a phone number and checkig to see if it is 10 digits\n if self.phone_number.isdigit():\n if len(self.phone_number) == 10:\n phone_number_ok = True\n return True\n else:\n print(\"Please Enter a 10 digit phone number.\")\n return False\n \n else:\n print(\"You have enetered an invalid phone number. 
Please try again.\")\n return False", "def valid_checkdigit(ccnum):\n\n sum = 0\n num_digits = len(ccnum)\n oddeven = num_digits & 1\n\n for count in range(0, num_digits):\n digit = int(ccnum[count])\n\n if not ((count & 1) ^ oddeven):\n digit = digit * 2\n if digit > 9:\n digit = digit - 9\n\n sum = sum + digit\n\n return ((sum % 10) == 0)", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def invalid_phone_number(phonenumber):\n if all(digit.isdigit() for digit in phonenumber) \\\n and len(phonenumber) <= 10 and phonenumber.startswith(\"0\"):\n return False\n return True", "def validate_number(number):\n modified = False\n number = number.replace(\"(\", \"\").replace(\")\", \"\").replace(\"-\", \"\").replace(\" \", \"\").replace(\"+\", \"\")\n if len(number) == 11 and number.isdigit() and not number[1] in \"01\":\n number = \"+\" + number\n modified = True\n elif len(number) == 10 and number.isdigit() and not number[0] in \"01\":\n number = \"+1\" + number\n modified = True\n return number, modified", "def validate_account_number(num, should_exist=True):\n if len(num) != 8:\n return False\n elif num[0] == '0':\n return False\n else:\n if should_exist:\n return account_number_exists(num)\n else:\n return not account_number_exists(num)", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValueError:\n return False" ]
[ "0.7818345", "0.7770616", "0.76976967", "0.75029325", "0.73723716", "0.73674816", "0.7297837", "0.72790253", "0.7276956", "0.71201795", "0.7106368", "0.7062896", "0.7016437", "0.70082414", "0.69429743", "0.6932591", "0.69264835", "0.68842614", "0.687007", "0.6866722", "0.68570805", "0.68570805", "0.68570805", "0.68570805", "0.68570805", "0.68570805", "0.6834646", "0.6824191", "0.6728828", "0.6711897" ]
0.78588283
0
Sample an ordering given scores
def sample( self, scores: torch.Tensor ): perturbed = torch.log(scores) + self.gumbel_noise.sample((len(scores),)) return torch.argsort(-perturbed.detach())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_score_ordered(scores, idx):\t\n\treturn [x[1][idx] for x in sorted(scores.items())]", "def test_scoring(self):\n scores = score_words(['foo', 'far', 'has', 'car'])\n expected = [(7, 'far'), (6, 'car'), (5, 'has'), (4 , 'foo')]\n self.assertEqual(scores, expected)", "def eval_ordering(ordering, prefs):\n score=0\n cnt=len(ordering)\n for i in range(cnt-1):\n for j in range(i+1,cnt):\n e1,e2=ordering[i],ordering[j]\n if e1<e2:\n if (e1,e2) in prefs:\n score+=prefs[(e1,e2)]\n else:\n if (e2,e1) in prefs:\n score-=prefs[(e2,e1)]\n return score", "def sorted_scores(scores):\n\treturn sorted(scores, key=lambda sailor: (total_score(sailor), sailor[1][0]))", "def kwiksort(dict_prefs, list_els, runs=10, random_seed=None):\n best_score=float(\"-infinity\")\n if random_seed is not None:\n np.random.seed(random_seed)\n for run in range(runs):\n ordering=_kwiksort(list_els,dict_prefs)\n score=eval_ordering(ordering,dict_prefs)\n if score>best_score:\n best_score=score\n best_order=ordering\n return best_order", "def quaternary_tournament(population, scores, next_gen_number, random_seed=42):\n\n np.random.seed(random_seed)\n\n indices = list(range(len(population)))\n indices_array = np.array(indices)\n\n selected = []\n for i in range(next_gen_number):\n best_score = math.inf\n picked = None\n selected_indices = np.random.choice(indices_array, size=4)\n\n for indx in selected_indices:\n if scores[indx] < best_score:\n best_score = scores[indx]\n picked = population[indx]\n\n selected.append(picked)\n\n return selected", "def sample(self, batch_size):\n # get the sum of priorities\n priority_sum = self.sum_tree.get_sum_priority()\n # sample priorities \n priorities_to_sample = np.random.uniform(0, priority_sum, batch_size)\n # get the indexes of replays\n sample_idxes = [self.sum_tree.get(x) for x in priorities_to_sample]\n # fetch the transitions and prepare the batch for training\n random_sample = [self.queue[x] for x in sample_idxes]\n # zip\n zipped = [ torch.from_numpy( np.asarray(arr).astype(np.float32) ).float() for arr in zip(*random_sample) ]\n sample = Transition( zipped[0], zipped[1].unsqueeze_(-1).long(), zipped[2].unsqueeze_(-1), zipped[3], zipped[4].unsqueeze_(-1).byte() )\n return sample, sample_idxes", "def score_samples(self, X):\n ...", "def sort_population_by_score(input_population: Population) -> None:\n input_population.sort(key=score_individual, reverse=True)", "def test_get_score():\n\n assert sequence_threshold.get_score([]) == 0\n assert sequence_threshold.get_score(SortedSet()) == 0\n assert sequence_threshold.get_score(list(range(3, 36))) == 3\n assert sequence_threshold.get_score([10, 11, 12, 14, 16, 17]) == 10 + 14 + 16", "def sample_interest_points(method, scores, N):\n assert method in ['prob','topk', 'random']\n n = scores.size(0)\n if n < N:\n choice = np.random.choice(n, N)\n else:\n if method == 'random':\n choice = np.random.permutation(n)[:N]\n elif method =='topk':\n choice = torch.topk(scores, N, dim=0)[1]\n elif method =='prob':\n idx = np.arange(n)\n probs = (scores / scores.sum()).numpy().flatten()\n choice = np.random.choice(idx, size= N, replace=False, p=probs)\n \n return choice", "def populate_questions(scores):\n \n print(\"populate_questions, scores: \", str(scores))\n\n try:\n return random.sample(range(len(quiz.list_fragen)), TOTAL_ROUNDS*len(scores))\n except ValueError:\n print(\"List of questions is too short.\")", "def test_samples_high_weight_elements_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, 
private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertCountEqual([\"a\", \"b\"], s.elements.keys())", "def score(priority_list, totalItemCount, itemUsageDict, threshold):\n scored = list()\n for item in priority_list:\n scored.append((item, itemUsageDict[item][\"winRatio\"] * (itemUsageDict[item][\"totalCount\"]/ totalItemCount) * threshold))\n return scored", "def test_estimate_statistics_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertEqual(s.estimate_statistics(), 5.0)", "def test_result_group_sorts_by_first_metric(self, result_group, index, score):\n assert result_group.results[index].metrics.score == score", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def test_score():\n print(\"Tests for 'score' function\")\n test_suite = TestSuite()\n\n # Testing with empty hand\n result = score([])\n test_suite.run_test(result, 0, '0')\n # Testing with non-empty hand\n result = score([1, 3])\n test_suite.run_test(result, 3, '1')\n # Testing with non-empty hand\n result = score([1, 3, 1, 1])\n test_suite.run_test(result, 3, '2')\n # Testing with non-empty hand\n result = score([4, 3, 4, 3, 3])\n test_suite.run_test(result, 9, '3')\n\n # Show report\n test_suite.report_results()", "def ranked_items(self, threshold=None):\n threshold = threshold or len(self)\n log.debug(\"Called Scoresheet.ranked_items(): threshold=%d\", threshold)\n\n # Sort first by score, then by key. 
This way, we always get the same\n # ranking, even in case of ties.\n # We use the tmp structure because it is much faster than\n # itemgetter(1, 0).\n tmp = ((score, key) for key, score in self.items())\n ranked_data = sorted(tmp, reverse=True)\n\n for score, key in ranked_data[:threshold]:\n yield key, score", "def test_search_with_scoring(context):\n # When create a query block\n t = QuerySet(\"localhost\", index=\"foo\")\n\n # And there are records\n add_document(\"foo\", {\"bar\": 1})\n add_document(\"foo\", {\"bar\": 2})\n add_document(\"foo\", {\"bar\": 3})\n\n # And I add scoring\n score = ScriptScore(\"s = 0 + doc['bar'].value\")\n t.score(score)\n results = t[0:10]\n\n # Then my results are scored correctly\n len(results).should.equal(3)\n results[0][\"_source\"][\"bar\"].should.equal(3)\n results[1][\"_source\"][\"bar\"].should.equal(2)\n results[2][\"_source\"][\"bar\"].should.equal(1)", "def tiles_by_score(self):\n sorted_list = sorted(self.tiles, key=lambda t: t.score, reverse=True)\n return sorted_list", "def _get_indices(scores: np.ndarray, shuffle_prop: float) -> np.ndarray:\n return _shuffle_subset(scores.argsort().argsort(), shuffle_prop)", "def abilityScores():\n\n scores_list = []\n\n for i in range(6):\n temp_list = []\n for j in range(4):\n temp_list.append(r.choice([1,2,3,4,5,6]))\n temp_list.sort()\n scores_list.append(temp_list[1]+temp_list[2]+temp_list[3])\n scores_list.sort()\n return scores_list", "def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())", "def recommend_from_scores(scores: List[List[float]], n: int) -> List[List[int]]:\n\n def top_idx(scores):\n return np.array(scores).argsort()[::-1][:n]\n\n return [top_idx(s) for s in scores]", "def order_scores(doctors):\n\n # return doctors.sort(key=operator.methodcaller('get_review_score'))\n # print doctors\n print\n print\n ret_docs = sorted(doctors, key=operator.itemgetter('review_score'), reverse=True)\n # ret_docs = doctors.sort(key=lambda k: k['review_score'])\n # print ret_docs\n return ret_docs", "def sample_from_probabilities(probabilities, topn=ALPHASIZE):\n p = np.squeeze(probabilities)\n p[np.argsort(p)[:-topn]] = 0\n p = p / np.sum(p)\n return np.random.choice(ALPHASIZE, 1, p=p)[0]", "def top_students(mongo_collection):\n all_items = mongo_collection.find({})\n for item in all_items:\n count = 0\n new_topics = item\n for sta in item.get(\"topics\"):\n count += sta.get(\"score\")\n averageScore = count/len(item.get(\"topics\"))\n\n myquery = {\"name\": item.get(\"name\")}\n newvalues = {\"$set\": {\"averageScore\": averageScore}}\n mongo_collection.update_many(myquery, newvalues)\n\n order = mongo_collection.find().sort(\"averageScore\", DESCENDING)\n\n return order", "def sample(self, existing_results, num_samples):\n new_samples = set()\n existing_samples = list(existing_results.keys())\n existing_samples.sort()\n\n if self.max is not None and self.max not in existing_results and len(new_samples) < num_samples:\n new_samples.add(self.max)\n elif self.max is None and len(new_samples) < num_samples:\n new_samples.add(max(existing_samples) * 2)\n\n if self.min is not None and self.min not in existing_results and len(new_samples) < num_samples:\n new_samples.add(self.min)\n elif self.min is None and len(new_samples) < num_samples:\n new_samples.add(min(existing_samples) * 2)\n\n if (self.max is not None and self.min is not None and (self.max - self.min) / 2 not in existing_results and\n len(new_samples) < num_samples):\n 
new_samples.add(0.5 * (self.max - self.min))\n\n if len(existing_results) > 2 and len(new_samples) < num_samples:\n gradients = [(existing_results[existing_samples[i]] - existing_results[existing_samples[i-1]]) /\n (existing_samples[i] - existing_samples[i-1]) for i in range(1, len(existing_samples))]\n\n candidate_samples = []\n for i in range(1, len(existing_samples)):\n candidate_sample = 0.5 * (existing_samples[i] - existing_samples[i-1])\n gradient = gradients[i-1]\n if i > 2:\n score +=\n\n # Sort the candidate samples by score\n candidate_samples.sort(key=operator.itemgetter(1), reverse=True)\n for i in range(0, min(len(candidate_samples), ))\n\n return new_samples", "def cy_process_recommendations(entities, scores, n=10):\n r = c_funcs.cy_aggregate_scores(entities, scores, n)\n heapq.heapify(r)\n return {'result': [{\"item\": k, \"score\": v} for k, v in heapq.nlargest(\n n, r, key= lambda x: x[1])]}" ]
[ "0.61846995", "0.5977011", "0.59168404", "0.5782286", "0.5709815", "0.5659634", "0.56449974", "0.56339675", "0.5622803", "0.55706906", "0.55613416", "0.5512437", "0.5457819", "0.5451847", "0.5430736", "0.542836", "0.5408105", "0.54074764", "0.540372", "0.5390767", "0.53904086", "0.5381767", "0.5381371", "0.53750366", "0.533618", "0.5321506", "0.529438", "0.5288626", "0.5266595", "0.52556676" ]
0.63623494
0
Compute log probability given scores and an action (a permutation). The formula uses the equivalence of sorting with Gumbel noise and the Plackett-Luce model (see Yellott 1977)
def log_prob(self, scores : torch.Tensor, permutations): s = torch.log(select_indices(scores, permutations)) n = len(scores) p = self.upto if self.upto is not None else n - 1 return -sum( torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0)) for k in range(p))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def action_log_probs(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob", "def log_prob(self, state, time_step, rec_states_p, action, return_entropy=True):\n action_index = int(tf.argmax(action, axis=-1).numpy())\n #print(\"action to get prob\")\n #print(action)\n\n probs, _ = self.get_action(state, time_step, rec_states_p)\n log_prob = tf.math.log(probs[0][action_index])\n log_prob = tf.expand_dims(log_prob, -1)\n #print(\"action index\", action_index)\n #print(\"original probs\")\n #print(probs)\n #print(\"logs\")\n #print(log_prob)\n if return_entropy:\n entropy = -tf.reduce_sum(probs * tf.math.log(probs), axis=-1)\n entropy = tf.expand_dims(entropy, -1)\n return log_prob, entropy\n else: return log_prob", "def logprob(self, actions, action_logits):\n neg_log_prob = F.nll_loss(action_logits, actions, reduction='none')\n return -neg_log_prob", "def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs", "def logprob(self, action_sample, policy_params):\n return self.head.logprob(action_sample, policy_params)", "def logp(self, args):\n mean, stddev, action = args\n dist = tfp.distributions.Normal(loc=mean, scale=stddev)\n logp = dist.log_prob(action)\n return logp", "def log_prob(self):", "def get_log_prob(self, state: rlt.FeatureData, squashed_action: torch.Tensor):\n if self.use_l2_normalization:\n # TODO: calculate log_prob for l2 normalization\n # https://math.stackexchange.com/questions/3120506/on-the-distribution-of-a-normalized-gaussian-vector\n # http://proceedings.mlr.press/v100/mazoure20a/mazoure20a.pdf\n pass\n\n loc, scale_log = self._get_loc_and_scale_log(state)\n raw_action = torch.atanh(squashed_action)\n r = (raw_action - loc) / scale_log.exp()\n log_prob = self._normal_log_prob(r, scale_log)\n squash_correction = self._squash_correction(squashed_action)\n if SummaryWriterContext._global_step % 1000 == 0:\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/loc\", loc.detach().cpu()\n )\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/scale_log\", scale_log.detach().cpu()\n )\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/log_prob\", log_prob.detach().cpu()\n )\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/squash_correction\", squash_correction.detach().cpu()\n )\n return torch.sum(log_prob - squash_correction, dim=1).reshape(-1, 1)", "def compute_policy_log_probs(available_actions, policy, actions):\n def compute_log_probs(probs, labels):\n # Select arbitrary element for unused arguments (log probs will be masked)\n labels = tf.maximum(labels, 0)\n indices = tf.stack([tf.range(tf.shape(labels)[0]), labels], axis=1)\n # TODO tf.log should suffice\n return safe_log(tf.gather_nd(probs, indices))\n\n\n fn_id, arg_ids = actions\n fn_pi, arg_pis = policy\n # TODO: this should be unneccessary\n fn_pi = mask_unavailable_actions(available_actions, fn_pi)\n fn_log_prob = compute_log_probs(fn_pi, fn_id)\n 
tf.summary.scalar('log_prob/fn', tf.reduce_mean(fn_log_prob))\n\n log_prob = fn_log_prob\n for arg_type in arg_ids.keys():\n arg_id = arg_ids[arg_type]\n arg_pi = arg_pis[arg_type]\n arg_log_prob = compute_log_probs(arg_pi, arg_id)\n arg_log_prob *= tf.to_float(tf.not_equal(arg_id, -1))\n log_prob += arg_log_prob\n tf.summary.scalar('log_prob/arg/%s' % arg_type.name,\n tf.reduce_mean(arg_log_prob))\n\n return log_prob", "def get_log_prob(self, pi: Categorical, actions: Tensor):\n return pi.log_prob(actions)", "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def get_log_prob(self, pi: Normal, actions: Tensor):\n return pi.log_prob(actions).sum(axis=-1)", "def score(self, beam, logprobs):\n l_term = (((5 + len(beam.next_ys)) ** self.alpha) /\n ((5 + 1) ** self.alpha))\n return (logprobs / l_term)", "def get_log_prob(self, states, actions):\n\n mean, log_std = self.__network.forward(tr.from_numpy(states).float())\n\n actions = tr.from_numpy(actions).float()\n log_prob = - (actions - mean) ** 2\n log_prob /= (2.0 * tr.exp(log_std) ** 2 + 1e-10)\n log_prob -= log_std + 0.5 * self.__output_dim * np.log(2 * np.pi)\n return log_prob.sum(1, keepdim=True)", "def log_probability(self, sequence):\n sequence = self._transform(sequence)\n\n T = len(sequence)\n\n if T > 0 and sequence[0][_TAG]:\n last_state = sequence[0][_TAG]\n p = self._priors.logprob(last_state) + self._output_logprob(\n last_state, sequence[0][_TEXT]\n )\n for t in range(1, T):\n state = sequence[t][_TAG]\n p += self._transitions[last_state].logprob(\n state\n ) + self._output_logprob(state, sequence[t][_TEXT])\n last_state = state\n return p\n else:\n alpha = self._forward_probability(sequence)\n p = logsumexp2(alpha[T - 1])\n return p", "def log_probability(self, samples):\n pass", "def score_samples(self, x):\n n = x.shape[0]\n logp = np.log(self.mix_weight)\n logpz = np.zeros((n, self.ncomponents))\n\n for i in range(self.ncomponents):\n logpz[:, i] = logp[i] + multivariate_normal.logpdf(x, self.cond_proba.mean[i], self.cond_proba.cov[i])\n\n logpz, ll = normalize_logspace(logpz)\n pz = np.exp(logpz)\n return pz, ll", "def get_action_prob(self, game, probabilistic=True):\n for _ in range(Config.numMCTSSims):\n self.search(game)\n\n state = game.string_representation()\n counts = [\n self.Nsa.get((state, action), 0) for action in range(game.get_action_size())\n ]\n\n if probabilistic:\n if sum(counts) != 0:\n return [x / sum(counts) for x in counts]\n # TODO: understand this case (no valid actions)\n\n probs = [0] * len(counts)\n probs[np.argmax(counts)] = 1\n return probs", "def score_samples(self, x):\n n = x.shape[0]\n logp = np.log(self.mix_weight)\n logpz = np.zeros((n, self.ncomponents))\n\n for i in range(self.ncomponents):\n logpz[:, i] = logp[i] + multivariate_student.logpdf(x, self.cond_proba.mean[i], self.cond_proba.cov[i],\n self.cond_proba.df)\n\n logpz, ll = normalize_logspace(logpz)\n pz = np.exp(logpz)\n return pz, ll", "def compute_unclipped_logrho(behavior_logits, target_logits, actions):\n target_log_prob = log_prob(actions, target_logits, reduction=\"none\")\n behavior_log_prob = log_prob(actions, behavior_logits, reduction=\"none\")\n\n return target_log_prob - behavior_log_prob", "def log_prob(target_distribution, x0, xs, accepteds):\n return np.mean([target_distribution.log_probability(x) for x in xs])", "def to_logprobs(x):\n N = 
x.size(0)\n sid_bins = x.size(1)//2\n H, W = x.size()[-2:]\n A = x[:, ::2, :, :].clone()\n B = x[:, 1::2, :, :].clone()\n\n A = A.view(N, 1, -1)\n B = B.view(N, 1, -1)\n\n C = torch.cat((A, B), dim=1)\n # C = torch.clamp(C, min=1e-8, max=1e8) # prevent nans\n log_ord_c = F.log_softmax(C, dim=1)\n log_ord_c0 = log_ord_c[:, 0, :].clone()\n log_ord_c0 = log_ord_c0.view(-1, sid_bins, H, W)\n log_ord_c1 = log_ord_c[:, 1, :].clone()\n log_ord_c1 = log_ord_c1.view(-1, sid_bins, H, W)\n return log_ord_c0, log_ord_c1", "def log_prob(actions, logits, reduction=\"none\"):\n # Equivalent to tf.sparse_softmax_cross_entropy_with_logits.\n\n loss = torch.nn.CrossEntropyLoss(reduction=reduction)\n\n # logits: shape [BATCH_SIZE, CLASS_SIZE]\n # actions: shape [BATCH_SIZE]\n neg_log_prob = loss(logits, torch.squeeze(actions, dim=-1))\n\n log_prob = -neg_log_prob\n\n return log_prob", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def _graph_fn_get_distribution_log_probs(self, key, parameters, actions):\n # For bounded continuous action spaces, need to unscale (0.0 to 1.0 for beta distribution).\n if self.bounded_action_space[key] is True:\n actions = (actions - self.action_space.low) / (self.action_space.high - self.action_space.low)\n return self.distributions[key].log_prob(parameters, actions)", "def __sample_policy_action(probs):\n # Subtract a tiny value from probabilities in order to avoid\n # \"ValueError: sum(pvals[:-1]) > 1.0\" in numpy.multinomial\n probs = probs - np.finfo(np.float32).epsneg\n\n action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]\n############################################################################################\n # action_indexes = [np.argmax(p) for p in probs] #select the action with the highest probability instead of randomly sampling\n # print(action_indexes)\n # print('++++++++++++++++++++++++')\n############################################################################################\n return action_indexes", "def pred_prob(hp, ss, y):\n K = len(ss['counts'])\n N = sum(ss['counts'])\n assert y >= 0 and y <= K\n if y < K:\n return log((ss['counts'][y] - hp['d']) / (hp['alpha'] + N))\n elif y == K:\n return log((hp['alpha'] + hp['d'] * K) / (hp['alpha'] + N))", "def perplexity(model, data):\n probs = [model.get_prob(word) for word in data] # get word's probability\n probs_log = [\n log2(word_prob) if word_prob > 0 else log2(float_info.epsilon)\n for word_prob in probs\n ] # log the probabilities. using epsilon when the probability is 0\n sum_probs = reduce(lambda a, b: a + b, probs_log) # sum all\n power_val = (-1 * sum_probs) / len(probs_log) # divide by n and neg all\n return 2 ** power_val", "def calc_probs(log_p):\n\n N = log_p.shape[0]\n\n log_Z_per_N = np.zeros(shape=(N, 1))\n\n for i in range(N):\n\n log_Z_per_N[i] = log_norm(log_p[i])\n\n log_p_new = log_p - log_Z_per_N\n\n p = np.exp(log_p_new)\n\n # log_Z = log_norm(log_p)\n\n # p = np.exp(log_p - log_Z)\n\n return p", "def prob(self, state, action):\n if state + action == 100:\n reward = 1\n else:\n reward = 0\n\n return [(state + action, self._p_head, reward), (state - action, 1 - self._p_head, 0)]" ]
[ "0.724304", "0.694372", "0.68872494", "0.67296517", "0.6673799", "0.6590309", "0.64809585", "0.64702743", "0.645265", "0.6351982", "0.6325196", "0.63182306", "0.6232857", "0.6152958", "0.6137351", "0.61292297", "0.6126552", "0.6091621", "0.6089503", "0.6088007", "0.60647804", "0.60639226", "0.6037442", "0.6014798", "0.5999243", "0.5999202", "0.59936225", "0.5985501", "0.5971621", "0.5951126" ]
0.7008553
1
Add a view to the collection.
def add_view(self, view): # Add to views self._views.append(view) # If app was provided in constructor, register view with Flask app if self.app is not None: self.app.register_blueprint(view.create_blueprint(self)) if view.is_menu: self._add_view_to_menu(view)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)", "def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views.append(dataView)", "def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return", "def add_view( *args, **kwargs ):", "def _add_view_to_menu(self, view):\n self._add_menu_item(MenuView(view.name, view), view.category)", "def add_view_step(self, view_step):\n self._data_dict[self.KEY_VIEW_STEPS].append(view_step)", "def add_views(self, *args):\n for view in args:\n self.add_view(view)", "def view_add(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> JsonResponse:\n # Get the workflow element\n if workflow.nrows == 0:\n messages.error(\n request,\n _('Cannot add a view to a workflow without data'))\n return JsonResponse({'html_redirect': ''})\n\n # Form to read/process data\n form = ViewAddForm(request.POST or None, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_add.html',\n )", "def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views[dataView] = None", "def addViewToDb(self,name):\n\t\tsql = \"INSERT INTO hudson_views(viewname) VALUES (%s)\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql,[name])", "def _add_element(self, element: Element, add_relationships: bool) -> ElementView:\n if element not in self.model:\n raise RuntimeError(\n f\"The element {element} does not exist in the model associated with \"\n f\"this view.\"\n )\n view = self.find_element_view(element=element)\n if view is None:\n view = ElementView(element=element)\n self.element_views.add(view)\n if add_relationships:\n self._add_relationships(element)\n return view", "def add_route(self, pattern: str, view: Callable) -> None:\n route = Route(pattern)\n self._routes[route] = view", "def view_add(self):\n is_admin = self.request.view_name == \"admin-add\"\n if self.schema_add is None:\n raise HTTPNotFound()\n kwargs = self.request.json\n jsonschema.validate(instance=kwargs, schema=self.schema_add)\n child_view = self.add(**kwargs)\n if is_admin:\n return child_view.admin_tile\n else:\n return child_view.tile", "def register_view(self, viewfunc, url_rule=None) :\n\n\t\tviewid = View.parse_id(viewfunc, self.settings.VIEW_ROOT)\n\t\t\n\t\tif viewid not in self.views :\n\t\t\t# Add view if not exists\n\t\t\tv = View(\n\t\t\t\tid = viewid,\n\t\t\t\tviewfunc = viewfunc,\n\t\t\t\turl_rule = url_rule,\n\t\t\t)\n\t\t\tself.views[viewid] = v\n\n\t\telse :\n\t\t\t# Update view if exists\n\t\t\tv = self.views[viewid]\n\t\t\tv.viewfunc = viewfunc\n\n\t\t\tif url_rule is not None :\n\t\t\t\tv.url_rule = url_rule\n\n\t\treturn v", "def addCollection():\n return render_template(\"addCollection.html\")", "def add_views_widget(self):\n axial_view = QtWidgets.QPushButton(\"Axial\")\n coronal_view = QtWidgets.QPushButton(\"Coronal\")\n sagittal_view = QtWidgets.QPushButton(\"Sagittal\")\n views_box = QtWidgets.QGroupBox(\"Views\")\n views_box_layout = QtWidgets.QVBoxLayout()\n views_box_layout.addWidget(axial_view)\n 
views_box_layout.addWidget(coronal_view)\n views_box_layout.addWidget(sagittal_view)\n views_box.setLayout(views_box_layout)\n self.grid.addWidget(views_box, 3, 0, 2, 2)\n axial_view.clicked.connect(self.set_axial_view)\n coronal_view.clicked.connect(self.set_coronal_view)\n sagittal_view.clicked.connect(self.set_sagittal_view)", "def views(self, views):\n\n self._views = views", "def add(cls, doc):\n cls.get_collection().add(doc)", "def createViews(views):\n ...", "def _add_perspective_item(self, window, item):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(item.relative_to) > 0:\r\n relative_to = window.get_view_by_id(item.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # fixme: This seems a bit ugly, having to reach back up to the\r\n # window to get the view. Maybe its not that bad?\r\n view = window.get_view_by_id(item.id)\r\n if view is not None:\r\n # fixme: This is probably not the ideal way to sync view traits\r\n # and perspective_item traits.\r\n view.style_hint = item.style_hint\r\n # Add the view to the window.\r\n window.add_view(\r\n view, item.position, relative_to, (item.width, item.height)\r\n )\r\n\r\n else:\r\n # The reason that we don't just barf here is that a perspective\r\n # might use views from multiple plugins, and we probably want to\r\n # continue even if one or two of them aren't present.\r\n #\r\n # fixme: This is worth keeping an eye on though. If we end up with\r\n # a strict mode that throws exceptions early and often for\r\n # developers, then this might be a good place to throw one ;^)\r\n logger.error('missing view for perspective item <%s>' % item.id)\r\n\r\n return", "def add_views(admin, db):\n admin.add_view(PartAdmin(Part, db.session, endpoint='admin_parts', url='parts'))\n admin.add_view(PartComponentAdmin(PartComponent, db.session, endpoint='admin_part_components', url='part_components'))\n pass", "def addCollectionNode():\n return render_template(\"addCollectionNode.html\")", "def add_view(self, schema, create=True):\n if not constants.NAME_RX.match(schema[\"name\"]):\n raise ValueError(\"invalid view name\")\n if utils.name_in_nocase(schema[\"name\"], self.db[\"tables\"]):\n raise ValueError(\"name is already in use for a table\")\n if utils.name_in_nocase(schema[\"name\"], self.db[\"views\"]):\n raise ValueError(\"name is already in use for a view\")\n if create:\n sql = 'CREATE VIEW \"%s\" AS %s' % (\n schema[\"name\"],\n dbshare.query.get_sql_statement(schema[\"query\"]),\n )\n self.dbcnx.execute(sql)\n cursor = self.dbcnx.cursor()\n try:\n sql = 'PRAGMA table_info(\"%s\")' % schema[\"name\"]\n cursor.execute(sql)\n except sqlite3.Error: # Invalid view\n sql = 'DROP VIEW \"%s\"' % schema[\"name\"]\n cursor.execute(sql)\n raise ValueError(\"invalid view; maybe non-existent column?\")\n # Source names considering quotes and disregarding AS part, if any.\n schema[\"sources\"] = dbshare.query.get_from_sources(schema[\"query\"][\"from\"])\n schema[\"columns\"] = [{\"name\": row[1], \"type\": row[2]} for row in cursor]\n sql = \"INSERT INTO %s (name, schema) VALUES (?,?)\" % constants.VIEWS\n with self.dbcnx:\n self.dbcnx.execute(sql, (schema[\"name\"], json.dumps(schema)))\n self.db[\"views\"][schema[\"name\"]] = schema", "def _addView(self, win, fn=None, noName=\"\", addNext=False, indexes=None):\n raise RuntimeError('Not implemented')", "def EventContentMissionExcelAddViewFlag(builder, ViewFlag):\n return AddViewFlag(builder, ViewFlag)", "def add(self):\n pass", 
"def add_route(self, view, path, exact=True):\n if path[0] != '/':\n path = '/' + path\n for route in self._routes:\n assert path != route.path, 'Cannot use the same path twice'\n self._routes.append(Route(view=view, path=path, exact=exact))", "def _add_level_to_view(self, level):\n key = Level.key(self.sorting)(level)\n index = bisect.bisect(self.view_keys, key)\n self.view_keys[index:index] = [key]\n\n # If sorting is reversed, the key list and view are in different orders\n if(self.sorting & Sorting.Reversed):\n index = len(self.view_list) - index\n\n\n self.list_lock.acquire()\n\n self.beginInsertRows(QModelIndex(), index, index)\n self.view_list[index:index] = [level]\n\n self.endInsertRows()\n\n self.list_lock.release()", "def _connectView(self):\n self._view.select_asset = self.select_asset\n self._view.add_assets = self.add_assets\n self._view.remove_assets = self.remove_assets\n self._view.update_assets = self.update_assets\n self._view.commit = self.commit", "def add_route(config, route, view, route_name=None, renderer='json'):\n route_name = route_name or view.__name__\n config.add_route(route_name, route)\n config.add_view(view, route_name=route_name, renderer=renderer)" ]
[ "0.75795144", "0.74328953", "0.716204", "0.69258523", "0.6875929", "0.6744332", "0.65339804", "0.6369164", "0.6368626", "0.6259857", "0.6074848", "0.5948421", "0.5892814", "0.5855832", "0.5814092", "0.5697377", "0.56804794", "0.5673495", "0.56560206", "0.5632909", "0.56310827", "0.5619223", "0.561429", "0.5613625", "0.5578423", "0.55358773", "0.5531409", "0.55180466", "0.5500716", "0.5499962" ]
0.7520189
1
Add one or more views to the collection.
def add_views(self, *args): for view in args: self.add_view(view)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def views(self, views):\n\n self._views = views", "def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)", "def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views.append(dataView)", "def add_view( *args, **kwargs ):", "def add_view(self, view):\n # Add to views\n self._views.append(view)\n\n # If app was provided in constructor, register view with Flask app\n if self.app is not None:\n self.app.register_blueprint(view.create_blueprint(self))\n if view.is_menu:\n self._add_view_to_menu(view)", "def createViews(views):\n ...", "def create_views(self):\n # Extract view objects\n customer_views = CustomerViews().views\n admin_views = AdminViews().views\n\n # Add customer views/routes\n for view in customer_views:\n view_obj = customer_views.get(view)\n endpoint = view_obj.endpoint\n view_name = view_obj.name\n self.add_url_rule(endpoint, view_func=view_obj.as_view(view_name))\n \n # Add admin views/routes\n for view in admin_views:\n view_obj = admin_views.get(view)\n endpoint = view_obj.endpoint\n view_name = view_obj.name\n self.add_url_rule(endpoint, view_func=view_obj.as_view(view_name))", "def increment_views(self):\n self.views += 1\n self.save()", "def add_views(admin, db):\n admin.add_view(PartAdmin(Part, db.session, endpoint='admin_parts', url='parts'))\n admin.add_view(PartComponentAdmin(PartComponent, db.session, endpoint='admin_part_components', url='part_components'))\n pass", "def _add_all(self, window):\r\n\r\n for view in window.views:\r\n if view.visible:\r\n self._add_view(window, view)\r\n\r\n return", "def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return", "def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views[dataView] = None", "def add_views_widget(self):\n axial_view = QtWidgets.QPushButton(\"Axial\")\n coronal_view = QtWidgets.QPushButton(\"Coronal\")\n sagittal_view = QtWidgets.QPushButton(\"Sagittal\")\n views_box = QtWidgets.QGroupBox(\"Views\")\n views_box_layout = QtWidgets.QVBoxLayout()\n views_box_layout.addWidget(axial_view)\n views_box_layout.addWidget(coronal_view)\n views_box_layout.addWidget(sagittal_view)\n views_box.setLayout(views_box_layout)\n self.grid.addWidget(views_box, 3, 0, 2, 2)\n axial_view.clicked.connect(self.set_axial_view)\n coronal_view.clicked.connect(self.set_coronal_view)\n sagittal_view.clicked.connect(self.set_sagittal_view)", "def _add_view_to_menu(self, view):\n self._add_menu_item(MenuView(view.name, view), view.category)", "def addViewToDb(self,name):\n\t\tsql = \"INSERT INTO hudson_views(viewname) VALUES (%s)\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql,[name])", "def addCollection():\n return render_template(\"addCollection.html\")", "def add_view_step(self, view_step):\n self._data_dict[self.KEY_VIEW_STEPS].append(view_step)", "def view_add(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> JsonResponse:\n # Get the workflow element\n if workflow.nrows == 0:\n messages.error(\n request,\n _('Cannot add a view to a workflow without data'))\n return 
JsonResponse({'html_redirect': ''})\n\n # Form to read/process data\n form = ViewAddForm(request.POST or None, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_add.html',\n )", "def view_list(self, view_list):\n\n self._view_list = view_list", "def create_all_views():\n cursor.execute(articleList)\n cursor.execute(goodViews)\n cursor.execute(authorsTitles)\n cursor.execute(titleViews)\n cursor.execute(dailyTotalView)\n cursor.execute(dailyErrorView)", "def add_views(apps, schema_editor):\n connection = schema_editor.connection\n with connection.cursor() as cur:\n for view in reversed(OCP_ALL_VIEWS):\n LOG.info(f\"\"\"Dropping materialized view \"{view}\" with cascade\"\"\")\n cur.execute(f\"\"\"DROP MATERIALIZED VIEW \"{view}\" CASCADE;\"\"\")\n\n for view in OCP_ALL_VIEWS:\n view_sql = pkgutil.get_data(\"reporting.provider.all.openshift\", f\"sql/views/{view}.sql\")\n view_sql = view_sql.decode(\"utf-8\")\n LOG.info(f\"\"\"Creating materialized view \"{view}\"...\"\"\")\n with connection.cursor() as cursor:\n cursor.execute(view_sql)", "def count_view(self):\n self.count_views += 1\n self.save(update_fields=['count_views'])", "def views(self):\r\n return Views(self)", "def add_view_permissions(sender, **kwargs):\n # for each of our content types\n for content_type in ContentType.objects.all():\n # build our permission slug\n codename = \"view_%s\" % content_type.model\n\n # if it doesn't exist..\n if not Permission.objects.filter(content_type=content_type, codename=codename):\n # add it\n Permission.objects.create(content_type=content_type,\n codename=codename,\n name=\"Can view %s\" % content_type.name)\n # print \"Added view permission for %s\" % content_type.name", "def register_view():\n\n icon_set_id = request.args.get(\"iconSetId\")\n ip_address = request.remote_addr\n ip_address_anonymized = anonymize_ip(ip_address)\n\n # Add IP address to corresponding icon set\n if icon_set_id not in view_addresses:\n view_addresses[icon_set_id] = [ip_address_anonymized]\n view_counts[icon_set_id] = 1\n elif ip_address_anonymized not in view_addresses[icon_set_id]:\n view_addresses[icon_set_id].append(ip_address_anonymized)\n view_counts[icon_set_id] += 1\n else:\n return \"\"\n\n with open(path_views, \"w+\") as view_file:\n # Write updated object to file\n json.dump(view_addresses, view_file)\n\n return \"\"", "def _(event):\n\n N = len(self.view_model.results)\n coll = self.shared_state[\"active_collection\"]\n self.view_model.status_textcontrol.text = (\n f\"adding {N} records to {coll.name}...\"\n )\n count = 0\n for record in self.view_model.results:\n try:\n coll.add_document(record_id=record[\"record_id\"])\n count += 1\n except Exception:\n pass\n self.view_model.status_textcontrol.text = (\n f\"added {count} records to {coll.name}.\"\n )", "def add_view_permissions(sender, **kwargs):\n from django.contrib.auth.models import Permission\n from django.contrib.contenttypes.models import ContentType\n\n for content_type in ContentType.objects.filter(app_label=sender.label):\n codename = \"view_%s\" % content_type.model\n\n perm, created = Permission.objects.get_or_create(\n content_type=content_type, codename=codename, defaults={\n 'name': 'Can view %s' % content_type.name,\n }\n )\n\n if created:\n sys.stdout.write(\n 'Added view permission for %s' % content_type.name +\n '\\n'\n )", "def increase_view_count(self):\n try:\n self.view_counter += 1\n self.save(update_fields=['view_counter'])\n except:\n warnings.warn(\"Unable to increase view 
count for advert {}\".format(self.pk))", "def view_add(self):\n is_admin = self.request.view_name == \"admin-add\"\n if self.schema_add is None:\n raise HTTPNotFound()\n kwargs = self.request.json\n jsonschema.validate(instance=kwargs, schema=self.schema_add)\n child_view = self.add(**kwargs)\n if is_admin:\n return child_view.admin_tile\n else:\n return child_view.tile", "def _connectView(self):\n self._view.select_asset = self.select_asset\n self._view.add_assets = self.add_assets\n self._view.remove_assets = self.remove_assets\n self._view.update_assets = self.update_assets\n self._view.commit = self.commit" ]
[ "0.7105995", "0.6818911", "0.6739592", "0.65900046", "0.6586987", "0.6408147", "0.6131624", "0.60490364", "0.59855074", "0.5868126", "0.58605367", "0.5818668", "0.57982373", "0.5793522", "0.5711679", "0.5710666", "0.5690995", "0.5679969", "0.5618374", "0.5610827", "0.5562094", "0.5448883", "0.53888565", "0.53632945", "0.53583044", "0.52997434", "0.52991325", "0.5291738", "0.5277768", "0.52540904" ]
0.78226066
0
Add link to menu links collection.
def add_link(self, link): if link.category: self._add_menu_item(link, link.category) else: self._menu_links.append(link)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def add_link(self, link):\n raise NotImplementedError", "def add_links(self, *args):\n for link in args:\n self.add_link(link)", "def links(self, links):\n\n self.container['links'] = links", "def update_links(self, new_link):\r\n self.__links = new_link", "def link(self, link):\n\n self.container['link'] = link", "def add_pressed(self):\n new_link = self.link_field.text\n self.links_list.add_widget(LinkIconListItem(self, text=new_link))\n self.link_field.text = \"\"\n self.add_link_button.disabled = True\n self.link_field.focus = False\n self.link_field.helper_text = \"Please enter a valid url\"\n self.links.append(new_link)\n utils.update_data()\n utils.data[self.parent_screen.name][\"links\"] = self.links\n utils.save_project_data(utils.data[self.parent_screen.name],\n f\"{utils.data[self.parent_screen.name]['proj_path']}/project_data.json\")", "def _add_link_to_targets(self, link):\n for target in self._selected_data():\n target.add_component_link(link)", "def add_link(self, from_doc_id, to_doc_id):\n # Insert the doc_id to doc_id combination as a tuple and append to list\n # of links\n if (from_doc_id,to_doc_id) not in self._visited_links:\n self._links.append((from_doc_id,to_doc_id))", "def add_link():\n return True", "def links(self, links):\n self._links = links", "def setAddLinks(self,value):\n self.PDFreactorConfiguration.in1[\"addLinks\"] = value", "def _link_items(self):\n pass", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def addLink(self, name, alias, **attrs):\n self.globalConfig.pageList.append(name)\n self.globalConfig.pageAttributes[name] = dict(attrs)\n self.globalConfig.pageAttributes[name]['alias'] = alias", "def add_link(self, start, end, link_type):\n\n key = str(start.id()) + \"_\" + link_type + \"_\" + str(end.id())\n\n # Add link only if it does not exist yet\n if (key in self.__links):\n return\n\n js = \"links.push({source: \" + self.__nodes[start.id()] + \", target: \" + self.__nodes[end.id()] + \"});\"\n\n d3_link_id = self.frame.evaluateJavaScript(js) - 1\n\n self.__links[key] = d3_link_id", "def link(self, link):\n\n self._set_field(\"link\", link)", "def menu_link(\n self,\n menu: 'pygame_menu.Menu',\n link_id: str = ''\n ) -> 'pygame_menu.widgets.MenuLink':\n if isinstance(menu, type(self._menu)):\n # Check for recursive\n if menu == self._menu or menu.in_submenu(self._menu, recursive=True):\n raise ValueError(\n f'Menu \"{menu.get_title()}\" is already on submenu structure,'\n f' recursive menus lead to unexpected behaviours. 
For '\n f'returning to previous menu use pygame_menu.events.BACK '\n f'event defining an optional back_count number of menus to '\n f'return from, default is 1'\n )\n\n else:\n raise ValueError('menu object is not a pygame_menu.Menu class')\n\n # noinspection PyProtectedMember\n widget = MenuLink(\n menu=menu,\n menu_opener_handler=self._menu._open,\n link_id=link_id\n )\n self.configure_defaults_widget(widget)\n self._append_widget(widget)\n self._add_submenu(menu, widget)\n\n return widget", "def add_admin_links(sender, **kwds):\n\n priv_check = kwds['request'].user.has_privilege\n\n entries = [('list', url_for('admin/news'), _(u'Overview'))]\n\n if priv_check(NEWS_CREATE) or priv_check(NEWS_EDIT):\n entries.append(('edit', url_for('admin/news/new'), _(u'Write')))\n\n kwds['navbar'].insert(1,(('news', url_for('admin/news'), _(u'News'), entries)))", "def add_to_menu ( self, menu_item ):\r\n pass" ]
[ "0.7691359", "0.72404385", "0.70956004", "0.7082974", "0.6781136", "0.67305875", "0.66682863", "0.6609667", "0.6557421", "0.6507363", "0.64803123", "0.6463921", "0.64266014", "0.63898194", "0.63898194", "0.63898194", "0.63898194", "0.63898194", "0.63898194", "0.63898194", "0.63898194", "0.63898194", "0.63898194", "0.63898194", "0.63861287", "0.629133", "0.6290942", "0.62825334", "0.6280614", "0.62632614" ]
0.7950336
0