Dataset columns:
  query            string   (length 9 to 9.05k)
  document         string   (length 10 to 222k)
  metadata         dict
  negatives        sequence (length 30)
  negative_scores  sequence (length 30)
  document_score   string   (length 4 to 10)
  document_rank    string   (2 classes)
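Each row below follows that schema: a natural-language query, the positive code document it describes, a metadata field stating the training objective, 30 mined negative snippets with their scores, and the positive document's own score and rank. As a rough sketch of how such a dump could be loaded and inspected (the Hugging Face datasets library and the dataset identifier are illustrative assumptions, not taken from this dump):

from datasets import load_dataset  # assumed tooling; any JSONL reader would do

# "org/code-retrieval-triplets" is a placeholder name, not the real identifier.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])             # natural-language description (often a docstring)
print(row["document"])          # the positive code snippet for that query
print(len(row["negatives"]))    # 30 mined negative snippets
print(row["negative_scores"])   # 30 similarity scores, aligned with `negatives`
print(row["document_score"], row["document_rank"])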
Returns timings for the parts where the video should be kept
def getSectionsOfNewVideo(silences, duration):
    return [0.0] + silences + [duration]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def video_time():\r\n # The full time has the form \"0:32 / 3:14\"\r\n full_time = world.css_text('div.vidtime')\r\n\r\n # Split the time at the \" / \", to get [\"0:32\", \"3:14\"]\r\n elapsed_str, duration_str = full_time.split(' / ')\r\n\r\n # Convert each string to seconds\r\n return (parse_time_str(elapsed_str), parse_time_str(duration_str))", "def video_times():\n p = parse_cmdline(get_parser=get_parser_times)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.DEBUG)\n vis.show_video_abi_glm_times(\n start_date=p.start_time,\n end_date=p.end_time,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir,\n sector=p.sector,\n area=p.area)\n print(\"Files written to:\", p.outdir)", "def get_timings(self):\n exp=lib.is_Exposure_d8(self.hcam,7)*1E-3\n frame_rate=lib.is_SetFrameRate(self.hcam,0x8000)\n return self.AcqTimes(exp,1./frame_rate)", "def listTimesinQ(urlPartsQ, verboseLogs):\n files = readUrlDir(urlPartsQ, verboseLogs, '.jpg')\n if files:\n return list(map(lambda x: {'time': int(x[:-4])}, files))\n return None", "def get_talks_gt_one_hour(videos):\r\n return [video for video in videos if iso8601DurationToSeconds(video.duration) > 60 * 60]", "def timings(self):\r\n return self._timings", "def get_timings(log):\n log.info('Doing get_timings')\n timingfile = os.path.join(os.environ['decor'], 'decorcode',\n 'stim_timing_info', 'Timing_layout.txt')\n timingf = open(timingfile, 'r')\n run = []\n clip = []\n trs = []\n for line in timingf:\n i, j, k = line.split()\n run.append(i)\n clip.append(j)\n trs.append(k)\n return (run, clip, trs)", "def duration():\r\n elapsed_time, duration = video_time()\r\n return duration", "def get_info(frame_or_sketch_or_vid_path):\n if \".mp4\" not in frame_or_sketch_or_vid_path:\n # invalid file path ()\n # TODO: allow other video extensions\n return None\n\n ret_dict = {}\n ret_dict[\"path\"] = frame_or_sketch_or_vid_path\n ret_dict[\"file_name\"] = utils.get_file_name(frame_or_sketch_or_vid_path)\n ret_dict[\"file_ext\"] = utils.get_file_ext(frame_or_sketch_or_vid_path)\n\n # find video file name = video_id\n file_dir_last = utils.get_nth_parentdir(frame_or_sketch_or_vid_path)\n\n # file_dir_full = utils.get_file_path(frame_or_sketch_or_vid_path)\n # file_name = utils.get_full_file_name(frame_or_sketch_or_vid_path)\n\n video_id = f\"{file_dir_last.split('.mp4_')[0]}.mp4\"\n start_end_time = file_dir_last.split(\".mp4_\")[1]\n start_end_time_parts = start_end_time.split(\"_\")\n\n # OLD\n # tmp = frame_or_sketch_or_vid_path.rsplit(\"video_\")[1].replace(\".mp4\", \"\")\n # tmp_parts = tmp.split(\"/\")[0].split(\"_\") # remove frame part if existent\n # ret_dict[\"video_id\"] = tmp_parts[0]\n # ret_dict[\"start_time\"] = float(tmp_parts[1])\n # ret_dict[\"end_time\"] = ret_dict[\"start_time\"]\n\n ret_dict[\"video_id\"] = video_id\n ret_dict[\"start_time\"] = float(start_end_time_parts[0])\n if len(start_end_time_parts) > 1:\n ret_dict[\"end_time\"] = float(start_end_time_parts[1])\n\n if ret_dict[\"file_ext\"] == \".jpg\":\n ret_dict[\"frame\"] = int(ret_dict[\"file_name\"].split(\"_\")[1])\n elif ret_dict[\"file_ext\"] == \".json\":\n ret_dict[\"frame\"] = get_sketch_frame(ret_dict[\"path\"])\n else:\n ret_dict[\"fps\"] = get_fps(ret_dict[\"path\"])\n ret_dict[\"start_frame\"] = time_to_frame(ret_dict[\"start_time\"], ret_dict[\"fps\"])\n ret_dict[\"end_frame\"] = time_to_frame(ret_dict[\"end_time\"], ret_dict[\"fps\"])\n 
return ret_dict", "def getTimes():", "def getTimes():", "def getTimes():", "def elapsed_time():\r\n elapsed_time, duration = video_time()\r\n return elapsed_time", "def get_talks_gt_one_hour(videos):\n return [v for v in videos if get_hours(v) >= 1]", "def __get_times(self):\n data = self.simulate_file.readlines()\n data = list(map(str.strip, data))\n data = list(map(float, data))\n start = data[0]\n times = data[1:]\n return (start, times)", "def timings(self):\n if self._C_timings is None:\n raise RuntimeError(\"Cannot extract timings with non-finalized Profiler.\")\n return {field: max(getattr(self._C_timings, field), 10**-6)\n for field, _ in self._C_timings._fields_}", "def get_words_with_end_times(subtitle_file_path):\n\n with open(subtitle_file_path) as subtitle_file:\n\n # Remove first 4 lines (containing meta information)\n for j in range(0, 4):\n subtitle_file.readline()\n\n text = subtitle_file.read()\n\n # Check if the subtitle file supports individual word times\n if text.find(\"<c>\") == -1:\n print(\"Individual word times are not supported for file: \" + subtitle_file_path)\n return None, None\n\n chunks = text.split(\" \\n\\n\") # split into chunks for easier data processing\n\n words = list()\n word_end_times = list()\n\n for chunk in chunks:\n chunk_lines = chunk.split(\"\\n\")\n words_line = chunk_lines[2]\n\n words_in_chunk = []\n word_end_times_in_chunk = []\n\n first_word_end_index = words_line.find(\"<\")\n if first_word_end_index != -1:\n first_word = words_line[\n 0:first_word_end_index] # get the first word (can't be found using method below)\n\n words_in_chunk = re.findall(\"<c> [\\S]*</c>\", words_line) # get all words\n words_in_chunk = [w[4:-4] for w in words_in_chunk] # strip <c> and <c/>\n\n word_end_times_in_chunk = re.findall(\"<\\d\\d:\\d\\d:\\d\\d.\\d\\d\\d>\", words_line) # get all word end times\n word_end_times_in_chunk = [t[1:-1] for t in word_end_times_in_chunk] # strip < and >\n else:\n # Only one word\n first_word = words_line\n\n last_time = chunk_lines[4][17:29] # end time for the last word\n\n words_in_chunk.insert(0, first_word)\n word_end_times_in_chunk.append(last_time)\n\n words.extend(words_in_chunk)\n word_end_times.extend(word_end_times_in_chunk)\n\n # For the last chunk we have to get the word end time from somewhere else\n first_line_in_last_chunk = chunks[-1].split(\"\\n\")[0]\n last_time = first_line_in_last_chunk[17:29]\n word_end_times.pop()\n word_end_times.append(last_time)\n\n if len(words) != len(word_end_times):\n print(\"Warning: word count does not match times count\")\n\n return words, word_end_times", "def get_full_secs(self):\n return _uhd_swig.time_spec_t_get_full_secs(self)", "def timings_across_runs(self):\n\n\t\t# first determine individual run duration (to make sure that stimulus timings of all runs are correct)\n\t\trun_duration = []\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr, nr_trs = round(niiFile.rtime*1)/1000.0, niiFile.timepoints\n\t\t\trun_duration.append(tr * nr_trs)\n\t\trun_duration = np.r_[0,np.cumsum(np.array(run_duration))]\n\n\t\t# timing information stimuli\n\t\tstim_info = []\n\t\trun = 0\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tstim_events = np.loadtxt(self.runFile(stage = 'processed/behavior', run = r, extension = '.txt', postFix = ['stim' ,'all','task']))\n\t\t\tstim_events[:,:2] += run_duration[run]\n\t\t\tstim_info.append(stim_events)\n\t\t\trun += 
1\n\n\t\t# save stim_info as text_file\t\n\t\tnp.savetxt(self.runFile(stage = 'processed/behavior', postFix = ['stim_info_all'],extension = '.txt'), np.vstack(stim_info), fmt = '%3.2f', delimiter = '\\t')", "def preparation_time_in_minutes(number_of_layers):\n return number_of_layers * 2", "def get_video_parts(video_path):\n parts = video_path.split(os.path.sep)\n print(\"parts: \", parts)\n filename = parts[7]\n filename_no_ext = filename.split('.')[0]\n classname = parts[6]\n train_or_test = parts[5]\n\n return train_or_test, classname, filename_no_ext, filename", "def get_times():\n try:\n with open(\"/var/lib/cloud/instance/obj.pkl\", \"r\") as file_:\n data = pickle.load(file_)\n except IOError:\n return\n\n meta = data.metadata.get(\"meta\")\n if meta is None:\n raise EnvironmentError(\"Wrong virtualization environment.\")\n\n keys = [x for x in meta.keys() if re.search(\".*Wall.*Time\", x, re.IGNORECASE)]\n if len(keys) != 1:\n if len(keys) == 0:\n raise ValueError(\"No meta-data entry with key 'WallTime'\")\n else:\n raise ValueError(\"Ambiguous meta-data found: %s\" % keys)\n\n walltime = int(meta.get(keys[0]))\n starttime = int(os.stat(\"/var/lib/cloud/instance/obj.pkl\").st_ctime)\n return walltime, starttime", "def get_time_range(vid_folder_string):\n parts = vid_folder_string.split(\"_\")\n tc_start = -1.0\n tc_end = -1.0\n if len(parts) == 3:\n # segment is single frame\n tc_start = parts[2]\n tc_end = parts[2]\n pass\n elif len(parts) == 4:\n # segment is multiframe\n tc_start = parts[2]\n tc_end = parts[3]\n else:\n print(\"Invalid Segment: \" + vid_folder_string)\n return float(tc_start), float(tc_end)", "def get_mov_timestamps(filename):\n\n atom_header_size = 8\n # difference between Unix epoch and QuickTime epoch, in seconds\n epoch_adjuster = 2082844800\n\n creation_time = modification_time = None\n\n # search for moov item\n with open(filename, \"rb\") as f:\n while True:\n atom_header = f.read(atom_header_size)\n # ~ print('atom header:', atom_header) # debug purposes\n if atom_header[4:8] == b'moov':\n break # found\n else:\n atom_size = struct.unpack('>I', atom_header[0:4])[0]\n f.seek(atom_size - 8, 1)\n\n # found 'moov', look for 'mvhd' and timestamps\n atom_header = f.read(atom_header_size)\n if atom_header[4:8] == b'cmov':\n raise RuntimeError('moov atom is compressed')\n elif atom_header[4:8] != b'mvhd':\n raise RuntimeError('expected to find \"mvhd\" header.')\n else:\n f.seek(4, 1)\n creation_time = struct.unpack('>I', f.read(4))[0] - epoch_adjuster\n creation_time = datetime.fromtimestamp(creation_time)\n if creation_time.year < 1990: # invalid or censored data\n creation_time = None\n\n modification_time = struct.unpack('>I', f.read(4))[0] - epoch_adjuster\n modification_time = datetime.fromtimestamp(modification_time)\n if modification_time.year < 1990: # invalid or censored data\n modification_time = None\n\n return creation_time, modification_time", "def get_talks_lt_twentyfour_min(videos):\r\n return [video for video in videos if iso8601DurationToSeconds(video.duration) < 60 * 24]", "def test_find_parallel_duration():\n pt2_example = {\n \"C\": [],\n \"A\": [\"C\"],\n \"F\": [\"C\"],\n \"B\": [\"A\"],\n \"D\": [\"A\"],\n \"E\": [\"B\", \"D\", \"F\"],\n }\n assert find_parallel_duration(pt2_example, 2, 0) == 15", "def GetCoordinatedVideoTiming(edid, start_index):\n if not(edid[start_index] == edid[start_index + 1] == edid[start_index + 2]\n == 0x00):\n return CoordinatedVideoTiming(edid, start_index)\n else:\n return None", "def trim_by_points(file: 
str,\r\n start_time: int,\r\n end_time: int,\r\n factor: str = 's') -> str:\r\n idx = 1\r\n start_time = int(start_time)\r\n end_time = int(end_time)\r\n\r\n _factor = 1 if factor == 's' else 60\r\n total_limit = int(duration(file) / _factor)\r\n\r\n if factor == 'p':\r\n start_time = int((start_time / 100) * total_limit)\r\n end_time = int((end_time / 100) * total_limit)\r\n total_limit = 100\r\n\r\n if end_time < start_time:\r\n raise Exception('Ending time is less than starting time.')\r\n else:\r\n if end_time >= total_limit:\r\n if factor == 'p':\r\n print('Video doesn\\'t have frame to process.')\r\n else:\r\n print('Video doesn\\'t have frames to process and will max out.')\r\n end_time = total_limit\r\n elif start_time < 0:\r\n print('Start should be greater than 0.')\r\n start_time = 0\r\n trim_video(file, filename(file, idx), start_time * _factor,\r\n end_time * _factor)\r\n return filename(file, idx)", "def get_timestamps(self):\n\n start_timestamp = (self._current_playback[\"timestamp\"] - self._current_playback[\"progress_ms\"]) / 1000\n end_timestamp = (self._current_playback[\"timestamp\"] + self._current_playback[\"item\"][\"duration_ms\"]) / 1000\n\n return start_timestamp, end_timestamp", "def get_spec_times(self, fit):\n\n start = asarray(self.raw_results[fit][\"start\"])\n stop = asarray(self.raw_results[fit][\"stop\"])\n return start, stop" ]
[ "0.6023563", "0.59063125", "0.56466246", "0.5624058", "0.56141603", "0.560595", "0.5565088", "0.5434631", "0.5414467", "0.538054", "0.538054", "0.538054", "0.5377151", "0.5325003", "0.5311827", "0.5305418", "0.5302824", "0.5241184", "0.5222898", "0.5206784", "0.5206103", "0.5200956", "0.519829", "0.51944464", "0.51872534", "0.5187232", "0.5185009", "0.5168299", "0.51353985", "0.5132375" ]
0.5964529
1
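The metadata field in each row declares a triplet objective over (query, document, negatives). A minimal sketch of expanding one row for that objective, assuming the sentence-transformers library purely as an illustrative choice (the dump itself does not prescribe a framework):

from sentence_transformers import InputExample  # assumed framework, for illustration only

def row_to_triplets(row, max_negatives=5):
    # One (anchor, positive, negative) example per mined negative snippet.
    return [
        InputExample(texts=[row["query"], row["document"], negative])
        for negative in row["negatives"][:max_negatives]
    ]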
Remove implicit resolvers for a particular tag. Takes care not to modify resolvers in super classes. We want to load datetimes as strings, not dates, because we go on to serialise as JSON, which doesn't have the advanced types of YAML and leads to incompatibilities down the track.
def remove_implicit_resolver(cls, tag_to_remove):
    if 'yaml_implicit_resolvers' not in cls.__dict__:
        cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
    for first_letter, mappings in cls.yaml_implicit_resolvers.items():
        cls.yaml_implicit_resolvers[first_letter] = [
            (tag, regexp) for tag, regexp in mappings if tag != tag_to_remove
        ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_implicit_resolver(cls, tag_to_remove):\n if 'yaml_implicit_resolvers' not in cls.__dict__:\n cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()\n\n for first_letter, mappings in cls.yaml_implicit_resolvers.items():\n cls.yaml_implicit_resolvers[first_letter] = [(tag, regexp)\n for tag, regexp in mappings\n if tag != tag_to_remove]", "def deregister_specialization(self, t):\n t = self.canon(t)\n self.cython_ctypes.pop(t, None)\n self.cython_cytypes.pop(t, None)\n self.cython_pytypes.pop(t, None)\n self.cython_cimports.pop(t, None)\n self.cython_cyimports.pop(t, None)\n self.cython_pyimports.pop(t, None)\n self.clearmemo()", "def deconstruct(self):\n return super(\n AutoDateTimeField, self).deconstruct()", "def untag_resources_with_options(\n self,\n request: dds_20151201_models.UntagResourcesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UntagResourcesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.all):\n query['All'] = request.all\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_id):\n query['ResourceId'] = request.resource_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.resource_type):\n query['ResourceType'] = request.resource_type\n if not UtilClient.is_unset(request.tag_key):\n query['TagKey'] = request.tag_key\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UntagResources',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UntagResourcesResponse(),\n self.call_api(params, req, runtime)\n )", "def unparse(dt_or_rel):\n if isinstance(dt_or_rel, SMPPRelativeTime):\n return unparse_relative_time(dt_or_rel)\n return unparse_absolute_time(dt_or_rel)", "def removeEmbedded(self, tag):\n self.embeddedTags = self.embeddedTags[:-1]", "async def untag_resources_with_options_async(\n self,\n request: dds_20151201_models.UntagResourcesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UntagResourcesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.all):\n query['All'] = request.all\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_id):\n query['ResourceId'] = request.resource_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if 
not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.resource_type):\n query['ResourceType'] = request.resource_type\n if not UtilClient.is_unset(request.tag_key):\n query['TagKey'] = request.tag_key\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UntagResources',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UntagResourcesResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def untag(self, tag):\n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n if isinstance(tag, six.string_types):\n try:\n tag = Tag.objects.get(slug=makeslug(tag), owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n self.tags.remove(tag)", "def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []", "def deregister(cls, plugin: Union[str, Sequence[str]], *, silent: bool = False) -> None:\n\n reset_convert_cache = False\n\n if isinstance(plugin, str):\n plugin = [plugin]\n\n mapping = None # type: Optional[Dict[str, Any]]\n for p in plugin:\n if p == '*':\n cls.CS_MAP.clear()\n cls.DE_MAP.clear()\n cls.CAT_MAP.clear()\n cls.FILTER_MAP.clear()\n cls.CONTRAST_MAP.clear()\n cls.INTERPOLATE_MAP.clear()\n cls.CCT_MAP.clear()\n cls.FIT_MAP.clear()\n return\n\n ptype, name = p.split(':', 1)\n if ptype == 'space':\n mapping = cls.CS_MAP\n reset_convert_cache = True\n elif ptype == \"delta-e\":\n mapping = cls.DE_MAP\n elif ptype == 'cat':\n mapping = cls.CAT_MAP\n elif ptype == 'filter':\n mapping = cls.FILTER_MAP\n elif ptype == 'contrast':\n mapping = cls.CONTRAST_MAP\n elif ptype == 'interpolate':\n mapping = cls.INTERPOLATE_MAP\n elif ptype == 'cct':\n mapping = cls.CCT_MAP\n elif ptype == \"fit\":\n mapping = cls.FIT_MAP\n if name == 'clip':\n if reset_convert_cache: # pragma: no cover\n cls._get_convert_chain.cache_clear()\n if not silent:\n raise ValueError(\n \"'{}' is a reserved name gamut mapping/reduction and cannot be removed\".format(name)\n )\n continue # pragma: no cover\n else:\n if reset_convert_cache: # pragma: no cover\n cls._get_convert_chain.cache_clear()\n raise ValueError(\"The plugin category of '{}' is not recognized\".format(ptype))\n\n if name == '*':\n mapping.clear()\n elif name in mapping:\n del mapping[name]\n elif not silent:\n if reset_convert_cache:\n cls._get_convert_chain.cache_clear()\n raise ValueError(\"A plugin of name '{}' under category '{}' could not be found\".format(name, ptype))\n\n if reset_convert_cache:\n cls._get_convert_chain.cache_clear()", "def test_untag_none(self):\n untag = document_fields.DocumentFields.untag\n fields_to_test = {\n 'foo': 'base',\n '[email protected]': None,\n }\n fields = copy.deepcopy(fields_to_test)\n self.assertDictEqual({\n 'foo': 'base',\n }, untag(fields, locale=None, params={'env': None}))\n self.assertDictEqual({\n 'foo': None,\n }, untag(fields, locale=None, params={'env': 'prod'}))\n\n fields_to_test = {\n 'nested': {\n 'foo': 'nested-base',\n },\n 'nested@de': {\n 'foo': 'nested-de-base',\n '[email protected]': None,\n }\n }\n fields = copy.deepcopy(fields_to_test)\n self.assertDictEqual({\n 'nested': {\n 'foo': 
'nested-base',\n },\n }, untag(fields, locale=None, params={'env': None}))\n self.assertDictEqual({\n 'nested': {\n 'foo': 'nested-base',\n },\n }, untag(fields, locale=None, params={'env': 'dev'}))\n self.assertDictEqual({\n 'nested': {\n 'foo': None,\n },\n }, untag(fields, locale='de', params={'env': 'prod'}))", "def test_untag_with_no_base(self):\n fields_to_test = {\n 'foo@de': 'bar-de',\n 'baz@de': {\n 'fum@de': 'boo-de'\n },\n }\n fields = copy.deepcopy(fields_to_test)\n self.assertDictEqual({}, document_fields.DocumentFields.untag(fields))\n self.assertDictEqual({\n 'foo': 'bar-de',\n 'baz': {\n 'fum': 'boo-de',\n },\n }, document_fields.DocumentFields.untag(fields, locale='de'))", "def untag(self, uuid, tags=None):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.untag(uuid, tags)", "def deregister_refinement(self, name):\n self.refined_types.pop(name, None)\n self.cython_c2py_conv.pop(name, None)\n self.cython_py2c_conv.pop(name, None)\n self.cython_cimports.pop(name, None)\n self.cython_cyimports.pop(name, None)\n self.cython_pyimports.pop(name, None)\n self.clearmemo()", "def remove_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.difference_update(tags)\n self.tags.difference_update(tags)", "def disable_importlib_metadata_finder(metadata):\n try:\n import importlib_metadata\n except ImportError:\n return\n if importlib_metadata is metadata:\n return\n to_remove = [\n ob\n for ob in sys.meta_path\n if isinstance(ob, importlib_metadata.MetadataPathFinder)\n ]\n for item in to_remove:\n sys.meta_path.remove(item)", "def resolve():\n while _TO_RESOLVE:\n obj = _TO_RESOLVE.pop()\n annotations(obj)", "def untag_resource(Resource=None, TagKeys=None):\n pass", "def object_deserializer(obj):\n for key, val in obj.items():\n if isinstance(val, six.string_types) and DATETIME_REGEX.search(val):\n try:\n obj[key] = dates.localize_datetime(parser.parse(val))\n except ValueError:\n obj[key] = val\n return obj", "def deconstruct(self):\n name, path, args, kwargs = super(DateTimeListField, self).deconstruct()\n kwargs['objects'] = self.objects\n return name, path, args, kwargs", "def removeResolver(self, *args):\n return _libsbml.SBMLResolverRegistry_removeResolver(self, *args)", "def unregister ():\n dsf_prop_export.unregister ()\n dsf_geom_export.unregister ()\n dsf_wm_import.unregister ()\n dsf_pose_import.unregister ()\n dsf_arm_import.unregister ()\n dsf_uvset_import.unregister ()\n dsf_morph_export.unregister ()\n dsf_morph_import.unregister ()\n dsf_geom_import.unregister ()", "def untag():\n form = TagSubscriptionForm(hidden_mode=True)\n if not form.validate_on_submit():\n abort(403)\n\n subscription = current_user.subscriptions.filter_by(\n channel_id=form.channel_id.data\n ).first_or_404()\n tag = current_user.tags.filter_by(name=form.tag_name.data).first_or_404()\n\n results = subscription.untag(tag.id)\n response = {\"success\": results}\n return jsonify(response)", "def deserialise(obj):\n if isinstance(obj, str) and 12 < len(obj) < 40:\n try:\n # some tests try tricking us with timezones - but we assume naive datetime objects in utc\n # 1970-01-21T21:14:37+12:45 -> 1970-01-21 08:29:37 (1970-01-21T08:29:37)\n x = obj\n obj = du_parser.parse(obj).astimezone(tz=du_tz.tzutc()).replace(tzinfo=None)\n LOG.info('datetime rehydrated: %s -> %s (%s)' % (x, obj, obj.isoformat()))\n except Exception as e:\n LOG.debug('not a date: %s (%s)' % (obj, e))\n return obj", "def resolve_tags(tags=None):\n\n all_tags = {}\n for provider in 
_run_context_provider_registry:\n if provider.in_context():\n # TODO: Error out gracefully if provider's tags are not valid or have wrong types.\n all_tags.update(provider.tags())\n\n if tags is not None:\n all_tags.update(tags)\n\n return all_tags", "def resolver():\n if RESOLVER:\n return RESOLVER\n path = str(pathlib.Path(__file__).parents[1].joinpath(\"schema\", \"app.json\"))\n with open(path) as stream:\n schema = json.load(stream)\n globals()[\"RESOLVER\"] = RefResolver(\n \"https://schema.timeflux.io/app.json\", None\n ).from_schema(schema)\n return RESOLVER", "def add_deserializer(config: Configurator, name: str, func: t.Callable) -> None:\n config.registry.settings.setdefault(\"pyramid_openapi3_deserializers\", {})\n reg = config.registry.settings[\"pyramid_openapi3_deserializers\"]\n reg[name] = func", "async def removetags(self, ctx, tag=None):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\treturn await ctx.send('Can\\'t find Tag: '.format(tag))\t\r\n\r\n\t\tdel Tag[tag]\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)\r\n\r\n\t\tawait ctx.send('Removed Tag: '.format(tag))", "def clear_tags(self) -> dict:\n\n return {t: self.tags[t] for t in (self.tags or {}) if t.startswith('~')} or None", "def RemoveTags(obj):\n tags = obj.GetTags() # Get tags\n for t in tags: # Iterate through tags\n t.Remove() # Remove tag" ]
[ "0.72351325", "0.4945799", "0.48895606", "0.4787134", "0.47753465", "0.4747511", "0.47394142", "0.47089127", "0.46628618", "0.46158558", "0.46017975", "0.45002973", "0.44972196", "0.4452387", "0.44437444", "0.44306776", "0.43933737", "0.43895388", "0.43718284", "0.43617448", "0.43536296", "0.43428952", "0.43308246", "0.43164062", "0.43063393", "0.42960256", "0.4295386", "0.42840233", "0.42830437", "0.4269642" ]
0.72606164
0
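In the rows shown so far, document_rank coincides with the number of negatives whose score exceeds document_score: 1 in the first row (one negative at 0.6023563 beats 0.5964529) and 0 in the row above (0.72606164 beats every negative). That reading is inferred from the visible data rather than documented; a small helper under that assumption:

def infer_document_rank(document_score, negative_scores):
    # Count how many mined negatives outscore the positive document.
    doc = float(document_score)
    return sum(float(score) > doc for score in negative_scores)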
Try to get consent status for a single email address
def test_get_one(self, requests_mock, accepts_marketing):
    matcher = requests_mock.post(
        f'{settings.CONSENT_SERVICE_BASE_URL}'
        f'{consent.CONSENT_SERVICE_PERSON_PATH_LOOKUP}',
        json={
            'results': [
                {
                    'email': '[email protected]',
                    'consents': [
                        CONSENT_SERVICE_EMAIL_CONSENT_TYPE,
                    ] if accepts_marketing else [],
                },
            ],
        },
        status_code=status.HTTP_200_OK,
    )
    resp = consent.get_one('[email protected]')
    assert resp == accepts_marketing

    assert matcher.called_once
    assert matcher.last_request.query == 'limit=1'
    assert matcher.last_request.json() == {'emails': ['[email protected]']}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Get_applicant_status(self, email):\n status = None\n if email in self.Attendees:\n status = ApplicantStatus.Accepted\n elif email in self.Waitlist:\n status = ApplicantStatus.Waitlisted\n else:\n raise MissingAddressException(email)\n return status", "def get(self, request):\n user = self.request.user\n return Response({\"status\": user.confirmed_email}, status=status.HTTP_200_OK)", "def consent_check():\n\n auth = current.auth\n\n person_id = auth.s3_logged_in_person()\n if not person_id:\n return None\n\n has_role = auth.s3_has_role\n if has_role(\"ADMIN\"):\n required = None\n elif has_role(\"VOUCHER_ISSUER\"):\n required = [\"STORE\", \"RULES_ISS\"]\n else:\n required = None\n\n if required:\n consent = current.s3db.auth_Consent(required)\n pending = consent.pending_responses(person_id)\n else:\n pending = None\n\n return pending", "def consent_check():\n\n auth = current.auth\n\n person_id = auth.s3_logged_in_person()\n if not person_id:\n return None\n\n has_role = auth.s3_has_role\n if has_role(\"ADMIN\"):\n required = None\n elif has_role(\"VOUCHER_ISSUER\"):\n required = [\"STORE\", \"RULES_ISS\"]\n else:\n required = None\n\n if required:\n consent = current.s3db.auth_Consent(required)\n pending = consent.pending_responses(person_id)\n else:\n pending = None\n\n return pending", "def check_email(self, emailid):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': self._lr_object._get_api_secret(),\n 'emailid': emailid}\n url = SECURE_API_URL + \"raas/v1/user/checkemail\"\n return self._lr_object._get_json(url, payload)", "def request_verification(data):\n if 'email' in data:\n if user_exists(data['email']):\n return get_user_id(data['email'])\n else:\n return 401\n else:\n return 400", "def test_get_consent(app, client, session, models, tokens):\n response = client.get(\n \"/consent\", headers={\"Authorization\": f\"Bearer {tokens['read']}\"}\n )\n assert response.status_code == 200", "def search_email(self, accountId, email_address):\n p = {\"email\": email_address}\n if accountId:\n p['accountId'] = accountId\n return self.get_json('/verification/search', params=p)", "def test_confirm_cannot_contact_consent_when_getting_oauth2_token(self):\n # First perform an add request that creates the flow request with status 'PENDING'\n res = self._add_flow_request()\n confirm_id = res.json()['confirm_id']\n process_id = res.json()['process_id']\n callback_url = 'http://127.0.0.1/'\n\n # Then confirm the request. 
This will cause a redirect to consent manager\n self.client.login(username='duck', password='duck')\n res = self.client.get('/v1/flow_requests/confirm/?confirm_id={}&callback_url={}&action=add'.format(\n confirm_id, callback_url))\n self.assertRedirects(res, \"{}?process_id={}&success=false&error={}\".format(callback_url, process_id, ERRORS_MESSAGE['INTERNAL_GATEWAY_ERROR']),\n fetch_redirect_response=False)", "async def fetch_account_status(account_id):\n res_object = requests.get(_ACCOUNTS_URL.format(account_id=account_id))\n return res_object.json() if res_object.status_code == 200 else {}", "def getMyStatus(self):\n my_rpath = self.getCalendar().getRpath()\n for attendee in self.attendees:\n if attendee['rpath'] == my_rpath:\n return attendee['status']", "def claim_email(request):\n email = request.POST.get('email', '')\n email_user = User.objects.filter(email=email)\n payload = {\n 'res': 'failed'\n }\n if email_user.exists() and \\\n not email_user[0].profile.send_mail:\n request.user.profile.add_email(email)\n payload['res'] = 'success'\n\n return payload", "def cmd_account_verification_status(client, args):\n email_verification_status = client.get_email_verification_status(args.username)\n generate_output({'email_verification_status': email_verification_status})", "def get_authorization_url(email_address, state):\n flow = flow_from_clientsecrets(CLIENTSECRET_LOCATION, ' '.join(SCOPES))\n flow.params['access_type'] = 'offline'\n flow.params['approval_prompt'] = 'force'\n flow.params['user_id'] = email_address\n flow.params['state'] = state\n flow.params['origin'] = ORIGIN\n return flow.step1_get_authorize_url(REDIRECT_URI)", "def request_verification_bypass(request, env, email):\n if request.method == 'POST':\n oauth_client = OAUTHCLIENT(env)\n token = oauth_client.get_token()\n content = {'message': email + \" has been requested for By-pass to \" + env}\n\n if 'access_token' in token:\n if env == 'qa32':\n host = 'http://qajb101.p2pcredit.local/users/email/'\n elif env == 'stg':\n host = 'http://stage-api-proxy-A.vip.c1.stg/users/email/'\n elif env == 'qa20':\n host = 'http://np97.c1.dev/users/email/'\n\n # create header with access token\n headers = {'Authorization': token['token_type'] + ' ' + token['access_token']}\n\n # request email verification by-pass with access-token\n response = requests.get(\n host + email,\n headers=headers\n )\n\n response_json = response.json()\n\n # build response message\n if response_json['email_exists']:\n if response_json['activation_key'] == \"\":\n content['result'] = \"VERIFIED\"\n content['message'] = email + \" is auto-verified on \" + env\n else:\n content['result'] = \"NOT VERIFIED\"\n content['message'] = email + \" is not verified yet on \" + env + \\\n \". 
Please verify your email by clicking 'Verify Email' link.\"\n else:\n content['result'] = \"USER NOT FOUND\"\n content['message'] = email + \" is not found on \" + env\n\n response_status = status.HTTP_200_OK\n content['response'] = response_json\n else:\n content['result'] = str(token)\n response_status = status.HTTP_500_INTERNAL_SERVER_ERROR\n content['response'] = 'No token generated'\n\n return Response(content, status=response_status)", "def get_credentialing_status(self):\n if self.is_credentialed:\n return 'Credentialed'\n else:\n application = self.credential_applications.last()\n if not application:\n return 'No application found'\n\n if application.status == CredentialApplication.Status.PENDING:\n return application.get_review_status()\n\n return 'No application found'", "def get_user_info(user_email: str) -> Tuple[bool, dict]:\n url = f'https://jsonplaceholder.typicode.com/users?email={user_email}'\n\n response = get(url)\n\n if response.status_code == 200 and response.json():\n return True, response.json()[0]\n\n return False, {}", "def verifysubscriptionstatusinaccounttab():\n pass", "def _handle_consent_confirmation(user, is_confirmed):\n if is_confirmed == \"yes\":\n # user has already given consent, continue flow\n response = server.create_authorization_response(grant_user=user)\n else:\n # user did not give consent\n response = server.create_authorization_response(grant_user=None)\n return response", "def check_db(self, email, clean_type=1):\r\n try:\r\n sql = \"\"\"\r\n SELECT * FROM emails WHERE email = '{}' \r\n AND clean_type = {}\r\n AND email_status IN('clean','catch-all')\r\n LIMIT 1\r\n \"\"\".format(email, clean_type)\r\n self.db.cur.execute(sql)\r\n resp = self.db.cur.fetchone()\r\n if resp:\r\n return {EMAIL:resp[EMAIL]}\r\n except:\r\n print(\"sql error: {}\".format(sql))\r\n return None", "def full_contact_email(self,email):\n if self.contact_api_key is None:\n click.secho(\"[!] No Full Contact API key, so skipping company lookup.\",fg=\"red\")\n return None\n else:\n headers = {\"Authorization\": \"Bearer %s\" % self.contact_api_key}\n payload = {\"email\": email}\n try:\n resp = requests.post(self.person_api_uri,data=json.dumps(payload),headers=headers,timeout=self.requests_timeout)\n if resp.status_code == 200:\n return resp.json()\n elif resp.status_code == 401:\n click.secho(\"[!] Full Contact says the provided API key is no good. Make sure you are using a valid key for API v3.\",fg=\"red\")\n return None\n except requests.exceptions.Timeout:\n click.secho(\"\\n[!] The connection to Full Contact timed out!\",fg=\"red\")\n except requests.exceptions.TooManyRedirects:\n click.secho(\"\\n[!] The connection to Full Contact encountered too many redirects!\",fg=\"red\")\n except requests.exceptions.RequestException as error:\n click.secho(\"\\n[!] The connection to Full Contact encountered an error!\",fg=\"red\")\n click.secho(\"L.. 
Details: {}\".format(error),fg=\"red\")\n return None", "def test_confirm_add_flow_request_invalid_consent(self):\n self.client.login(username='duck', password='duck')\n res = self.client.get(\n '/v1/flow_requests/consents_confirmed/?success=true&consent_confirm_id=aaaaa')\n self.assertEqual(res.status_code, 400)\n self.assertEqual(res.content.decode('utf-8'), ERRORS_MESSAGE['INVALID_DATA'])", "def check_account_status(request):\n\n user = request.user\n\n if not user.is_authenticated():\n return {\n 'current_user': user,\n 'check_account_status_url': reverse('check_account_status'),\n }\n\n session = request.session\n\n flag = session.get('show_email_confirmation_dialog', True)\n show = not user.has_activated_account and flag\n session['show_email_confirmation_dialog'] = False\n\n # We don't want so show email confirmation when use is trying to buy a ticket.\n if 'payment-details' in request.path:\n show = False\n\n return {\n 'current_user': user,\n 'show_email_confirmation_dialog': False,\n 'check_account_status_url': reverse('check_account_status'),\n }", "async def view_email_address(self, ctx):\n author = ctx.message.author\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list:\n await ctx.send(\n \"currently configured email address:{}\".format(self.email_list[str(author.id)]))\n else:\n await ctx.send(\"There is no email address configured..!\")\n return", "def test_get_authorization_status_vendor_v3(self):\n pass", "def getaccount(self, vergeaddress):\n return self.proxy.getaccount(vergeaddress)", "def test_confirm_add_flow_request_wrong_consent_status(self):\n self.client.login(username='duck', password='duck')\n res = self.client.get(\n '/v1/flow_requests/consents_confirmed/?success=true&consent_confirm_id={}'.format(WRONG_CONFIRM_ID))\n self.assertEqual(res.status_code, 302)\n flow_request = FlowRequest.objects.get(flow_id='f_11111')\n self.assertEqual(flow_request.status, FlowRequest.PENDING)\n for channel in Channel.objects.filter(flow_request=flow_request):\n channel.status = Channel.CONSENT_REQUESTED", "def friendship_status_with(self):\n email_query = request.args.get('other')\n if not email_query:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"other\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"other\", 400\n email_token = auth.current_user()[0]\n response = \"no_contact\"\n if self.friend_database.are_friends(email_token, email_query):\n response = \"friends\"\n elif self.friend_database.exists_friend_request(email_query, email_token):\n response = \"received\"\n elif self.friend_database.exists_friend_request(email_token, email_query):\n response = \"sent\"\n return json.dumps({\"status\": response}), 200", "def check_participants_avaliability(emails, startTime, endTime, curr_schedule_id = None):\n\n unavailable, available_userIDs = [], []\n for email in emails:\n available, userID = Participants.is_available(email, startTime, endTime, curr_schedule_id)\n if available:\n available_userIDs.append(userID)\n else:\n unavailable.append(email)\n print(\"Unavailable: \", unavailable)\n return unavailable, available_userIDs", "def get_basic_verification(self):\n\n ach_model = self.ach_model\n\n return {\n \"verification_status\": \"VERIFICATION_PENDING\",\n \"account_type\": ach_model[\"account_type\"],\n \"name_on_account\": ach_model[\"name_on_account\"]\n }" ]
[ "0.66149795", "0.58070236", "0.57529634", "0.57529634", "0.5640133", "0.5606867", "0.55703324", "0.5566895", "0.55520564", "0.5475473", "0.54483193", "0.54387474", "0.541679", "0.53587914", "0.5347143", "0.53188384", "0.5258927", "0.525825", "0.52227", "0.5204715", "0.5188246", "0.5188163", "0.5165948", "0.5151443", "0.5140211", "0.51351434", "0.5110765", "0.51051176", "0.50998056", "0.50938857" ]
0.60032743
1
Try to get consent status for a list of email addresses
def test_get_many(self, requests_mock, accepts_marketing, emails):
    matcher = requests_mock.post(
        f'{settings.CONSENT_SERVICE_BASE_URL}'
        f'{consent.CONSENT_SERVICE_PERSON_PATH_LOOKUP}',
        json={
            'results': [
                {
                    'email': email,
                    'consents': [
                        CONSENT_SERVICE_EMAIL_CONSENT_TYPE,
                    ] if accepts_marketing else [],
                } for email in emails
            ],
        },
        status_code=status.HTTP_200_OK,
    )
    resp = consent.get_many(emails)
    assert resp == {email: accepts_marketing for email in emails}

    assert matcher.called_once
    assert matcher.last_request.query == f'limit={len(emails)}'
    assert matcher.last_request.json() == {'emails': emails}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Get_applicant_status(self, email):\n status = None\n if email in self.Attendees:\n status = ApplicantStatus.Accepted\n elif email in self.Waitlist:\n status = ApplicantStatus.Waitlisted\n else:\n raise MissingAddressException(email)\n return status", "def check_participants_avaliability(emails, startTime, endTime, curr_schedule_id = None):\n\n unavailable, available_userIDs = [], []\n for email in emails:\n available, userID = Participants.is_available(email, startTime, endTime, curr_schedule_id)\n if available:\n available_userIDs.append(userID)\n else:\n unavailable.append(email)\n print(\"Unavailable: \", unavailable)\n return unavailable, available_userIDs", "def test_get_one(self, requests_mock, accepts_marketing):\n matcher = requests_mock.post(\n f'{settings.CONSENT_SERVICE_BASE_URL}'\n f'{consent.CONSENT_SERVICE_PERSON_PATH_LOOKUP}',\n json={\n 'results': [\n {\n 'email': '[email protected]',\n 'consents': [\n CONSENT_SERVICE_EMAIL_CONSENT_TYPE,\n ] if accepts_marketing else [],\n },\n ],\n },\n status_code=status.HTTP_200_OK,\n )\n resp = consent.get_one('[email protected]')\n assert resp == accepts_marketing\n\n assert matcher.called_once\n assert matcher.last_request.query == 'limit=1'\n assert matcher.last_request.json() == {'emails': ['[email protected]']}", "def test_get_consent(app, client, session, models, tokens):\n response = client.get(\n \"/consent\", headers={\"Authorization\": f\"Bearer {tokens['read']}\"}\n )\n assert response.status_code == 200", "def consent_check():\n\n auth = current.auth\n\n person_id = auth.s3_logged_in_person()\n if not person_id:\n return None\n\n has_role = auth.s3_has_role\n if has_role(\"ADMIN\"):\n required = None\n elif has_role(\"VOUCHER_ISSUER\"):\n required = [\"STORE\", \"RULES_ISS\"]\n else:\n required = None\n\n if required:\n consent = current.s3db.auth_Consent(required)\n pending = consent.pending_responses(person_id)\n else:\n pending = None\n\n return pending", "def consent_check():\n\n auth = current.auth\n\n person_id = auth.s3_logged_in_person()\n if not person_id:\n return None\n\n has_role = auth.s3_has_role\n if has_role(\"ADMIN\"):\n required = None\n elif has_role(\"VOUCHER_ISSUER\"):\n required = [\"STORE\", \"RULES_ISS\"]\n else:\n required = None\n\n if required:\n consent = current.s3db.auth_Consent(required)\n pending = consent.pending_responses(person_id)\n else:\n pending = None\n\n return pending", "def test_confirm_cannot_contact_consent_when_getting_oauth2_token(self):\n # First perform an add request that creates the flow request with status 'PENDING'\n res = self._add_flow_request()\n confirm_id = res.json()['confirm_id']\n process_id = res.json()['process_id']\n callback_url = 'http://127.0.0.1/'\n\n # Then confirm the request. 
This will cause a redirect to consent manager\n self.client.login(username='duck', password='duck')\n res = self.client.get('/v1/flow_requests/confirm/?confirm_id={}&callback_url={}&action=add'.format(\n confirm_id, callback_url))\n self.assertRedirects(res, \"{}?process_id={}&success=false&error={}\".format(callback_url, process_id, ERRORS_MESSAGE['INTERNAL_GATEWAY_ERROR']),\n fetch_redirect_response=False)", "def on_call_email_addresses(self):\n if self._on_call_email_addresses is not None:\n return self._on_call_email_addresses\n\n url = 'https://{}.pagerduty.com/api/v1/users/on_call'.format(self.pager_duty_domain_prefix)\n on_call = self._make_request(url, headers={'Authorization': 'Token token=' + self.pager_duty_token})\n users = set() # users can be in multiple schedule, this will de-dupe\n\n for user in on_call['users']:\n for schedule in user['on_call']:\n if schedule['level'] <= self.escalation_level:\n users.add(user['email'])\n\n log.info('Found %d users on-call', len(users))\n self._on_call_email_addresses = users\n return users", "def cmd_account_verification_status(client, args):\n email_verification_status = client.get_email_verification_status(args.username)\n generate_output({'email_verification_status': email_verification_status})", "async def view_email_address(self, ctx):\n author = ctx.message.author\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list:\n await ctx.send(\n \"currently configured email address:{}\".format(self.email_list[str(author.id)]))\n else:\n await ctx.send(\"There is no email address configured..!\")\n return", "def test_retrieve_iso20022_account_statement_ids(self):\n pass", "def search_email(self, accountId, email_address):\n p = {\"email\": email_address}\n if accountId:\n p['accountId'] = accountId\n return self.get_json('/verification/search', params=p)", "def get_emails_stat_by_campaigns(self, emails):\n logger.info(\"Function call: get_emails_stat_by_campaigns\")\n if not emails:\n self.__handle_error(\"Empty emails\")\n try:\n emails = json.dumps(emails)\n except:\n logger.debug(\"Emails: {}\".format(emails))\n return self.__handle_error(\"Emails list can't be converted by JSON library\")\n return self.__handle_result(self.__send_request('emails/campaigns', 'POST', {'emails': emails}))", "def get_mailing_list():\n\t\tresult = {}\n\t\tconnection = DbHelper.connect()\n\n\t\twith connection.cursor() as cursor:\n\t\t\tsql = \"SELECT email FROM mail_list \\\n\t\t\t\t WHERE is_activated=1;\"\n\t\t\tcursor.execute(sql)\n\t\t\tresult = cursor.fetchall()\n\n\t\treturn [email_data['email'] for email_data in result]", "def list_active_emails():\n db_customers = Customers.select().where(Customers.status)\n LOGGER.debug(\"Returning list of active customer emails\")\n email_list = [x.email_address for x in db_customers]\n LOGGER.info(\"Email list: %s\", email_list)\n return email_list", "def test_get_consent_returns_unique_consents(\n app, client, session, models, tokens\n):\n response = client.get(\n \"/consent\", headers={\"Authorization\": f\"Bearer {tokens['read']}\"}\n )\n user_id = jwt.decode(\n tokens[\"read\"], app.config[\"RSA_PUBLIC_KEY\"], app.config[\"ALGORITHM\"],\n )[\"usr\"]\n # Get expected consents - Consents with most recent timestamp value\n # for each group of consents that have identical combination of user_id,\n # type, and category\n # NOTE: Intentionally ordering consents programmatically to test 
expected\n # functioning of the SQL query.\n\n def keyfunc(consent):\n return f\"{consent.type}-{consent.category}\"\n\n consents = Consent.query.filter(Consent.user_id == user_id).all()\n latest_consents = {\n key: max(group, key=lambda c: c.timestamp)\n for key, group in groupby(sorted(consents, key=keyfunc), key=keyfunc)\n }\n assert len(response.json) == len(latest_consents)\n # Test for correctness and unique combination of type and category for each\n # of the user's consents\n unique_consents = {}\n for consent in response.json:\n consent_key = f\"{consent['type']}-{consent['category']}\"\n assert consent_key not in unique_consents\n unique_consents[consent_key] = consent", "def get_referral_emails(self):\n profiles = self.profiles.filter(should_get_notifications=True)\n return [profile.user.email for profile in profiles]", "def get(self, request):\n user = self.request.user\n return Response({\"status\": user.confirmed_email}, status=status.HTTP_200_OK)", "def get_matching_emails(all_the_email,addrlist):\n l_addrlist = map(unicode.lower,addrlist)\n return [ e for e in all_the_email if e.l_address in l_addrlist ]", "def test_confirm_add_flow_request_wrong_consent_status(self):\n self.client.login(username='duck', password='duck')\n res = self.client.get(\n '/v1/flow_requests/consents_confirmed/?success=true&consent_confirm_id={}'.format(WRONG_CONFIRM_ID))\n self.assertEqual(res.status_code, 302)\n flow_request = FlowRequest.objects.get(flow_id='f_11111')\n self.assertEqual(flow_request.status, FlowRequest.PENDING)\n for channel in Channel.objects.filter(flow_request=flow_request):\n channel.status = Channel.CONSENT_REQUESTED", "def do_get_invites_controlled_by_user(user_profile: UserProfile) -> List[Dict[str, Any]]:\n if user_profile.is_realm_admin:\n prereg_users = filter_to_valid_prereg_users(\n PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)\n )\n else:\n prereg_users = filter_to_valid_prereg_users(\n PreregistrationUser.objects.filter(referred_by=user_profile)\n )\n\n invites = []\n\n for invitee in prereg_users:\n assert invitee.referred_by is not None\n invites.append(\n dict(\n email=invitee.email,\n invited_by_user_id=invitee.referred_by.id,\n invited=datetime_to_timestamp(invitee.invited_at),\n expiry_date=get_invitation_expiry_date(invitee.confirmation.get()),\n id=invitee.id,\n invited_as=invitee.invited_as,\n is_multiuse=False,\n )\n )\n\n if not user_profile.is_realm_admin:\n # We do not return multiuse invites to non-admin users.\n return invites\n\n multiuse_confirmation_objs = Confirmation.objects.filter(\n realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE\n ).filter(Q(expiry_date__gte=timezone_now()) | Q(expiry_date=None))\n for confirmation_obj in multiuse_confirmation_objs:\n invite = confirmation_obj.content_object\n assert invite is not None\n\n # This should be impossible, because revoking a multiuse invite\n # deletes the Confirmation object, so it couldn't have been fetched above.\n assert invite.status != confirmation_settings.STATUS_REVOKED\n invites.append(\n dict(\n invited_by_user_id=invite.referred_by.id,\n invited=datetime_to_timestamp(confirmation_obj.date_sent),\n expiry_date=get_invitation_expiry_date(confirmation_obj),\n id=invite.id,\n link_url=confirmation_url(\n confirmation_obj.confirmation_key,\n user_profile.realm,\n Confirmation.MULTIUSE_INVITE,\n ),\n invited_as=invite.invited_as,\n is_multiuse=True,\n )\n )\n return invites", "def test_client_verification_list(self):\n pass", "def 
get_accounts_for_emails(cls, emails):\n return cls.get_by_key_name(['<%s>' % email for email in emails])", "def test_confirm_add_flow_request_invalid_consent(self):\n self.client.login(username='duck', password='duck')\n res = self.client.get(\n '/v1/flow_requests/consents_confirmed/?success=true&consent_confirm_id=aaaaa')\n self.assertEqual(res.status_code, 400)\n self.assertEqual(res.content.decode('utf-8'), ERRORS_MESSAGE['INVALID_DATA'])", "def check_user_email(self, email):\n useremails = []\n for user in self.__users:\n if user['email'] == email:\n useremails.append(user)\n return useremails", "async def getstatuses(self, ctx):\n final_list = \"\"\n statuses = await ex.get_bot_statuses()\n if statuses is not None:\n for status in await ex.get_bot_statuses():\n final_list += f\"{status[0]}\\n\"\n else:\n final_list = \"None\"\n embed = discord.Embed(title=\"Statuses\", description=final_list)\n await ctx.send(embed=embed)", "def verifysubscriptionstatusinaccounttab():\n pass", "def get_email_addresses(startdate, enddate, user, password):\n emails = []\n page = 1\n more_pages = True\n\n while more_pages:\n response = requests.get(\n 'https://restapi.surveygizmo.com/v2/survey/{survey}'\n '/surveyresponse?'\n 'filter[field][0]=datesubmitted'\n '&filter[operator][0]=>=&filter[value][0]={start}+0:0:0'\n '&filter[operator][1]=<&filter[value][1]={end}+0:0:0'\n '&filter[field][1]=status&filter[operator][1]=='\n '&filter[value][1]=Complete'\n '&resultsperpage=500'\n '&page={page}'\n '&user:pass={user}:{password}'.format(\n survey=EMAIL_COLLECTION_SURVEY_ID, start=startdate,\n end=enddate, page=page, user=user, password=password))\n\n results = json.loads(response.content)\n total_pages = results['total_pages']\n more_pages = page < total_pages\n emails = emails + [r['[question(13)]'] for r in results['data']]\n\n return emails", "async def fetch_account_status(account_id):\n res_object = requests.get(_ACCOUNTS_URL.format(account_id=account_id))\n return res_object.json() if res_object.status_code == 200 else {}", "def list_verified_email_addresses(self):\r\n return self._make_request('ListVerifiedEmailAddresses')" ]
[ "0.61944306", "0.6052294", "0.57121867", "0.5533557", "0.54612494", "0.54612494", "0.5418097", "0.52885437", "0.5284941", "0.5283386", "0.5248901", "0.5231524", "0.522646", "0.5220439", "0.5209896", "0.520168", "0.51967436", "0.5160234", "0.5155434", "0.5138707", "0.51335585", "0.5108304", "0.50931793", "0.509077", "0.50869274", "0.50745046", "0.5062677", "0.5059279", "0.5049786", "0.50480473" ]
0.6141746
1
Try to update consent status
def test_update(self, requests_mock, accepts_marketing):
    matcher = requests_mock.post(
        f'{settings.CONSENT_SERVICE_BASE_URL}'
        f'{consent.CONSENT_SERVICE_PERSON_PATH}',
        json={
            'consents': [
                CONSENT_SERVICE_EMAIL_CONSENT_TYPE,
            ],
            'modified_at': '2020-03-12T15:33:50.907000Z',
            'email': '[email protected]',
            'phone': '',
            'key_type': 'email',
        },
        status_code=status.HTTP_201_CREATED,
    )
    result = consent.update_consent('[email protected]', accepts_marketing)
    assert result is None
    assert matcher.called_once
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_confirm_add_flow_request_wrong_consent_status(self):\n self.client.login(username='duck', password='duck')\n res = self.client.get(\n '/v1/flow_requests/consents_confirmed/?success=true&consent_confirm_id={}'.format(WRONG_CONFIRM_ID))\n self.assertEqual(res.status_code, 302)\n flow_request = FlowRequest.objects.get(flow_id='f_11111')\n self.assertEqual(flow_request.status, FlowRequest.PENDING)\n for channel in Channel.objects.filter(flow_request=flow_request):\n channel.status = Channel.CONSENT_REQUESTED", "def _updateViewStateSync(self, consent):\n params = {'consent': consent}\n self.query(self.VIEWSTATESYNC, method=self._session.put, params=params)", "def on_update(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()", "def _handle_consent_confirmation(user, is_confirmed):\n if is_confirmed == \"yes\":\n # user has already given consent, continue flow\n response = server.create_authorization_response(grant_user=user)\n else:\n # user did not give consent\n response = server.create_authorization_response(grant_user=None)\n return response", "def on_update_after_submit(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()", "def test_confirm_cannot_contact_consent_when_getting_oauth2_token(self):\n # First perform an add request that creates the flow request with status 'PENDING'\n res = self._add_flow_request()\n confirm_id = res.json()['confirm_id']\n process_id = res.json()['process_id']\n callback_url = 'http://127.0.0.1/'\n\n # Then confirm the request. This will cause a redirect to consent manager\n self.client.login(username='duck', password='duck')\n res = self.client.get('/v1/flow_requests/confirm/?confirm_id={}&callback_url={}&action=add'.format(\n confirm_id, callback_url))\n self.assertRedirects(res, \"{}?process_id={}&success=false&error={}\".format(callback_url, process_id, ERRORS_MESSAGE['INTERNAL_GATEWAY_ERROR']),\n fetch_redirect_response=False)", "def test_confirm_add_flow_request_confirmed_consent(self):\n self.client.login(username='duck', password='duck')\n # Gets the confirmation code installed with the test data\n c = ConsentConfirmation.objects.get(confirmation_id=CORRECT_CONFIRM_ID)\n res = self.client.get(\n '/v1/flow_requests/consents_confirmed/?success=true&consent_confirm_id={}'.format(CORRECT_CONFIRM_ID))\n\n redirect_url = '{}?process_id={}&success=true'.format(c.destination_endpoint_callback_url,\n c.flow_request.process_id)\n self.assertRedirects(res, redirect_url, fetch_redirect_response=False)\n flow_request = c.flow_request\n self.assertEqual(flow_request.status, FlowRequest.ACTIVE)\n channel = ConsentConfirmation.objects.get(confirmation_id=CORRECT_CONFIRM_ID).channel\n # It remain CR until the consent notification consumer gets the change\n self.assertEqual(channel.status, Channel.CONSENT_REQUESTED)", "def set_status(self, accountid, action):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret() + '&accountid=' + accountid\n payload = {'isblock': action}\n url = SECURE_API_URL + \"raas/v1/account/status\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)", "def PostUserConsent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def 
test_create_consent_fail_on_incorrect_status(client, session, tokens):\n data = {\n \"type\": \"cookie\",\n \"category\": \"strictly_necessary\",\n \"status\": \"akcepted\",\n }\n response = client.post(\n \"/consent\",\n json=data,\n headers={\"Authorization\": f\"Bearer {tokens['write']}\"},\n )\n assert response.status_code == 422", "def UpdateAccessApprovalSettings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def consent_check():\n\n auth = current.auth\n\n person_id = auth.s3_logged_in_person()\n if not person_id:\n return None\n\n has_role = auth.s3_has_role\n if has_role(\"ADMIN\"):\n required = None\n elif has_role(\"VOUCHER_ISSUER\"):\n required = [\"STORE\", \"RULES_ISS\"]\n else:\n required = None\n\n if required:\n consent = current.s3db.auth_Consent(required)\n pending = consent.pending_responses(person_id)\n else:\n pending = None\n\n return pending", "def consent_check():\n\n auth = current.auth\n\n person_id = auth.s3_logged_in_person()\n if not person_id:\n return None\n\n has_role = auth.s3_has_role\n if has_role(\"ADMIN\"):\n required = None\n elif has_role(\"VOUCHER_ISSUER\"):\n required = [\"STORE\", \"RULES_ISS\"]\n else:\n required = None\n\n if required:\n consent = current.s3db.auth_Consent(required)\n pending = consent.pending_responses(person_id)\n else:\n pending = None\n\n return pending", "def test_consent_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"consent-example-notThis.json\"\n inst = consent.Consent.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Consent\" == inst.resource_type\n\n impl_consent_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Consent\" == data[\"resourceType\"]\n\n inst2 = consent.Consent(**data)\n impl_consent_1(inst2)", "def test_create_consent(client, session, tokens, input):\n response = client.post(\n \"/consent\",\n json=input,\n headers={\"Authorization\": f\"Bearer {tokens['write']}\"},\n )\n assert response.status_code == 201\n consent_id = response.json[\"id\"]\n assert Consent.query.filter(Consent.id == consent_id).count() == 1", "def test_confirm_fail_consent_oauth_token(self):\n # First perform an add request that creates the flow request with status 'PENDING'\n res = self._add_flow_request()\n confirm_id = res.json()['confirm_id']\n process_id = res.json()['process_id']\n callback_url = 'http://127.0.0.1/'\n\n self.client.login(username='duck', password='duck')\n res = self.client.get('/v1/flow_requests/confirm/?confirm_id={}&callback_url={}&action=add'.format(\n confirm_id, callback_url))\n self.assertRedirects(res, \"{}?process_id={}&success=false&error={}\".format(callback_url, process_id, ERRORS_MESSAGE['INTERNAL_GATEWAY_ERROR']),\n fetch_redirect_response=False)", "def confirm_further(self, update, context):\n response_code = update.callback_query[\"data\"] # wouldyou_{yes|no}\n request_id = context.user_data[\"current_request\"]\n log.info(\"No further comments req:%s %s\", request_id, response_code)\n self.finalize_request(update, context, request_id)", "def test_client_nationlity_update(self):\n pass", "def test_consent_4(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"consent-example-notAuthor.json\"\n inst = consent.Consent.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert 
\"Consent\" == inst.resource_type\n\n impl_consent_4(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Consent\" == data[\"resourceType\"]\n\n inst2 = consent.Consent(**data)\n impl_consent_4(inst2)", "async def change_status():\n await client.change_presence(activity=discord.Game(next(appearance.status)))", "def update(self):\n _LOGGER.debug(\"Updating status using the client AC instance...\")\n self.ac.update_status()\n _LOGGER.debug(\"Status updated using the client AC instance\")", "def updateTenantStatus(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def campaign_status(self, campaign_status):\n allowed_values = [\"on\", \"stopping\", \"off\", \"complete\", \"invalid\"]\n if campaign_status.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for campaign_status -> \" + campaign_status)\n self._campaign_status = \"outdated_sdk_version\"\n else:\n self._campaign_status = campaign_status", "def update_aid_status(self):\n aid = self.object\n\n # Check that submitted form data is still consistent\n current_status = self.request.POST.get('current_status', None)\n if aid.status != current_status:\n return\n\n STATES = AidWorkflow.states\n if aid.status == STATES.draft:\n aid.submit()\n elif aid.status in (STATES.reviewable, STATES.published):\n aid.unpublish()\n log_admins.delay(\n 'Aide dépubliée',\n 'Une aide vient d\\'être dépubliée.\\n\\n{}'.format(aid),\n aid.get_absolute_url())\n\n msg = _('We updated your aid status.')\n messages.success(self.request, msg)", "def UpdateFromServer(self):\n self.status = GetUserStatus(self.accesskey)", "def econsent(self, econsent):\n\n self._econsent = econsent", "def UpdateQuarantine(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def handle_application(sender, instance, **kwargs):\n if instance.accepted is not None:\n if instance.accepted:\n instance.user.userprofile.change_status_developer()\n else:\n instance.user.userprofile.change_status_player()", "def form_valid(self, form):\n redirect_url = self.accept_consent_request(None)\n return HttpResponseRedirect(redirect_url)", "def consent(self, account_id):\n from pureport_client.commands.accounts.consent import Command\n return Command(self.client, account_id)" ]
[ "0.6403219", "0.6326517", "0.6045587", "0.5909922", "0.5812921", "0.57628024", "0.5692828", "0.5603599", "0.55674136", "0.5560386", "0.549486", "0.5482111", "0.5482111", "0.5476812", "0.544154", "0.54324704", "0.5428267", "0.5428241", "0.54031616", "0.5402912", "0.540165", "0.53770113", "0.535746", "0.5356617", "0.53523666", "0.53468734", "0.533112", "0.5329992", "0.5328821", "0.5328298" ]
0.64573574
0
This function retrieves all ACISAs
def RetrieveACISA():
    db = DBConnector()
    cur = db.cursor()
    SQLcmd = "SELECT * FROM snaps.SNAPsLocation"
    cur.execute(SQLcmd)
    returnList = []
    count = 0
    for item in cur.fetchall():
        count += 1
        tmplist = [item[1], item[2], count, str(item[0])]
        returnList.append(tmplist)
    return returnList
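A minimal sketch of the row shaping done above, using made-up rows in place of a live snaps.SNAPsLocation cursor (DBConnector and the real column meanings are assumptions, so the sample values are purely illustrative):

# Illustrative only: fake rows standing in for cur.fetchall() results.
sample_rows = [(101, 48.85, 2.35), (102, 40.71, -74.00)]
shaped = []
for count, item in enumerate(sample_rows, start=1):
    # Same shaping as RetrieveACISA: [second column, third column, running index, stringified key].
    shaped.append([item[1], item[2], count, str(item[0])])
print(shaped)  # [[48.85, 2.35, 1, '101'], [40.71, -74.0, 2, '102']]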
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_acls():\n return config.get_cfg_storage(ID_ACL)", "def getAcdcs(url, requests):\n acdcs = []\n for request in requests:\n name=request['id']\n #if a wrong or weird name\n if len(request['key'])<3:\n print request\n continue\n if 'ACDC' not in name:\n continue\n status=request['key']\n #only completed requests\n if status != 'completed':\n continue\n #requestType=request['key'][2]\n #only acdcs\n #if requestType != 'Resubmission':\n # continue\n acdcs.append(name) \n return acdcs", "def amenity_get_all():\n am_list = []\n am_obj = storage.all(\"Amenity\")\n for obj in am_obj.values():\n am_list.append(obj.to_json())\n\n return jsonify(am_list)", "def get_all_ribs_per_as(self):\n return self._get_all_ribs(lambda r: 'as'+str(r.asn))", "def getACSIndex(self):\n\n return self._acsi", "def _get_arns(self):\n client = self._get_client()\n\n account_arns = set()\n\n for role in list_roles(**self.conn_details):\n account_arns.add(role['Arn'])\n\n for user in list_users(**self.conn_details):\n account_arns.add(user['Arn'])\n\n for page in client.get_paginator('list_policies').paginate(Scope='Local'):\n for policy in page['Policies']:\n account_arns.add(policy['Arn'])\n\n for page in client.get_paginator('list_groups').paginate():\n for group in page['Groups']:\n account_arns.add(group['Arn'])\n\n result_arns = set()\n for arn in self.arn_list:\n if arn.lower() == 'all':\n return account_arns\n\n if arn not in account_arns:\n self.current_app.logger.warn(\"Provided ARN {arn} not found in account.\".format(arn=arn))\n continue\n\n result_arns.add(arn)\n\n self.current_app.logger.debug(\"got %d arns\", len(result_arns))\n return list(result_arns)", "def get_sample_acls(self, ctx, params):\n # ctx is the context object\n # return variables are: acls\n #BEGIN get_sample_acls\n id_ = _get_id_from_object(params, 'id', required=True)\n admin = _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.READ,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'get_sample_acls', ctx.log_info, skip_check=not params.get('as_admin'))\n acls_ret = self._samples.get_sample_acls(id_, _UserID(ctx[_CTX_USER]), as_admin=admin)\n acls = _acls_to_dict(acls_ret)\n #END get_sample_acls\n\n # At some point might do deeper type checking...\n if not isinstance(acls, dict):\n raise ValueError('Method get_sample_acls return value ' +\n 'acls is not type dict as required.')\n # return the results\n return [acls]", "def iter_all_amino_acids(self):\n for model in self.model_list:\n for chain in model.chain_list:\n for frag in chain.iter_amino_acids():\n yield frag", "def accessControlList(self):\n return allACL", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def get_ad_entries(cohesity_client):\n resp = cohesity_client.active_directory.get_active_directory_entry()\n if resp:\n ad_list = list()\n for each_ad in resp:\n ad_list.append(each_ad.domain_name)\n config_dict[each_ad.domain_name] = [\n \"username\", \"password\", \"machine_accounts\"]\n exported_res_dict[\"Active directories\"] = ad_list\n return resp", "def listar_cadastros():\n return cadastro_alunos.listar_aluno()", "def get_accels (self):\n return (self.get_ax (), self.get_ay (), self.get_az ())", "def get_arcs(self):\n arcs = []\n for arcs_list in self._inc.values():\n record = arcs_list.get_first_record()\n while record is not None:\n arc = record.element\n arcs.append(arc)\n record = record._next\n return arcs", "def all_client_assoc_ap(ap_mac):\n\n url = CMX_URL + 
'/api/location/v2/clients'\n header = {'content-type': 'application/json', 'accept': 'application/json'}\n response = requests.get(url, headers=header, auth=CMX_AUTH, verify=False)\n clients_json = response.json()\n clients_mac_info = []\n for client in clients_json:\n if client['apMacAddress'] == ap_mac:\n clients_mac_info.append(client['macAddress'])\n return clients_mac_info", "async def test_get_ac_from_gene(test_db):\n resp = await test_db.get_ac_from_gene(\"BRAF\")\n assert resp == [\"NC_000007.14\", \"NC_000007.13\"]\n\n resp = await test_db.get_ac_from_gene(\"HRAS\")\n assert resp == [\"NC_000011.10\", \"NC_000011.9\"]\n\n resp = await test_db.get_ac_from_gene(\"dummy\")\n assert resp == []", "async def get_airfields_icao(eaip_date: datetime.datetime = None) -> typing.List[str]:\n if eaip_date is None:\n eaip_date = __get_current_version()\n\n formatted_date = get_formatted_date(eaip_date)\n\n async with aiohttp.ClientSession() as session:\n async with session.get(EAIP_MENU_URL.format(formatted_date)) as resp:\n menu_content = await resp.text()\n soup = BeautifulSoup(menu_content, 'html.parser')\n menu_element = soup.find(id='AD-2details')\n menu_item_elements = menu_element.find_all('div', 'Hx', recursive=False)\n\n icao_list = [re.findall(r'.*(EG\\w+)plus', next(menu_item.children).attrs['id'])[0]\n for menu_item in menu_item_elements]\n return icao_list", "def read_all():\n # Create the list of CIs from our data\n ci = db.session.query(CI).order_by(CI.id).all()\n app.logger.debug(pformat(ci))\n # Serialize the data for the response\n ci_schema = CISchema(many=True)\n data = ci_schema.dump(ci)\n return data", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def list_cas():\n cas = []\n for ca in settings.ACM_PRIVATE_CA_SETTINGS:\n _ca = get_ca(ca)\n cas.append(_ca.get_certificate_authority_certificate())\n return cas", "def get_tacacs_servers(self):\n\n cmd = 'show tacacs'\n output = self.iosapi.bcp_send_command(self.iosapi.netmiko_session, cmd)\n self.iosapi.bcp_log(\"info\", \"(%s) get_tacacs_servers() : Attempting to retrieve TACACS+ servers\" %(__name__))\n\n return(self.iosapi.textfsm_extractor('cisco_ios_show_tacacs.template', output))", "def listaNacionalidades():\n nac = NacionalidadModel()\n\n return nac.listarTodos()", "def list_all_amenities():\n data = storage.all('Amenity')\n amenities = [v.to_dict() for k, v in data.items()]\n return jsonify(amenities)", "def getAminos(self):\n\t\treturn self.aminos", "def getArcs(self):\n return self.getArcsFrom()", "async def get_all_accesspoints(self) -> list[NetworkWirelessAP]:\n accesspoints_data = await self.dbus.Device.Wireless.call_get_all_access_points()\n accesspoints = [NetworkWirelessAP(ap_obj) for ap_obj in accesspoints_data]\n\n for err in await asyncio.gather(\n *[ap.connect(self.dbus.bus) for ap in accesspoints], return_exceptions=True\n ):\n if err:\n _LOGGER.warning(\"Can't process an AP: %s\", err)\n\n return accesspoints", "def all_amenities():\n amenities_list = []\n for amenity in 
storage.all(Amenity).values():\n amenities_list.append(amenity.to_dict())\n return jsonify(amenities_list)", "def get_all(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieve a list of STS chains\", uri,\n requires_modules=requires_modules,\n requires_version=requires_version)", "def get_accounts(self, session: \"Session\") -> List[Account]:\n\n self.__get_dn(session)\n\n result = session.soapclient.get_accounts_by_owner(self.dn)\n return [Account(session, account=r) for r in result]", "def show_all_amenities():\n\n amenities = storage.all(Amenity).values()\n new_list = []\n for amenity in amenities:\n new_list.append(amenity.to_dict())\n return jsonify(new_list)" ]
[ "0.6468968", "0.62329483", "0.6149895", "0.6125198", "0.6006901", "0.59983075", "0.59268194", "0.58295614", "0.58222836", "0.58132493", "0.5776532", "0.57738966", "0.5739669", "0.57129836", "0.57093567", "0.5707091", "0.5696865", "0.5693562", "0.5683094", "0.5662152", "0.5629573", "0.55708313", "0.55642647", "0.55252707", "0.5524794", "0.55236524", "0.55045813", "0.54837346", "0.54776263", "0.5451845" ]
0.63769305
1
Method to calculate a stat over all time steps
def time_stat(self, stat="mean"):
    # create cdo command and run it
    cdo_command = f"cdo -tim{stat}"
    run_this(cdo_command, self, output="ensemble")
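The method only assembles a CDO time-statistics operator name and hands it to run_this (a dispatcher assumed to come from the surrounding toolkit). This standalone sketch just shows which operator strings different stat keywords produce:

for stat in ("mean", "min", "max", "sum", "var", "std"):
    # These are the command strings time_stat would build, e.g. "cdo -timmean".
    print(f"cdo -tim{stat}")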
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_stat_values(self):", "def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)", "def compute_statistics(self):", "def averageTime(self):\n \n pass", "def time_stats(df):", "def compute_values(self, update_statistics=False):\n\n self.compute_iterations()\n self.axsec = sum([one.axsec for one in self])\n self.xsec = sum([one.xsec for one in self])\n self.xerrc = sum([one.xerrc for one in self])\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self]))\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = sum([one.nw for one in self])\n self.maxit = len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = min([0]+[one.luminosity for one in self])\n if update_statistics:\n self.run_statistics.aggregate_statistics([_.run_statistics for _ in self])", "def advancedStats():", "def getTimes():", "def getTimes():", "def getTimes():", "def sum(self):\n\n return time_stat(self, stat=\"sum\")", "def calc_calories(gpx_track, wt = 175, activity='Run'):", "def measure(self):\n # --- perform repeated runs\n for i_run in range(self.n_runs):\n if self.verbosity > 0:\n print(\"Run {0} / {1} ...\".format(i_run, self.n_runs), end = '')\n tdelta = self._timed_execute()\n self._run_times[i_run] = tdelta\n\t\t\t\n if self.verbosity == 2:\n print(tdelta)\n \n # calculate mean\n self._tmean = np.mean(self._run_times)\n # calculate standard deviation\n self._tstdev = np.std(self._run_times)\n # allow access to results\n self.__hasrun = True", "def calc_timestep_statistic(self, statistic, time):\n ti = np.where(self.times == time)[0][0]\n ma = np.where(self.masks[ti].ravel() == 1)\n if statistic in ['mean', 'max', 'min', 'std', 'ptp']:\n stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()\n elif statistic == 'median':\n stat_val = np.median(self.timesteps[ti].ravel()[ma])\n elif 'percentile' in statistic:\n per = int(statistic.split(\"_\")[1])\n stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)\n elif 'dt' in statistic:\n stat_name = statistic[:-3]\n if ti == 0:\n stat_val = 0\n else:\n stat_val = self.calc_timestep_statistic(stat_name, time) - \\\n self.calc_timestep_statistic(stat_name, time - 1)\n else:\n stat_val = np.nan\n return stat_val", "def stats(self):", "def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)", "def calc_stats(results):\r\n all_res = []\r\n count = 0\r\n for values in results.status_code_counter.values():\r\n all_res += values\r\n count += len(values)\r\n\r\n cum_time = sum(all_res)\r\n\r\n if cum_time == 0 or len(all_res) == 0:\r\n rps = avg = min_ = max_ = amp = 0\r\n else:\r\n if results.total_time == 0:\r\n rps = 0\r\n else:\r\n rps = len(all_res) / float(results.total_time)\r\n avg = sum(all_res) / len(all_res)\r\n max_ = max(all_res)\r\n min_ = min(all_res)\r\n amp = max(all_res) - min(all_res)\r\n stdev = math.sqrt(sum((x-avg)**2 for x in all_res) / count)\r\n\r\n return (\r\n RunStats(count, results.total_time, rps, avg, min_, max_, amp, stdev)\r\n )", "def iterate(self, datapoint, reset=False):\n\n if isinstance(datapoint, np.ndarray):\n if datapoint.ndim == 1:\n measured_statistic = datapoint[0]\n else:\n measured_statistic = datapoint[0, 0]\n else:\n measured_statistic = datapoint\n\n # increment 
time\n self.time += 1\n\n # update g\n increment = measured_statistic - self.gamma\n self.g += increment\n if self.g < 0:\n self.g = 0\n\n # check if reached the threshold\n if self.g >= self.threshold:\n alarm = True\n if reset:\n self.reset()\n else:\n alarm = False\n\n # return the increment\n return alarm, increment", "def evaluate(self, time) -> float:\n ...", "def _trigger(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def mean(self):\n\n return time_stat(self, stat=\"mean\")", "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt", "def iteration_stats(self, k, frcxd):\n\n tk = self.timer.elapsed(self.opt['IterTimer'])\n if self.opt['Monotone']:\n tpl = (k,) + self.objfn \\\n + (frcxd, self.F, self.Q, self.iterBTrack, self.L) \\\n + self.itstat_extra() + (tk,)\n else:\n tpl = (k,) + self.eval_objfn() \\\n + (frcxd, self.F, self.Q, self.iterBTrack, self.L) \\\n + self.itstat_extra() + (tk,)\n return type(self).IterationStats(*tpl)", "def compute_total_times(self):\n rval = {}\n for fgraph, node in self.apply_time:\n if node not in rval:\n self.fill_node_total_time(fgraph, node, rval)\n return rval", "def calculate(self):\n #runs = [ai\n # for ei in self.experiment_queues\n # for ai in ei.cleaned_automated_runs]\n #\n #ni = len(runs)\n #self.nruns = ni\n # for ei in self.experiment_queues:\n # dur=ei.stats.calculate_duration(ei.cleaned_automated_runs)\n # if\n\n\n tt = sum([ei.stats.calculate_duration(ei.cleaned_automated_runs)\n for ei in self.experiment_queues])\n self._total_time = tt\n offset = 0\n if self._start_time:\n offset = time.time() - self._start_time\n\n self.etf = self.format_duration(tt - offset)", "def hourly_stats():\r\n count_total.delay()\r\n count_unique.delay()\r\n count_tags.delay()", "def time_step(self):\n\n rho_rel = np.abs(self.rho_dt / self.rho)\n rho_rel_max = np.max(rho_rel)\n e_rel = np.abs(self.e_dt / self.e)\n e_rel_max = np.max(e_rel)\n x_rel = np.abs(self.u / self.dx)\n x_rel_max = np.max(x_rel)\n y_rel = np.abs(self.w / self.dy)\n y_rel_max = np.max(y_rel)\n rel = [rho_rel_max, e_rel_max, x_rel_max, y_rel_max]\n delta = np.max(np.abs(rel))\n\n if 0.1 <= delta <= 1e3:\n self.dt = self.p / delta\n else:\n self.dt = self.p", "def method_compute_timestep(self):\n\n myg = self.cc_data.grid\n\n cfl = self.rp.get_param(\"driver.cfl\")\n\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n # the timestep is min(dx/|u|, dy|v|)\n xtmp = ytmp = 1.e33\n if not abs(u).max() == 0:\n xtmp = myg.dx/abs(u.v()).max()\n if not abs(v).max() == 0:\n ytmp = myg.dy/abs(v.v()).max()\n\n dt = cfl*min(xtmp, ytmp)\n\n # We need an alternate timestep that accounts for buoyancy, to\n # handle the case where the velocity is initially zero.\n rho = self.cc_data.get_var(\"density\")\n rho0 = self.base[\"rho0\"]\n rhoprime = self.make_prime(rho, rho0)\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n\n F_buoy = (abs(rhoprime*g).v()/rho.v()).max()\n\n dt_buoy = np.sqrt(2.0*myg.dx/F_buoy)\n\n self.dt = min(dt, dt_buoy)\n if self.verbose > 0:\n print(f\"timestep is {dt}\")", "def _tstat_all(self):\n return np.squeeze(self.solution) / self._se_all", "def report_total_usage(self):\n work_time = 0\n if self.type == 'normal':\n work_time = self.fwk.fwk_global_time - self.start_exec_time\n elif self.type == 'sandia_work':\n self.total_time += 
self.fwk.fwk_global_time - self.start_exec_time\n self.total_usage = self.total_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.completed_work += self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.sim.rework_todo += self.fwk.fwk_global_time - self.start_exec_time\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_rework':\n self.total_rework_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.next_ckpt = self.sim.ckpt_interval - (self.fwk.fwk_global_time - self.start_exec_time)\n self.sim.rework_todo -= self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_ckpt':\n self.total_ckpt_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_ckpt_usage = self.total_ckpt_time * self.nproc\n if self.state == \"running\":\n # update last ckpt\n self.sim.last_ckpt = self.sim.completed_work\n elif self.state == \"failed\":\n # add work to rework\n self.sim.rework_todo += self.sim.next_ckpt\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_restart':\n print(\"time spent in rework\", self.fwk.fwk_global_time - self.start_exec_time)\n self.total_restart_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_restart_usage = self.total_restart_time * self.nproc\n #if self.state == \"running\":\n # nothing to do?\n # pass\n if self.state == \"failed\":\n # gotta try again\n self.state = \"ready\"\n self.num_faults += 1\n else:\n print(\"problems updating state in report_total_usage\")\n raise\n if self.type == 'normal':\n if self.sim.state == 'rework':\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else: # sim.state == 'work'\n if self.retry:\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else:\n self.total_time += work_time\n self.total_usage = self.total_time * self.nproc" ]
[ "0.7152318", "0.6610999", "0.64921373", "0.6394629", "0.63097644", "0.62772894", "0.6209305", "0.6110621", "0.6110621", "0.6110621", "0.61086136", "0.6045909", "0.596642", "0.5933937", "0.5890365", "0.58721894", "0.58537525", "0.5809977", "0.58057666", "0.57885444", "0.5734363", "0.5729793", "0.57265365", "0.5699492", "0.5685674", "0.56843495", "0.566741", "0.5661211", "0.56552684", "0.5653898" ]
0.68386406
1
Calculate the aggregated mean and stds.
def _get_aggregated_mean_std(self, means, stds, n):
    mean = means.view(n, -1).sum(0) / n
    std = (
        stds.view(n, -1).sum(0) / n
        + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
    )
    return mean.detach(), std.detach()
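As the aggregate_stats record below shows, the stds argument actually receives the per-split running variances, so the formula is the equal-weight law of total variance: mean of the per-split variances plus the variance of the per-split means. A self-contained PyTorch check of that identity (shapes and data are invented for the demonstration):

import torch

n, per_split, channels = 4, 8, 3
x = torch.randn(n, per_split, channels)

# Per-split (biased) statistics, as a split batch norm would track them.
split_means = x.mean(dim=1)                # (n, channels)
split_vars = x.var(dim=1, unbiased=False)  # (n, channels)

# Same aggregation as above: mean of means, mean of vars + spread of the means.
agg_mean = split_means.mean(dim=0)
agg_var = split_vars.mean(dim=0) + ((split_means - agg_mean) ** 2).mean(dim=0)

# Matches the statistics of the pooled data because the splits are equal-sized.
full = x.reshape(-1, channels)
assert torch.allclose(agg_mean, full.mean(dim=0), atol=1e-5)
assert torch.allclose(agg_var, full.var(dim=0, unbiased=False), atol=1e-5)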
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aggregate_stats(self):\n if self.split_bn.track_running_stats:\n (\n self.bn.running_mean.data,\n self.bn.running_var.data,\n ) = self._get_aggregated_mean_std(\n self.split_bn.running_mean,\n self.split_bn.running_var,\n self.num_splits,\n )", "def meanstd(self):\n\t\tmean = [125.3, 123.0, 113.9] # R,G,B\n\t\tstddev = [63.0, 62.1, 66.7] # R,G,B\n\t\treturn [mean, stddev]", "def mean_std_calc(dataloader):\n mean = 0\n std = 0\n samples = 0\n for data, _, _ in dataloader:\n batch_samples = data.size(0)\n data = data.view(batch_samples, data.size(1), -1)\n mean += data.mean(2).sum(0)\n std += data.std(2).sum(0)\n samples += batch_samples\n\n return (mean / samples),(std / samples)", "def _compute_mean_std(self, sum_, ssum, size):\n assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'\n mean = sum_ / size\n sumvar = ssum - sum_ * mean\n unbias_var = sumvar / (size - 1)\n bias_var = sumvar / size\n\n self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data\n self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data\n\n return mean, bias_var.clamp(self.eps) ** -0.5", "def get_mean_stddev(self):\n return self.get_mean(), self.get_std_dev()", "def _loss_std_mean(self, iterations):\n\n loss_array = np.array(self._loss_list[-iterations:])\n return loss_array.mean(), loss_array.std()", "def _get_mean_and_log_std(self, *inputs):\n return self._shared_mean_log_std_network(*inputs)", "def compute_mean_std(x):\n x = np.hstack(x)\n return (np.mean(x).astype(np.float32),\n np.std(x).astype(np.float32))", "def get_mean_and_std(arr):\r\n return np.round(np.mean(arr), 3), np.round(np.std(arr), 3)", "def _get_mean_and_log_std(self, *inputs):\n return self._mean_module(*inputs), self._log_std_module(*inputs)", "def std_mean(self):\n std = self.std\n if self.ddof != 0:\n # ddof correction, (need copy of std)\n std = std * np.sqrt(\n (self.sum_weights - self.ddof) / self.sum_weights\n )\n\n return std / np.sqrt(self.sum_weights - 1)", "def get_mean_and_variance(self):\n self._set_statistics()\n return self.statistics_object.get_mean(), self.statistics_object.get_variance()", "def calc_mean_stdev(data):\n\n pop_stdev = pstdev(data)\n pop_mean = mean(data)\n\n return pop_mean, pop_stdev", "def get_summarized_results(self):\n stats = [v.stats() for (k, v) in self.examples.items() if v.is_ready()]\n res = self.ExampleClass.average_stats(stats)\n\n res['loss'] = self.loss/self.loss_cnt\n res['recent_loss'] = sum(self.recent_loss_array) / sum(self.recent_loss_bs_array)\n\n return res", "def mean_STD(self,counter):\n \n \n pass", "def mean_stddev(self):\n if len(self.vs) == 0:\n raise StdDevFilterException\n\n mx = self.mean()\n # compute variance\n variance = sum([(x - mx)**2 for x in self.vs])/len(self.vs)\n # return mean value and standard deviation (square root of variance)\n return mx,math.sqrt(variance)", "def mean(self):\n return self.aggregate(np.mean)", "def find_mean_std(self, data):\n if self._data_mean is None:\n self._data_mean = np.mean(data)\n if self._data_std is None:\n self._data_std = np.std(data)", "def calculate(self):\n self.results['max'] = numpy.max(self.data)\n self.results['min'] = numpy.min(self.data)\n if self.type == 0:\n self.group_discrete_data()\n if self.type == 1:\n self.group_continuous_data()\n\n self.results['arithAvg'] = self.average([self.data[i] * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences)\n self.results['quadAvg'] = math.sqrt(\n 
self.average([(self.data[i] * self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n if self.results['min'] > 0:\n self.results['geoAvg'] = math.exp(\n self.average([numpy.log(self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n self.results['harmAvg'] = 1 / self.average(\n [(self.occurrences[i] / self.data[i]) for i in range(len(self.data))],\n self.totalOccurrences)\n else:\n self.results['geoAvg'] = self.results['harmAvg'] = \"N/A\"\n self.results['momentsR'] = self.moments(self.data, self.occurrences, 4)\n self.results['centralMomentsR'] = self.moments([(i - self.results['arithAvg']) for i in self.data],\n self.occurrences, 4)\n self.results['std'] = self.average(\n [self.occurrences[i] * abs(self.data[i] - self.results['arithAvg']) for i in range(len(self.data))],\n self.totalOccurrences)", "def get_mean_and_std(dataset):\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=1, shuffle=True, num_workers=2\n )\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print(\"==> Computing mean and std..\")\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:, i, :, :].mean()\n std[i] += inputs[:, i, :, :].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def get_mean_and_std(dataset):\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=1, shuffle=True, num_workers=2\n )\n mean = torch.zeros(3)\n std = torch.zeros(3)\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:, i, :, :].mean()\n std[i] += inputs[:, i, :, :].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std", "def _compute_mean_std(self, history, window=28):\n history = np.array(history[-window - 1: -1])\n decay_weights = [self.decay ** a for a in range(len(history), 0, -1)]\n weighted = history * decay_weights\n mean = weighted.mean()\n std = weighted.std()\n return mean, std", "def std(self):\n return self._summarize(lambda c: c.std)", "def compute_training_stats():\n means, stds = [], []\n data = SUNRGBDTrainDataset(True)\n for i in range(len(data)):\n print(i)\n img, _ = data[i]\n std, mean = t.std_mean(input=img, dim=(1, 2))\n means.append(mean)\n stds.append(std)\n means = t.sum(t.vstack(means), dim=0) / len(means)\n stds = t.sum(t.vstack(stds), dim=0) / len(stds)\n print(means, stds)", "def get_mean_and_std(dataloader):\n mean = torch.zeros(3)\n std = torch.zeros(3)\n len_dataset = 0\n print('==> Computing mean and std..')\n for inputs, targets in dataloader:\n len_dataset += 1\n for i in range(len(inputs[0])):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len_dataset)\n std.div_(len_dataset)\n return mean, std", "def mean(self):\n\n return self._reduce_for_stat_function(F.mean, only_numeric=True)", "def calculate_std(self) -> float:\n\n if self.data:\n return np.std(self.data)\n else:\n return self.sigma", "def _get_mean_and_log_std(self, x):\n mean = self._mean_module(x)\n return mean, self._log_std", "def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu" ]
[ "0.75994617", "0.7462015", "0.73687404", "0.72231203", "0.7077105", "0.6966083", "0.69622374", "0.69211644", "0.691378", "0.6900489", "0.6860511", "0.6856823", "0.6847475", "0.6832801", "0.68259156", "0.6784394", "0.67798734", "0.6750028", "0.67471087", "0.6724783", "0.6687738", "0.66768366", "0.6640656", "0.66091186", "0.6555734", "0.6544757", "0.6541889", "0.65296435", "0.65035236", "0.6500766" ]
0.75622755
1
Synchronize running_mean and running_var. Call this before eval.
def aggregate_stats(self):
    if self.split_bn.track_running_stats:
        (
            self.bn.running_mean.data,
            self.bn.running_var.data,
        ) = self._get_aggregated_mean_std(
            self.split_bn.running_mean,
            self.split_bn.running_var,
            self.num_splits,
        )
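A hedged usage sketch: before switching to evaluation, walk the model and fold every split-BN's running statistics into its single BN. Only the aggregate_stats name is taken from this record; modules without it are simply skipped, and the helper name is invented for illustration:

import torch.nn as nn

def prepare_for_eval(model: nn.Module) -> nn.Module:
    # Fold split-BN running stats into the plain BN, per the docstring
    # above ("Call this before eval"), then switch to eval mode.
    for module in model.modules():
        if hasattr(module, "aggregate_stats"):
            module.aggregate_stats()
    return model.eval()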
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _synchronize_vars_using_mean(new_var: NestedMap,\n old_var: NestedMap) -> NestedMap:\n delta = new_var - old_var\n delta_mean = jax.lax.pmean(delta, axis_name=data_parallel_axis_name)\n updated_var = old_var + delta_mean\n return updated_var", "def _force_updates():\n float_stats_iter = tf.cast(stats_iter, tf.float32)\n update_moving_mean = tf.assign(moving_mean,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_mean) + (\n (1 / (float_stats_iter + 1)) * batch_mean))\n update_moving_variance = tf.assign(moving_variance,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_variance) + (\n (1 / (float_stats_iter + 1)) * batch_var))\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(outputs)", "def initRunningVals(self):\n self.r_Vm = [0.0]*self.mirror.dataPoints\n self.r_Va = [0.0]*self.mirror.dataPoints", "def PostTrainingStepUpdate(self, global_step):\n p = self.params\n # Get sufficient stats that accumulates over microbatches.\n counts = self.accumulators.counts.GetValue()\n mean_ss = self.accumulators.mean_ss.GetValue()\n variance_ss = self.accumulators.variance_ss.GetValue()\n # Compute batch mean and batch variance from sufficient stats\n mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)\n decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)\n # Update moving_mean, moving_variance from batch mean and batch variance.\n with tf.name_scope(p.name) as scope:\n with tf.colocate_with(self.vars.moving_mean):\n mean_update = tf.assign_sub(\n self.vars.moving_mean,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,\n tf.zeros_like(self.vars.moving_mean)),\n name='moving_mean_update')\n with tf.colocate_with(self.vars.moving_variance):\n var_update = tf.assign_sub(\n self.vars.moving_variance,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_variance - tf.cast(variance, p.dtype)) *\n decay, tf.zeros_like(self.vars.moving_variance)),\n name='moving_variance_update')\n py_utils.CheckNumerics(\n self.vars.moving_mean,\n 'moving mean of {} failed numeric check'.format(scope))\n py_utils.CheckNumerics(\n self.vars.moving_variance,\n 'moving variance of {} failed numeric check'.format(scope))\n self.accumulators.counts.Reset()\n self.accumulators.mean_ss.Reset()\n self.accumulators.variance_ss.Reset()\n return tf.group(mean_update, var_update)", "def PostTrainingStepUpdate(self):\n p = self.params\n # Get sufficient stats that accumulates over microbatches.\n counts = self.accumulators.counts.GetValue()\n mean_ss = self.accumulators.mean_ss.GetValue()\n variance_ss = self.accumulators.variance_ss.GetValue()\n # Compute batch mean and batch variance from sufficient stats\n mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)\n decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)\n # Update moving_mean, moving_variance from batch mean and batch variance.\n with tf.name_scope(p.name) as scope:\n with tf.ops.colocate_with(self.vars.moving_mean):\n mean_update = tf.assign_sub(\n self.vars.moving_mean,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,\n tf.zeros_like(self.vars.moving_mean)),\n name='moving_mean_update')\n with tf.ops.colocate_with(self.vars.moving_variance):\n var_update = tf.assign_sub(\n self.vars.moving_variance,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_variance - tf.cast(variance, p.dtype)) *\n decay, tf.zeros_like(self.vars.moving_variance)),\n 
name='moving_variance_update')\n py_utils.CheckNumerics(\n self.vars.moving_mean,\n 'moving mean of {} failed numeric check'.format(scope))\n py_utils.CheckNumerics(\n self.vars.moving_variance,\n 'moving variance of {} failed numeric check'.format(scope))\n self.accumulators.counts.Reset()\n self.accumulators.mean_ss.Reset()\n self.accumulators.variance_ss.Reset()\n return tf.group(mean_update, var_update)", "def __reset_variables(self):\r\n self.__running = True", "def sync_batch_stats(state: TrainState) -> TrainState:\n # Each device has its own version of the running average batch\n # statistics and those are synced before evaluation\n return state.replace(batch_stats=cross_replica_mean(state.batch_stats))", "def set_eval(self):\n self.eval()\n self.volatile = True\n self.scheduled_sampling = False", "def sync_batch_norm(\n input,\n running_mean,\n running_var,\n weight,\n bias,\n training=False,\n momentum=0.1,\n eps=1e-5,\n process_group=None,\n):\n if process_group is None:\n kwargs = locals()\n kwargs.pop('process_group')\n return batch_norm(**kwargs)\n return FunctionLib.apply(\n 'SyncBatchNorm', input.device,\n [input, weight, bias, running_mean, running_var],\n axis=1, epsilon=eps, use_stats=int(not training),\n momentum=1.0 - momentum, **process_group.arguments)", "def _compute_mean_variance(self, mean_dict: Dict) -> None:\n self.eval_dict = defaultdict()\n for key, value in mean_dict.items():\n tmp_mean = np.mean(value)\n tmp_variance =np.var(value)\n self.eval_dict[key + '_mean'] = tmp_mean\n self.eval_dict[key + '_variance'] = tmp_variance", "def _val(self):\r\n lr, hr = self.sess.run(self.train_batch)\r\n res = self.sess.run(\r\n [self.merged,\r\n self.GAN.g_loss, self.GAN.mse_loss, self.GAN.g_gan_loss,\r\n self.GAN.d_loss, self.GAN.d_loss_real, self.GAN.d_loss_fake],\r\n feed_dict={\r\n self.GAN.g_images: lr,\r\n self.GAN.d_images: hr,\r\n self.GAN.is_training: False\r\n })\r\n\r\n return res", "def update_state(self, makespan_train, makespan_baseline):\n self.mean_makespan_train.assign_add(tf.reduce_mean(makespan_train))\n self.mean_makespan_baseline.assign_add(tf.reduce_mean(makespan_baseline))\n self.step.assign_add(1)", "def compute_values(self, update_statistics=False):\n\n self.compute_iterations()\n self.axsec = sum([one.axsec for one in self])\n self.xsec = sum([one.xsec for one in self])\n self.xerrc = sum([one.xerrc for one in self])\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self]))\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = sum([one.nw for one in self])\n self.maxit = len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = min([0]+[one.luminosity for one in self])\n if update_statistics:\n self.run_statistics.aggregate_statistics([_.run_statistics for _ in self])", "def measure(self):\n # --- perform repeated runs\n for i_run in range(self.n_runs):\n if self.verbosity > 0:\n print(\"Run {0} / {1} ...\".format(i_run, self.n_runs), end = '')\n tdelta = self._timed_execute()\n self._run_times[i_run] = tdelta\n\t\t\t\n if self.verbosity == 2:\n print(tdelta)\n \n # calculate mean\n self._tmean = np.mean(self._run_times)\n # calculate standard deviation\n self._tstdev = np.std(self._run_times)\n # allow access to results\n self.__hasrun = True", "def exponential_running_var_from_demeaned(demeaned_data, factor_new,\n start_var=None,\n init_block_size=None, axis=None):\n # TODELAY: split out if and else case into different functions\n # i.e. 
split apart a common function having a start value (basically the loop)\n # and then split if and else into different functions\n factor_old = 1 - factor_new\n # first preallocate the shape for the running vars for performance (otherwise much slower)\n # shape depends on which axes will be removed\n running_vars_shape = list(demeaned_data.shape)\n if axis is not None:\n for ax in axis:\n running_vars_shape.pop(ax)\n running_vars = (np.ones(running_vars_shape) * np.nan).astype(np.float32)\n\n if start_var is None:\n if axis is not None:\n axes_for_start_var = (0,) + axis # also average across init trials\n else:\n axes_for_start_var = 0\n\n # possibly temporarily upcast to float32 to avoid overflows in sum\n # that is computed to compute mean\n start_running_var = np.mean(\n np.square(demeaned_data[0:init_block_size].astype(np.float32)),\n axis=axes_for_start_var, keepdims=True).astype(demeaned_data.dtype)\n running_vars[0:init_block_size] = start_running_var\n current_var = start_running_var\n start_i = init_block_size\n else:\n current_var = start_var\n start_i = 0\n\n for i in range(start_i, len(demeaned_data)):\n squared = np.square(demeaned_data[i:i + 1])\n if axis is not None:\n this_var = np.mean(squared, axis=axis, keepdims=True)\n else:\n this_var = squared\n next_var = factor_new * this_var + factor_old * current_var\n running_vars[i] = next_var\n current_var = next_var\n assert not np.any(np.isnan(running_vars))\n return running_vars", "def run_recurrent_dynamics(self,record_mean_max=True,activation='tanh',r_max=100):\n \n print '\\nRunning recurrent dynamics'\n\n activation_fun=get_activation_fun(activation,r_max)\n\n \n r_e=np.zeros((self.N_e,self.NX))\n r_i=np.zeros((self.N_i,self.NX))\n r=np.vstack([r_e,r_i])\n\n num_steps=int(self.recdyn_time/self.dt)\n\n if record_mean_max is True: \n self.rec_input_mean_vect=np.zeros((self.N,num_steps))\n self.rec_input_max_vect=np.zeros((self.N,num_steps))\n self.r_mean_vect=np.zeros((self.N,num_steps))\n self.r_max_vect=np.zeros((self.N,num_steps))\n\n self.r_evo=np.zeros((self.N,self.NX,self.recdyn_num_snaps))\n self.rec_input_evo=np.zeros((self.N,self.NX,self.recdyn_num_snaps))\n \n delta_snap=num_steps/self.recdyn_num_snaps\n \n snap_idx=0\n \n rec_input=np.zeros_like(r)\n start_clock=time.time()\n \n for t in xrange(num_steps):\n \n if np.remainder(t,delta_snap)==0:\n \n sl.print_progress(snap_idx,self.recdyn_num_snaps,start_clock=start_clock,step=1)\n\n self.rec_input_evo[:,:,snap_idx]=rec_input\n self.r_evo[:,:,snap_idx]=r\n snap_idx+=1\n\n if record_mean_max:\n self.rec_input_mean_vect[:,t]=np.mean(rec_input,axis=1)\n self.rec_input_max_vect[:,t]=np.max(rec_input,axis=1)\n self.r_mean_vect[:,t]=np.mean(r,axis=1)\n self.r_max_vect[:,t]=np.max(r,axis=1)\n \n # recurrent input \n rec_input=np.dot(self.W,r)\n \n # total input, add feed-forward inhibition if recurrent inhibition is not explicitely modeled\n tot_input=self.h+rec_input \n if self.N_i==0:\n tot_input+=self.r0\n \n tot_activation = activation_fun(tot_input)\n \n r=r+(self.dt/self.tau)*(-r+tot_activation)\n \n\n self.r=r", "def __call__(self,\n inputs,\n use_running_stats = None,\n weights = None):\n use_running_stats = nn.module.merge_param(\n \"use_running_stats\", self.use_running_stats, use_running_stats)\n\n # Normalization is independent per spin per channel.\n num_spins, num_channels = inputs.shape[-2:]\n feature_shape = (1, 1, 1, num_spins, num_channels)\n reduced_feature_shape = (num_spins, num_channels)\n\n initializing = not self.has_variable(\"batch_stats\", 
\"variance\")\n\n running_variance = self.variable(\"batch_stats\", \"variance\",\n lambda s: jnp.ones(s, jnp.float32),\n reduced_feature_shape)\n\n if self.centered:\n running_mean = self.variable(\"batch_stats\", \"mean\",\n lambda s: jnp.zeros(s, jnp.complex64),\n reduced_feature_shape)\n\n if use_running_stats:\n variance = running_variance.value\n if self.centered:\n mean = running_mean.value\n else:\n # Compute the spherical mean over the spherical grid dimensions, then a\n # conventional mean over the batch.\n if self.centered:\n mean = sphere_utils.spin_spherical_mean(inputs)\n mean = jnp.average(mean, axis=0, weights=weights)\n # Complex variance is E[x x*] - E[x]E[x*].\n # For spin != 0, E[x] should be zero, although due to discretization this\n # is not always true. We only use E[x x*] here.\n # E[x x*]:\n mean_abs_squared = sphere_utils.spin_spherical_mean(inputs *\n inputs.conj())\n mean_abs_squared = jnp.average(mean_abs_squared, axis=0, weights=weights)\n # Aggregate means over devices.\n if self.axis_name is not None and not initializing:\n if self.centered:\n mean = lax.pmean(mean, axis_name=self.axis_name)\n mean_abs_squared = lax.pmean(mean_abs_squared, axis_name=self.axis_name)\n\n # Imaginary part is negligible.\n variance = mean_abs_squared.real\n\n if not initializing:\n running_variance.value = (self.momentum * running_variance.value +\n (1 - self.momentum) * variance)\n if self.centered:\n running_mean.value = (self.momentum * running_mean.value +\n (1 - self.momentum) * mean)\n\n if self.centered:\n outputs = inputs - mean.reshape(feature_shape)\n else:\n outputs = inputs\n\n factor = lax.rsqrt(variance.reshape(feature_shape) + self.epsilon)\n if self.use_scale:\n scale = self.param(\"scale\",\n self.scale_init,\n reduced_feature_shape).reshape(feature_shape)\n factor = factor * scale\n\n outputs = outputs * factor\n\n if self.use_bias:\n bias = self.param(\"bias\",\n self.bias_init,\n reduced_feature_shape).reshape(feature_shape)\n outputs = outputs + bias\n\n return outputs", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def set_eval(self):\n self.eval()\n self.volatile = True", "def compute(self, global_step, params, batch_loss):\n if self.is_active(global_step):\n norm_test = self._compute(params, batch_loss).item()\n\n if self._verbose:\n print(f\"[Step {global_step}] NormTest: {norm_test:.4f}\")\n\n self.output[global_step][\"norm_test\"] = norm_test\n\n if self._check:\n self.__run_check(params, batch_loss)\n else:\n pass", "def _force_loop(self):\n NUM_SAMPLES = 10.0\n\n # Get the initial readings\n time.sleep(1)\n readings = []\n for i in range(0, int(NUM_SAMPLES)):\n readings.insert(0, self._sensor.value)\n time.sleep(self._sampling_rate)\n\n self._average_force = sum(r for r in readings) / NUM_SAMPLES\n\n # Average the readings\n while True:\n readings.insert(0, self._sensor.value)\n readings.pop()\n\n self._average_force = sum(r for r in readings) / NUM_SAMPLES\n\n time.sleep(self._sampling_rate)", "def update(self, batch_mean, batch_var, batch_count):\n delta = batch_mean - self.mean\n new_count = (self.count + batch_count)\n new_mean = self.mean + delta * (batch_count / new_count)\n new_var = self.count * self.var + batch_count * batch_var\n new_var += (delta**2) * self.count * batch_count / new_count\n new_var /= new_count\n self.count.copy_(new_count)\n self.mean.copy_(new_mean)\n self.var.copy_(new_var)\n self.std = torch.sqrt(self.var)", "def finish_online_evaluation_extended(self, task):\n # -- Get current 
True-Positive, False-Positive and False-Negative -- #\n self.online_eval_tp = np.sum(self.online_eval_tp, 0)\n self.online_eval_fp = np.sum(self.online_eval_fp, 0)\n self.online_eval_fn = np.sum(self.online_eval_fn, 0)\n\n # -- Calculate the IoU -- #\n global_iou_per_class = [i for i in [i / (i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n\n # -- Calculate the Dice -- #\n global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n\n # -- Store IoU and Dice values. Ensure it is float64 so its JSON serializable -- #\n # -- Do not use self.all_val_eval_metrics since this is used for plotting and then the -- #\n # -- plots do not build correctly because based on self.save_every more dice values than -- #\n # -- expected (epochs) are in there --> see plot_progress function in network_trainer.py -- #\n iou = np.mean(global_iou_per_class, dtype=\"float64\")\n dice = np.mean(global_dc_per_class, dtype=\"float64\")\n\n # -- Update the log file -- #\n self.print_to_log_file(\"Average global foreground IoU for task {}: {}\".format(task, str(global_iou_per_class)))\n self.print_to_log_file(\"(interpret this as an estimate for the IoU of the different classes. This is not \"\n \"exact.)\")\n self.print_to_log_file(\"Average global foreground Dice for task {}: {}\".format(task, str(global_dc_per_class)))\n self.print_to_log_file(\"(interpret this as an estimate for the Dice of the different classes. This is not \"\n \"exact.)\")\n\n # -- Add the results to self.validation_results based on task and epoch -- #\n if self.validation_results.get('epoch_'+str(self.epoch), None) is None:\n self.validation_results['epoch_'+str(self.epoch)] = { task: {\n 'IoU': iou,\n 'Dice': dice\n }\n }\n else: # Epoch entry does already exist in self.validation_results, so only add the task with the corresponding values\n self.validation_results['epoch_'+str(self.epoch)][task] = { 'IoU': iou,\n 'Dice': dice\n }\n \n # -- Empty the variables for next iteration -- #\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []", "def _compute_global_mean(self, dataset, session, limit=None):\n _dataset = dataset\n mean = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray):\n mean = np.mean(_dataset)\n else:\n # Iterate in case of non numpy data\n for i in range(len(dataset)):\n mean += np.mean(dataset[i]) / len(dataset)\n self.global_mean.assign(mean, session)\n return mean", "def current_mean(self):\r\n values = self._timings\r\n return np.mean(values)", "def __call__(self, trainer):\n observation = trainer.observation\n summary = self._summary\n key = self._key\n if key in observation:\n summary.add({key: observation[key]})\n\n if not self._interval_trigger(trainer):\n return False\n\n if self._max_trigger(trainer):\n return True\n\n stats = summary.compute_mean()\n value = float(stats[key]) # copy to CPU\n self._init_summary()\n\n if not self._best_value or self._compare(self._best_value, value):\n self._best_value = value\n self._waited = 0\n return False\n elif self._waited >= self._patience:\n return True\n else:\n self._waited += 1\n if self._waited >= self._patience:\n return True\n else:\n return False", "def _run_computation(self):\n with self.swap(stats_jobs_continuous.StatisticsAggregator,\n 'get_statistics', 
self._mock_get_statistics):\n ModifiedUserImpactAggregator.start_computation()\n self.process_and_flush_pending_tasks()", "def _update_mean_and_sigma_inv_times_centered_y(self):\n\n if self._sigma_inv_times_centered_y is not None:\n update_condition = (\n len(self._sigma_inv_times_centered_y) != len(self.list_y)\n )\n else:\n update_condition = self._mean_y is None\n\n if update_condition:\n y, mean = self._solve_linear_system()\n self._sigma_inv_times_centered_y = y\n self._mean_y = mean", "def _compute_global_mean(self, dataset, session, limit=None):\n _dataset = dataset\n mean = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray) and not self.global_mean_pc:\n mean = np.mean(_dataset)\n else:\n # Iterate in case of non numpy data\n for i in range(len(dataset)):\n if not self.global_mean_pc:\n mean += np.mean(dataset[i]) / len(dataset)\n else:\n mean += (np.mean(dataset[i], axis=(0, 1),\n keepdims=True) / len(dataset))[0][0]\n self.global_mean.assign(mean, session)\n return mean", "def _fill_mean_dict(self, running_metrics: Dict, mean_dict: Dict) -> None:\n for key, value in running_metrics.items():\n mean = np.mean(value)\n mean_dict[key].append(mean)" ]
[ "0.61866266", "0.59162766", "0.5869464", "0.5706126", "0.5659805", "0.55348825", "0.54772735", "0.54338247", "0.5433502", "0.5417435", "0.5414351", "0.53781146", "0.5374504", "0.53155285", "0.53055024", "0.5271861", "0.5214751", "0.52096206", "0.520088", "0.5198845", "0.5172831", "0.51610935", "0.5159951", "0.5134131", "0.5128221", "0.5118458", "0.5112371", "0.5109548", "0.50916296", "0.5042067" ]
0.5927717
1
Return duration in years
def duration(self):
    if self.is_valid:
        return relativedelta(self.expiry, datetime.date.today()).years
    else:
        return -1
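The year arithmetic comes straight from dateutil; a standalone sketch of the same expression, with the expiry date invented for illustration:

import datetime
from dateutil.relativedelta import relativedelta

expiry = datetime.date.today() + datetime.timedelta(days=730)  # illustrative expiry
years_left = relativedelta(expiry, datetime.date.today()).years
print(years_left)  # 1 or 2, depending on whether a leap day falls in the interval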
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def days_to_years(datum):\n return datum/DAYS_PER_YEAR", "def year(self):\n return self._years", "def unit_yr(self):\n return ((self.time_base * 60.0) * 24.0) * 365.0", "def periods_in_a_year(self) -> float:\n return self.length / self.yearfrac", "def _unit_yr(self):\n return ((self.time_base * 60.0) * 24.0) * 365.0", "def yearfrac(self) -> float:\n return (self.last_idx - self.first_idx).days / 365.25", "def get_age(self):\n return int(CURRENT_YEAR[:4]) - self.year # String-slice only the year", "def get_age(self):\n return Guitar.CURRENT_YEAR - self.year", "def years_to_pay(self) -> float:\n return round(self.term / self.term_multiplier * self.n_periods / 12, 1)", "def get_years():\n if request.method == 'OPTIONS':\n logging.info(\"Handle options\")\n return create_response({}, 200, '*', 'content-type, token')\n\n logging.info(\"Getting recruitment years\")\n\n role, response = handle_request_token(request)\n\n if role is None:\n logging.warning(\"Role is None!\")\n return response\n\n years = set()\n for rec in Recruitment.query.all():\n if rec.end_date.year not in years:\n years.add(rec.end_date.year)\n\n years = list(years)\n years.sort(reverse=True)\n\n return create_response(years, 200, '*')", "def get_age(self):\n return CURRENT_YEAR - self.year", "def get_age(self):\n return CURRENT_YEAR - self.year", "def get_year(self):\n return self.year", "def get_year(self) -> str:\n return str(self.movie.releasedate.year)", "def yearlyDepreciation():\n return .10", "def calculateAgeInYears(year_born):\r\n # ref https://stackoverflow.com/questions/4436957/pythonic-difference-between-two-dates-in-years\r\n current_year = int(d.datetime.now().year)\r\n difference_in_years = abs(current_year - year_born)\r\n return int(difference_in_years)", "def getagefromyear(year=None):\n if year is None:\n print(\"Please enter the year to assign class to them\")\n try:\n t = datetime.datetime.today()\n b = datetime.datetime.strptime(str(year), '%Y')\n a = (t - b).days / 365\n a = int(a)\n if (a < 10) or (a > 80):\n a = None\n except:\n a = None\n return a", "def age(self):\n return datetime.now().year - self.birth_day.year", "def get_age(self):\n age = CURRENT_YEAR - self.year\n return age", "def year(self) -> int:\r\n return self._year", "def year(self) -> int:\n return self.arxiv_id.year", "def get_age(self):\n age = 2021 - self.year\n return age", "def num_years():\n years = movies['Year']\n return ('num_years', years.nunique())", "def age(self):\n today = datetime.date(2001, 5, 12)\n yyyy = self.birthday[0:4]\n mm = int(self.birthday[4:6])\n dd = int(self.birthday[6:8])\n dob = datetime.date(int(yyyy), mm, dd)\n age_in_days = (today - dob).days\n age_in_years = age_in_days / 365\n return int(age_in_years)", "def new_year(dacycle):\n\n this_year = dacycle['time.start'].year\n prev_year = (dacycle['time.start']-dacycle['cyclelength']).year\n\n return (this_year != prev_year)", "def test_interval_to_seconds_with_years(self):\n self.assert_interval_to_seconds(0, \"0y\", \"0year\", \"0years\")\n self.assert_interval_to_seconds(31536000, \"1y\", \"1year\", \"1years\")\n self.assert_interval_to_seconds(5 * 31536000, \"5y\", \"5year\", \"5years\")\n self.assert_interval_to_seconds(\n 123 * 31536000, \"123y\", \"123year\", \"123years\")\n self.assert_interval_to_seconds(\n 2 * 31536000, \"02y\", \"02year\", \"02years\")", "def year(self) -> int:\n if self.is_old_style:\n yy = int(self.split('/', 1)[1][0:2])\n else:\n yy = int(self[:2])\n if yy > 90:\n return 1900 + yy\n return 2000 + yy", "def get_age(self, 
name=None):\n now = datetime.now()\n delta = relativedelta(now, self.date_of_birth)\n years_months_days = str(delta.years) + 'y ' + str(delta.months) + \\\n 'm ' + str(delta.days) + 'd'\n return years_months_days", "def age(self):\n today = datetime.date.today()\n\n return today.year - int(self.birthday[0:4])", "def get_years(self):\n\n year = cast(extract('year', ArchivedResult.date), Integer)\n query = self.session.query\n query = query(distinct(year))\n query = query.order_by(desc(year))\n\n return list(r[0] for r in query.all())" ]
[ "0.72326726", "0.7115902", "0.70439005", "0.70209014", "0.6918941", "0.68189144", "0.6782731", "0.67128307", "0.66573805", "0.6547447", "0.6528867", "0.6528867", "0.6477337", "0.64250714", "0.6410062", "0.64020795", "0.63959104", "0.6354012", "0.6353335", "0.6349158", "0.6343395", "0.63021785", "0.6301902", "0.62806255", "0.6271723", "0.62552905", "0.6250305", "0.6238676", "0.62346345", "0.62339747" ]
0.7217856
1
Transition from iceboot to domapp by uploading 'domappFile', uncompressing it and executing from iceboot. Load domapp FPGA first.
def uploadDomapp2(self, domappFile):
    if not os.path.exists(domappFile):
        raise DomappFileNotFoundException(domappFile)
    size = os.stat(domappFile)[ST_SIZE]
    if size <= 0:
        return (False, "size error: %s %d bytes" % (domappFile, size))
    # Load domapp FPGA
    ok, txt = self.se("s\" domapp.sbi.gz\" find if fpga-gz endif\r\n", ">")
    if not ok:
        return (False, "%s\nFPGA reload failed!" % txt)
    # Prepare iceboot to receive file
    ok, txt = self.se("%d read-bin\r\n" % size, "read-bin")
    if not ok:
        return (False, "%s\nread-bin failed!" % txt)
    # Send file data
    if not self.sendFile(domappFile):
        return (False, "send file failed!")
    # See if iceboot is still ok
    ok, txt = self.se("\r\n", ">")
    if not ok:
        return (False, "%s\ndidn't get iceboot prompt!" % txt)
    # Exec the new domapp program
    ok, txt = self.se("gunzip exec\r\n", "READY")
    if not ok:
        return (False, "%s\ndidn't get READY!" % txt)
    return (True, "")
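A purely illustrative caller, assuming a driver object dom that exposes this method; the path and error handling are invented for the sketch, and nothing here runs without the underlying DOM communication layer:

ok, msg = dom.uploadDomapp2("/path/to/domapp.gz")  # hypothetical driver instance
if not ok:
    raise RuntimeError("domapp upload failed: %s" % msg)
# On success the DOM is now running domapp instead of iceboot.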
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(app, verbose, replay, exp_config=None):\n if replay:\n exp_config = exp_config or {}\n exp_config[\"replay\"] = True\n log(header, chevrons=False)\n loader = LoaderDeployment(app, Output(), verbose, exp_config)\n loader.run()", "def software_load(self, filename: str) -> None:\n pass # Most boards can use serialboot.", "def _loadFilesToIsoDomain():\n #TODO: add more iso files in the future\n fileList = [basedefs.FILE_VIRTIO_WIN_VFD, basedefs.FILE_VIRTIO_WIN_ISO, basedefs.FILE_RHEV_GUEST_TOOLS_ISO]\n\n # Prepare the full path for the iso files\n targetPath = os.path.join(controller.CONF[\"NFS_MP\"], controller.CONF[\"sd_uuid\"], \"images\", \"11111111-1111-1111-1111-111111111111\")\n\n try:\n # Iterate the list and copy all the files\n for filename in fileList:\n utils.copyFile(filename, targetPath, basedefs.CONST_VDSM_UID, basedefs.CONST_KVM_GID)\n except:\n # We don't want to fail the setup, just log the error\n logging.error(traceback.format_exc())\n logging.error(output_messages.ERR_FAILED_TO_COPY_FILE_TO_ISO_DOMAIN)", "def loadFlash(portName, flashFile):\n command = [\n \"./avrdude\",\n \"-c\", \"avrisp\",\n \"-p\", \"m32u4\",\n \"-P\", portName,\n \"-B\", \"1\",\n \"-U\" \"flash:w:%s:i\" % flashFile,\n ]\n\n return subprocess.call(command)", "def x(appname, firejail=False):\n z = Zap(appname)\n if not z.is_installed:\n print(\"{} is not installed yet.\".format(appname))\n return\n path_to_appimage = z.appdata().get('path')\n Execute(path_to_appimage, use_firejail=firejail)\n print(\"Done!\")", "def load(self):\n\n if self.useCached:\n return self.loadDicomsFromDatabase(self.extractedFiles)\n\n\n if not os.path.exists(self._dst):\n return \n \n\n \n #--------------------\n # Make sure Slicer's DICOMdatabase is set up.\n # Show a popup informing the user if it's not.\n # The user has to restart the process if it's not.\n #--------------------\n m = slicer.util.mainWindow()\n if not slicer.dicomDatabase:\n msg = \"It doesn\\'t look like your DICOM database directory is\"\n msg += \"setup. Please set it up in the DICOM module. You can \"\n msg += \"load your downloaded files here: '***HERE***'.\"\"\"\n msg = msg.replace('***HERE***', self._dst)\n self.terminateLoad(['DICOM load', msg ])\n m.moduleSelector().selectModule('DICOM') \n\n\n\n #--------------------\n # UNZIP dst\n #--------------------\n self.extractDst()\n\n \n\n #--------------------\n # Add DICOM files to slicer.dicomDataase\n #--------------------\n dicomIndexer = ctk.ctkDICOMIndexer()\n try:\n dicomIndexer.addListOfFiles(slicer.dicomDatabase, \\\n self.extractedFiles)\n except Exception as e:\n \n #\n # If the database is uninitialized, then initialize it.\n #\n errorString = str(e)\n if 'uninitialized ctkDICOMItem' in errorString:\n #print (MokaUtils.debug.lf(), \"The slicer.dicomDabase is \" + \\\n #\"unitialized (%s). 
Initializing it.\"%(errorString))\n slicer.dicomDatabase.initialize()\n dicomIndexer.addListOfFiles(slicer.dicomDatabase, \n self.extractedFiles)\n\n #--------------------\n # Delete dst\n #--------------------\n os.remove(self._dst)\n\n\n #--------------------\n # Load the 'downloaded' DICOMS from Slicer's database.\n #--------------------\n return self.loadDicomsFromDatabase(self.extractedFiles)", "def load_app(self):\n self.driver.wdvr.start_activity(PACKAGE.DROPBOX, LAUNCH_ACTIVITY.DROPBOX_HOME, app_wait_activity=WAIT_ACTIVITY.DROPBOX)", "def _load_fw_file(fw_fpath: str):\n mouth_iface_fname = os.environ.get(\"SWD_CONFIG_MOUTH\", \"raspberrypi-mouth-swd.cfg\")\n openocd_cmds = f'program {fw_fpath} verify reset exit'\n logging.info(f\"Attempting to load {fw_fpath} into MCU...\")\n cmd = f'openocd -f interface/{mouth_iface_fname} -f target/rp2040.cfg -c '\n result = subprocess.run(cmd.split() + [openocd_cmds], capture_output=True, encoding='utf-8')\n\n if result.returncode != 0:\n logging.error(f\"Non-zero return code when attempting to load FW. Got return code {result.returncode}\")\n logging.error(f\"Mouth MCU may be non-responsive.\")\n logging.error(f\"Got stdout and stderr from openocd subprocess:\")\n logging.error(f\"STDOUT: {result.stdout}\")\n logging.error(f\"STDERR: {result.stderr}\")\n else:\n logging.info(\"Loaded FW successfully.\")", "def serve_application(filename):\n storeapps = APP.config[\"storage\"]\n return flask.send_from_directory(storeapps, filename, mimetype=\"application/octet-stream\")", "def __get_packed_xwalk_app_template(self, dest_dir):\n input_file = urllib2.urlopen(self.updated_url)\n contents = input_file.read()\n input_file.close()\n file_path = os.path.join(dest_dir, self.file_name)\n if os.path.isfile(file_path):\n os.remove(file_path)\n file_dir = dest_dir + '/' + self.file_name.split('.tar.gz')[0]\n if os.path.exists(file_dir):\n shutil.rmtree(file_dir)\n output_file = open(file_path, 'w')\n output_file.write(contents)\n output_file.close()", "def run_flash(board, addr, common_paths, input_file):\n # Extract path information.\n (ocd_bin, scripts_path, helpers_path) = common_paths\n # Load and extract flash information.\n (soc, flash_addr) = load_flash_cfg(board, addr)\n\n cmd = [ocd_bin, '-s', scripts_path, '-s', helpers_path,\n '-f', 'flash_{}.cfg'.format(soc),\n '-c', 'load_image {} {}'.format(input_file, flash_addr),\n '-c', 'verify_image {} {}'.format(input_file, flash_addr),\n '-f', '{}-release.cfg'.format(soc)]\n try:\n subprocess.call(cmd)\n except subprocess.CalledProcessError:\n print('Command failed.')\n exit(1)", "def uploadAppRedir():\n logger.debug('[FLASKWEB /app] Redirect to /apps')\n return uploadApp()", "def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')", "def import_idb(self, idb_file):\n self.__run_import_script(file=idb_file, is_bin=False)", "def flash_binary(\n mount_point: pathlib.Path, program_path: pathlib.Path, build_dir: pathlib.Path, mbed_target: str, hex_file: bool\n) -> pathlib.Path:\n fw_file = _build_binary_file_path(program_path, build_dir, hex_file)\n _flash_dev(mount_point, fw_file)\n return fw_file", "def actionFromweb(self):\n print(\"Grabbing %x firmware.\" % self.dev_id)\n print(\"%s\" % firmware[self.dev_id])\n fn=\"/tmp/.goodfet.hex\"\n os.system(\"curl %s >%s\" % (firmware[self.dev_id],fn))\n\n fw=Memory(fn)\n #fw.loadIhex(open(fn,\"rb\"))\n\n sys.stderr.write(\"Program ...\\n\")\n 
sys.stderr.flush()\n self.programData(fw, self.ACTION_PROGRAM | self.ACTION_VERIFY)\n sys.stderr.write(\"%i bytes programmed.\\n\" % self.byteCtr)\n sys.stderr.flush()", "def load_device():", "def bootloader() -> NoReturn:", "def deploy(fingerengine, fingerprint):\n\n base = 'http://{0}:{1}'.format(fingerengine.options.ip, fingerprint.port)\n uri = '/manager/html/upload'\n war_file = fingerengine.options.deploy\n war_path = parse_war_path(war_file)\n cookies = checkAuth(fingerengine.options.ip, fingerprint.port,\n fingerprint.title, fingerprint.version)\n if not cookies:\n utility.Msg(\"Could not get auth for %s:%s\" %\n (fingerengine.options.ip, fingerprint.port), LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}...\".format(war_file))\n\n if fingerprint.version in ['6.0', '7.0', '8.0']:\n # deploying via the gui requires a CSRF token\n (csrf, c) = fetchCSRF(base, cookies)\n if not csrf:\n return\n else:\n # set CSRF and refresh session id\n uri += '?org.apache.catalina.filters.CSRF_NONCE={0}'\n uri = uri.format(csrf)\n cookies = (c, cookies[1])\n\n # read in payload\n try:\n tag = 'deployWar'\n if fingerprint.version in ['4.0', '4.1']:\n tag = 'installWar'\n files = {tag : (war_path + '.war', open(war_file, 'rb'))}\n except Exception, e:\n utility.Msg(e, LOG.ERROR)\n return\n\n # deploy\n response = utility.requests_post(base + uri, files=files, cookies=cookies[0],\n auth=cookies[1])\n\n if response.status_code is 200 and \"OK\" in response.content:\n utility.Msg(\"Deployed {0} to /{1}\".format(war_file, war_path), LOG.SUCCESS)\n elif 'Application already exists' in response.content:\n utility.Msg(\"Application {0} is already deployed\".format(war_file), LOG.ERROR)\n elif response.status_code is 403:\n utility.Msg(\"This account does not have permissions to remotely deploy. 
Try\"\\\n \" using manager_deploy\", LOG.ERROR)\n else:\n utility.Msg(\"Failed to deploy (HTTP %d)\" % response.status_code, LOG.ERROR)", "def _binary_app(self):\n self.make_binary()", "def main():\n\n if os.path.isfile(os.path.join(os.getcwd(), 'fose_loader.exe')):\n util.replace_command('FalloutLauncher.exe', 'fose_loader.exe')", "def start():\n from paste.deploy import loadapp, loadserver\n from moksha.config.environment import load_environment\n from moksha.config.middleware import make_app\n ini = 'config:' + path('development.ini').abspath()\n wsgi_app = loadapp(ini)\n serve = loadserver(ini)\n serve(wsgi_app)", "def fusion_api_upgrade_appliance_firmware(self, localfile, api=None, headers=None):\n param = '?file=%s' % localfile\n return self.appfirmware.update(api, headers, param)", "def _load_disk(self):", "def _load_disk(self):", "def flashUboot(self):\n\t\tif self.settings.getKeyValue('flash.uboot?') == 'y':\n\t\t\tloadAddress = self.settings.getKeyValue('u-boot.flash.address')\n\t\t\tcmd = self.settings.getKeyValue('u-boot.load.command')\n\t\t\tcmd = cmd.replace('<u-boot>', 'u-boot.bin.12x.2430')\n\t\t\tself.socket.send(cmd, 5)\n\t\t\t#self.socket.send('protect off 1:0-1\\r', 2)\n\t\t\t#self.socket.send('erase 1:0-1\\r', 2)\n\t\t\t#self.socket.send('cp.b 80000000 %s 2ffff\\r' % loadAddress)\n\t\t\treturn None\n\t\t\t#cmd = cmd.replace('<u-bootloadadress>', self.u-bootloadaddress)", "def PrepareFlasher(self, uboot, payload, update, verify, boot_type, bus):\n fdt = self._fdt.Copy(os.path.join(self._tools.outdir, 'flasher.dtb'))\n payload_data = self._tools.ReadFile(payload)\n\n # Make sure that the checksum is not negative\n checksum = binascii.crc32(payload_data) & 0xffffffff\n\n script, replace_me = self._GetFlashScript(len(payload_data), update,\n verify, boot_type, checksum, bus)\n data = self._tools.ReadFile(uboot)\n fdt.PutString('/config', 'bootcmd', script)\n fdt_data = self._tools.ReadFile(fdt.fname)\n\n # Work out where to place the payload in memory. This is a chicken-and-egg\n # problem (although in case you haven't heard, it was the chicken that\n # came first), so we resolve it by replacing the string after\n # fdt.PutString has done its job.\n #\n # Correction: Technically, the egg came first. Whatever genetic mutation\n # created the new species would have been present in the egg, but not the\n # parent (since if it was in the parent, it would have been present in the\n # parent when it was an egg).\n #\n # Question: ok so who laid the egg then?\n payload_offset = len(data) + len(fdt_data)\n\n # NAND driver expects 4-byte alignment. 
Just go whole hog and do 4K.\n alignment = 0x1000\n payload_offset = (payload_offset + alignment - 1) & ~(alignment - 1)\n\n load_address = self.text_base + payload_offset,\n new_str = '%08x' % load_address\n if len(replace_me) is not len(new_str):\n raise ValueError(\"Internal error: replacement string '%s' length does \"\n \"not match new string '%s'\" % (replace_me, new_str))\n matches = len(re.findall(replace_me, fdt_data))\n if matches != 1:\n raise ValueError(\"Internal error: replacement string '%s' already \"\n \"exists in the fdt (%d matches)\" % (replace_me, matches))\n fdt_data = re.sub(replace_me, new_str, fdt_data)\n\n # Now put it together.\n data += fdt_data\n data += \"\\0\" * (payload_offset - len(data))\n data += payload_data\n flasher = os.path.join(self._tools.outdir, 'flasher-for-image.bin')\n self._tools.WriteFile(flasher, data)\n\n # Tell the user about a few things.\n self._tools.OutputSize('U-Boot', uboot)\n self._tools.OutputSize('Payload', payload)\n self._out.Notice('Payload checksum %08x' % checksum)\n self._tools.OutputSize('Flasher', flasher)\n return flasher", "def ExynosFlashImage(self, flash_dest, flash_uboot, bl1, bl2, payload,\n kernel):\n if flash_dest:\n image = self.PrepareFlasher(flash_uboot, payload, self.update,\n self.verify, flash_dest, '1:0')\n else:\n bl1, bl2, image = self._ExtractPayloadParts(payload)\n\n vendor_id = 0x04e8\n product_id = 0x1234\n\n # Preserve dut_hub_sel state.\n preserved_dut_hub_sel = self._DutControl(['dut_hub_sel',]\n ).strip().split(':')[-1]\n required_dut_hub_sel = 'dut_sees_servo'\n args = ['warm_reset:on', 'fw_up:on', 'pwr_button:press', 'sleep:.1',\n 'warm_reset:off']\n if preserved_dut_hub_sel != required_dut_hub_sel:\n # Need to set it to get the port properly powered up.\n args += ['dut_hub_sel:%s' % required_dut_hub_sel]\n # TODO(sjg) If the board is bricked a reset does not seem to bring it\n # back to life.\n # BUG=chromium-os:28229\n args = ['cold_reset:on', 'sleep:.2', 'cold_reset:off'] + args\n self._out.Progress('Reseting board via servo')\n self._DutControl(args)\n\n # If we have a kernel to write, create a new image with that added.\n if kernel:\n dl_image = os.path.join(self._tools.outdir, 'image-plus-kernel.bin')\n data = self._tools.ReadFile(image)\n\n # Pad the original payload out to the original length\n data += '\\0' * (os.stat(payload).st_size - len(data))\n data += self._tools.ReadFile(kernel)\n self._tools.WriteFile(dl_image, data)\n else:\n dl_image = image\n\n self._out.Progress('Uploading image')\n download_list = [\n # The numbers are the download addresses (in SRAM) for each piece\n # TODO([email protected]): Perhaps pick these up from the fdt?\n ['bl1', 0x02021400, bl1],\n ['bl2', 0x02023400, bl2],\n ['u-boot', 0x43e00000, dl_image]\n ]\n try:\n for upto in range(len(download_list)):\n item = download_list[upto]\n if not self._WaitForUSBDevice('exynos', vendor_id, product_id, 4):\n if upto == 0:\n raise CmdError('Could not find Exynos board on USB port')\n raise CmdError(\"Stage '%s' did not complete\" % item[0])\n self._out.Notice(item[2])\n self._out.Progress(\"Uploading stage '%s'\" % item[0])\n\n if upto == 0:\n # The IROM needs roughly 200ms here to be ready for USB download\n time.sleep(.5)\n\n args = ['-a', '%#x' % item[1], '-f', item[2]]\n self._tools.Run('smdk-usbdl', args, sudo=True)\n if upto == 1:\n # Once SPL starts up we can release the power buttom\n args = ['fw_up:off', 'pwr_button:release']\n self._DutControl(args)\n\n finally:\n # Make sure that the power button is 
released and dut_sel_hub state is\n # restored, whatever happens\n args = ['fw_up:off', 'pwr_button:release']\n if preserved_dut_hub_sel != required_dut_hub_sel:\n args += ['dut_hub_sel:%s' % preserved_dut_hub_sel]\n self._DutControl(args)\n\n self._out.Notice('Image downloaded - please see serial output '\n 'for progress.')\n return True", "def assemble_firmware(self):\n\n # Check that the layout is available from the firmware configuration file\n if \"layout\" not in self.project.firmware_definition:\n self.project.logging.critical(\"The firmware layout is not defined in configuration file\")\n exit(1)\n\n # Check that the stacking method is available from the firmware configuration file\n if \"method\" not in self.project.firmware_definition[\"layout\"]:\n self.project.logging.critical(\"The firmware stacking method is not defined\")\n exit(1)\n\n # Ensure firmware generation path exists and is a dir\n if not os.path.isdir(self.project.firmware_directory):\n os.makedirs(self.project.firmware_directory)\n\n # Ensure firmware exists\n # TODO : iterate the list of squashfs files\n if not os.path.isfile(self.project.firmware_filename):\n logging.critical(\"The firmware does not exist (\" +\n self.project.firmware_filename + \")\")\n exit(1)\n\n # Remove existing initscript if needed\n if os.path.isfile(self.project.init_filename):\n os.remove(self.project.init_filename)\n\n # Copy the init script to the target directory\n\n # Generate the stacking script\n self.generate_stack_script()", "def before_running(res, src_iso_path, dest_path, project, XML_FILE):\n path_list = [\"common_setting/Generic\",\"common_setting/DVD\"]\n if os.name == 'nt':\n if project.upper() == 'DVDFAB 8' or project.upper() == 'DVDFAB8':\n for path in path_list:\n tree, nodes = windows_xml.read_xml(XML_FILE, path, xml_temp) \n fab_logpath = get_xml_value(nodes[0], 'LogFolder')\n burn_engine_type = get_xml_value(nodes[0], 'BurnEngineType')\n tempfolder_path = get_xml_value(nodes[0], 'TempFolder')\n else:\n for path in path_list:\n tree, nodes = windows_xml.read_xml(XML_FILE, path, xml_temp) \n fab_logpath = get_xml_value(nodes[0], 'LogFolder')\n burn_engine_type = get_xml_value(nodes[0], 'BDBurnEngineType')\n tempfolder_path = get_xml_value(nodes[0], 'TempFolder') \n else:\n if project.upper() == 'DVDFAB 8' or project.upper() == 'DVDFAB8':\n for path in path_list:\n tree, nodes = myxml.read_xml(XML_FILE, path, xml_temp)\n fab_logpath = get_xml_value(nodes[0], 'LogFolder')\n burn_engine_type = get_xml_value(nodes[0], 'BurnEngineType')\n tempfolder_path = get_xml_value(nodes[0], 'TempFolder')\t\n else:\n for path in path_list:\n tree, nodes = myxml.read_xml(XML_FILE, path, xml_temp) \n fab_logpath = get_xml_value(nodes[0], 'LogFolder')\n burn_engine_type = get_xml_value(nodes[0], 'BDBurnEngineType')\n tempfolder_path = get_xml_value(nodes[0], 'TempFolder')\t\t\t\t\n \n dest_path = tempfolder_path if '.ISO' == os.path.splitext(res[6].upper())[1] else dest_path\n initlog('before running, dest_path is: %s' % dest_path) \n tempfolder_path = ''.join((tempfolder_path, 'ReportCrash')).replace(\"_nbsp;\",\" \")\n fab_logpath = fab_logpath.replace(\"_nbsp;\",\" \")\n initlog(\"fab_logpath is: %s; tempfolder_path is: %s\" %(fab_logpath, tempfolder_path))\n logpath = (fab_logpath, tempfolder_path) \n remove_fab_logfile(fab_logpath)\n return dest_path, logpath, burn_engine_type" ]
[ "0.55233026", "0.5206039", "0.51236546", "0.50307345", "0.4985857", "0.49178445", "0.48859143", "0.48607603", "0.48495775", "0.48479488", "0.4829573", "0.4828603", "0.48110473", "0.47907218", "0.47523627", "0.4730287", "0.46991777", "0.46957707", "0.46943602", "0.4688376", "0.46691245", "0.46654052", "0.46586403", "0.46411362", "0.46411362", "0.46399564", "0.46369302", "0.46310285", "0.46164933", "0.45965567" ]
0.7968039
0
Function decorator for unittest test cases to specify test case timeout.
def timeout(time_limit): class TimeoutException(Exception): """ Subclass Exception to catch timer expiration during search """ pass def handler(*args, **kwargs): """ Generic handler to raise an exception when a timer expires """ raise TimeoutException("Test aborted due to timeout. Test was " + "expected to finish in less than {} second(s).".format(time_limit)) def wrapUnitTest(testcase): @wraps(testcase) def testWrapper(self, *args, **kwargs): signal.signal(signal.SIGALRM, handler) signal.alarm(time_limit) try: return testcase(self, *args, **kwargs) finally: signal.alarm(0) return testWrapper return wrapUnitTest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_timeout(timeout):\n def decor(f):\n @functools.wraps(f)\n def inner(self, *args, **kwargs):\n self.useFixture(fixtures.Timeout(timeout, gentle=True))\n return f(self, *args, **kwargs)\n return inner\n return decor", "def pytest_timeout_set_timer(item, settings):", "def timeout(time):\n\n def wrapper(f):\n @wraps(f)\n def wrapped_f(self, event, context):\n return f(self, event, context)\n\n wrapped_f.timeout = time\n return wrapped_f\n\n return wrapper", "def timeout(time_limit):\n\n def wrapUnitTest(testcase):\n\n @wraps(testcase)\n def testWrapper(self):\n\n queue = Queue()\n\n try:\n p = Thread(target=handler, args=(self, testcase, queue))\n p.daemon = True\n p.start()\n err, res = queue.get(timeout=time_limit)\n p.join()\n if err:\n raise err[0](err[1]).with_traceback(err[2])\n return res\n except QueueEmptyError:\n raise TimeoutError(\"Test aborted due to timeout. Test was \" +\n \"expected to finish in less than {} second(s).\".format(time_limit))\n\n return testWrapper\n\n return wrapUnitTest", "def pytest_timeout_cancel_timer(item):", "def testTimeout(self):\n\n class TimeoutTestCase(cros_test_lib.TestCase):\n \"\"\"Test case that raises a TimeoutError because it takes too long.\"\"\"\n\n TEST_CASE_TIMEOUT = 1\n\n def testSleeping(self):\n \"\"\"Sleep for 2 minutes. This should raise a TimeoutError.\"\"\"\n time.sleep(2 * 60)\n raise AssertionError('Test case should have timed out.')\n\n # Run the test case, verifying it raises a TimeoutError.\n test = TimeoutTestCase(methodName='testSleeping')\n self.assertRaises(timeout_util.TimeoutError, test.testSleeping)", "def assert_timeout(self) -> None:", "def func_with_timeout(*args, **kwargs):\n kwargs['timeout'] = self._timeout\n return func(*args, **kwargs)", "def test_timeout(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"test_timeout\")", "def test_timeout(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"test_timeout\")", "def get_test_timeout(self):\n return None", "def set_timeout(self, timeout):\n self.timeout = timeout", "def timeout(\n seconds: int, err_msg: str = \"Timeout after {} seconds.\"\n) -> Callable[[Callable], Callable]:\n\n def timeout_decorator(func):\n \"\"\"The real decorator used for setup, teardown and testcase methods.\"\"\"\n\n def _new_func(result, old_func, old_func_args, old_func_kwargs):\n try:\n result.append(old_func(*old_func_args, **old_func_kwargs))\n except Exception:\n result[0] = False\n result.append(traceback.format_exc())\n\n def wrapper(*args, **kwargs):\n result = [True]\n new_kwargs = {\n \"result\": result,\n \"old_func\": func,\n \"old_func_args\": args,\n \"old_func_kwargs\": kwargs,\n }\n thd = KThread(target=_new_func, args=(), kwargs=new_kwargs)\n thd.start()\n thd.join(seconds)\n if thd.is_alive():\n thd.kill()\n thd.join()\n raise TimeoutException(err_msg.format(seconds))\n else:\n return result\n\n return functools.wraps(func)(wrapper)\n\n return timeout_decorator", "def pytest_timeout_set_timer(item, settings):\n tle.lib.set(int(settings.timeout), str(item).encode(\"utf-8\"))\n return True", "def __call__(self, func):\n @general_helpers.wraps(func)\n def func_with_timeout(*args, **kwargs):\n \"\"\"Wrapped function that adds timeout.\"\"\"\n kwargs['timeout'] = self._timeout\n return func(*args, **kwargs)\n return func_with_timeout", "def func_with_timeout(*args, **kwargs):\n kwargs['timeout'] = next(timeouts)\n return func(*args, **kwargs)", "def timeout_handler(interval, recurring = None):\n def decorator(func):\n 
\"\"\"The decorator\"\"\"\n func._pyxmpp_timeout = interval\n func._pyxmpp_recurring = recurring\n return func\n return decorator", "def timeout_decorator(func):\n\n def _new_func(result, old_func, old_func_args, old_func_kwargs):\n try:\n result.append(old_func(*old_func_args, **old_func_kwargs))\n except Exception:\n result[0] = False\n result.append(traceback.format_exc())\n\n def wrapper(*args, **kwargs):\n result = [True]\n new_kwargs = {\n \"result\": result,\n \"old_func\": func,\n \"old_func_args\": args,\n \"old_func_kwargs\": kwargs,\n }\n thd = KThread(target=_new_func, args=(), kwargs=new_kwargs)\n thd.start()\n thd.join(seconds)\n if thd.is_alive():\n thd.kill()\n thd.join()\n raise TimeoutException(err_msg.format(seconds))\n else:\n return result\n\n return functools.wraps(func)(wrapper)", "def handler(*args, **kwargs):\n raise TimeoutException(\"Test aborted due to timeout. Test was \" +\n \"expected to finish in less than {} second(s).\".format(time_limit))", "def time_limit():\n\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n timer.wait_if_needed()\n return fn(*args, **kwargs)\n\n return wrapper\n\n return decorator", "def pytest_addoption(parser):\n group = parser.getgroup(\n \"timeout\",\n \"Interrupt test run and dump stacks of all threads after a test times out\",\n )\n group.addoption(\"--timeout\", type=float, help=TIMEOUT_DESC)\n parser.addini(\"timeout\", TIMEOUT_DESC)\n parser.addini(\"timeout_func_only\", FUNC_ONLY_DESC, type=\"bool\")", "def test_timeout_kwarg():\n\n testutil.add_response(\"login_response_200\")\n testutil.add_response(\"query_response_200\")\n testutil.add_response(\"logout_response_200\")\n\n client_args = {\n \"username\": testutil.username,\n \"password\": testutil.password,\n \"client_id\": testutil.client_id,\n \"client_secret\": testutil.client_secret,\n \"version\": \"37.0\",\n \"timeout\": \"10\"}\n\n with sfdc.client(**client_args) as client:\n qr = client.query(\"SELECT Id, Name FROM Account LIMIT 10\")\n assert qr[1].timeout == 10.0, 'Timeout value in request is different to client kwarg value'", "def set_timeout(self, timeout):\n pass", "def inner_test(param: datetime.timedelta):\n pass", "def test_timeout(self, mocker, mock_timedelta):\n\n tid = 289466\n site = \"mysite\"\n\n exception_response = self.generate_task_dictionary(\n tid, state=\"started\", completed=None\n )\n\n responses = [{\"json\": exception_response}]\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, responses)\n\n with self.assertRaises(exceptions.AcquiaCloudTimeoutError):\n self.client.site(site).task(tid).wait(0)", "def settimeout(self, value: int) -> None:\n ...", "def test_timeout(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"test_timeout\")", "def test_timeout(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"test_timeout\")", "def test_get_timeouts_with_default(self):\n\n self.set_options(timeouts=True, timeout_default=2)\n task = self.create_task(self.context())\n\n self.assertEquals(task._timeout_for_targets([targetA, targetB]), 3)", "def limit_query_time(timeout, default=None):\n\n def decorator(function):\n def _limit_query_time(*args, **kwargs):\n with transaction.atomic(), connection.cursor() as cursor:\n cursor.execute(\"SET LOCAL statement_timeout TO %s;\", (timeout,))\n try:\n return function(*args, **kwargs)\n except OperationalError:\n return default\n\n return _limit_query_time\n\n 
return decorator" ]
[ "0.79690397", "0.75481164", "0.7531505", "0.74053466", "0.7395922", "0.7079493", "0.70790184", "0.69973075", "0.69770676", "0.69770676", "0.69739807", "0.6917172", "0.67718583", "0.6764333", "0.67116076", "0.6696234", "0.668591", "0.6672928", "0.6670006", "0.6657833", "0.6647855", "0.6627386", "0.661824", "0.6605323", "0.6584675", "0.64180976", "0.641626", "0.641626", "0.6404784", "0.63910925" ]
0.7551795
1
Return counts of (total, unique) nodes visited
def counts(self): return sum(self.counter.values()), len(self.visited)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def count_unvisited(data):\n count = sum(n.count(\"n\") for n in data)\n return count", "def count(self):\n\t\treturn len(list(self.nodes))", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def get_node_count(self) -> Iterable:\n return len([i for i in self.all_nodes_as_iterable()])", "def count(self):\r\n return self.count_helper(self.top_node)", "def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res", "def get_node_count(self) -> Iterable:\n return self._g.V().count().toList()[0]", "def get_nodes_pixel_count(self):\n sum_count = self.pixel_count\n for i in range(8):\n node = self.children[i]\n if node:\n sum_count += node.pixel_count\n return sum_count", "def nodes_per_time_step(graphs: typ.Iterable[vtna.graph.Graph]) -> typ.List[int]:\n return [len(set(node for edge in graph.get_edges() for node in edge.get_incident_nodes())) for graph in graphs]", "def count_unique_relations(graph):\n return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def node_count(self):\n return self._node_count", "def node_count(self):\n if self.value:\n cnt = 0\n else:\n left_cnt = self.left.node_count()\n right_cnt = self.right.node_count()\n cnt = 1 + left_cnt + right_cnt\n return cnt", "def count(self):\n return self.__tree.node_count", "def compute_num_nodes(graph):\n return len(graph.keys()) # return the number of nodes in the graph", "def graph_count(self) -> int:\n return int(self.graph_tuple_stats.graph_count)", "def count_nodes(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 1 + self.get_left().count_nodes() + self.get_right().count_nodes()\n else:\n return 1 + self.get_left().count_nodes()\n else:\n return 1 + self.get_right().count_nodes()", "def node_count(self):\n return self._root.count()", "def find_nodes(shp):\n node_count = {}\n for road in shp:\n vrts = road.vertices\n for node in vrts:\n if node not in node_count:\n node_count[node] = 0\n node_count[node] += 1\n node_count[vrts[0]] += 1\n node_count[vrts[-1]] += 1\n return set([node for node,c in node_count.iteritems() if c > 1])", "def size(self):\n return (len(self.nodes), sum([len(x.outgoing_relations) for x in self.nodes.values()]))", "def count_nodes(self, term=None, labels: istr = None) -> int:", "def getNNodesTot(self):\n nNodesTot = 0\n for iElt in Elements._all:\n nNodesTot += len(iElt.coord)\n return nNodesTot", "def connected_components(self) -> int:\n # visited = set()\n def get_component(vert: Tuple[int, int]) -> Set[Tuple[int, int]]:\n \"\"\" \"\"\"\n nonlocal visited\n visited.add(vert)\n if graph.vertices[vert]:\n for neighbor in graph.vertices[vert]:\n if neighbor not in visited:\n visited.add(neighbor)\n neighbor_components = get_component(neighbor)\n visited = visited.union(neighbor_components)\n else:\n continue\n\n return visited\n else:\n return visited\n\n components: List[Set[Tuple[int, int]]] = list()\n for vertex in graph.vertices.keys():\n visited: Set[Tuple[int, int]] = set()\n component = get_component(vertex)\n if component not in components:\n components.append(component)\n else:\n continue\n \n return len(components)", "def 
getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def n(self):\n return sum(list(self.nodes.values()))", "def count_nodes(self):\n if self.children is None:\n return 0\n\n total_count = 0\n for child in self.children:\n if child is None:\n return 0\n child_count = child.count_nodes()\n total_count = total_count + child_count\n\n return total_count+1", "def count_pathologies(graph):\n return Counter(_pathology_iterator(graph))", "def get_visited_nodes(self):\n return self.visited_nodes", "def node_state_counts(self) -> pulumi.Output['outputs.NodeStateCountsResponse']:\n return pulumi.get(self, \"node_state_counts\")" ]
[ "0.7426861", "0.736728", "0.7362337", "0.71854764", "0.7132183", "0.7039101", "0.70088387", "0.6983478", "0.67954326", "0.678729", "0.6775083", "0.6736858", "0.6726595", "0.67247325", "0.6679473", "0.66466904", "0.66429496", "0.6642772", "0.6615163", "0.66069305", "0.6603299", "0.6571744", "0.6561365", "0.6560194", "0.65410596", "0.6534271", "0.65207374", "0.6519669", "0.651527", "0.64963645" ]
0.78785557
1
Get data from an Amarok database. We fetch rating and score as well as Amarok's unique id for the track to have more reliable syncing after the initial import.
def get_amarok_data(item, db): if hasattr(item, 'amarok_uid') and item.amarok_uid: condition = "REPLACE(uniqueid, 'amarok-sqltrackuid://', '') = '%s'" % MySQLdb.escape_string(item.amarok_uid) else: condition = "REPLACE(CONCAT_WS('/',lastmountpoint, rpath), '/./', '/') = '%s'" % MySQLdb.escape_string(item.path) query = "SELECT REPLACE(uniqueid, 'amarok-sqltrackuid://', '') AS uniqueid, rating, score \ FROM statistics \ INNER JOIN urls ON statistics.url = urls.id \ INNER JOIN devices ON devices.id = urls.deviceid \ WHERE %s \ LIMIT 1" % condition try: cursor = db.cursor() cursor.execute(query) row = cursor.fetchone() except MySQLdb.Error, e: log.error(u'Could not fetch metadata from amarok database: {0}'.format(e)) row = (None, 0, 0) if row is None: log.info(u'Could not find entry for \'{0}\' in amarok database'.format(displayable_path(item.path))) row = (None, 0, 0) item.amarok_uid = row[0] showdiff_rating = False showdiff_score = False if hasattr(item, 'rating') and item.rating and long(item.rating) != row[1]: showdiff_rating = True if hasattr(item, 'score') and item.score and float(item.score) != row[2]: showdiff_score = True if showdiff_rating or showdiff_score: print item.artist, " - ", item.album, " - ", item.title if showdiff_rating: ui.commands._showdiff('rating', item.rating, row[1]) if showdiff_score: ui.commands._showdiff('score', item.score, row[2]) item.rating = row[1] item.score = row[2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def retrieve_from_db(self):\n pass", "def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]", "def fetch_data(self):", "def fetch():\n current_count = docs_count\n query = Query(feedback_db, selector = {'_id':{'$gt':0}}, fields = ['Rating', 'timestamp'])\n for doc in query()['docs']:\n if ratings.has_key(doc['timestamp']):\n pass\n else:\n ratings[doc['timestamp']] = doc['Rating']\n publish()# to publish the rating to iotf\n time.sleep(10)", "def onair_datas(userid):\n\n songid = cache.get('onair_songid')\n artist = cache.get('onair_artist')\n title = cache.get('onair_title')\n album = cache.get('onair_album')\n\n song_key = 'song_{}_user_{}'.format(songid, userid)\n\n voted = cache.get(song_key)\n\n if voted is None:\n voted = 'null'\n\n datas = {'artist': artist,\n 'title': title,\n 'album': album,\n 'songid': songid,\n 'user_vote': voted}\n\n return datas", "def _get_omdb_data(self):\n url = \"http://www.omdbapi.com/?i=\" + self.imdb_id + \"&plot=short&r=json\"\n try:\n json_data = urllib2.urlopen(url).read()\n except urllib2.HTTPError as e:\n print('The server couldn\\'t fulfill the request.')\n print 'Error code:', e.code\n exit()\n except urllib2.URLError as e:\n print('We failed to reach a server.')\n print 'Reason:', e.reason\n exit()\n else:\n data = json.loads(json_data)\n self._omdb_data[\"title\"] = data[\"Title\"].encode('utf-8', 'ignore') # encode to prevent encoding errors\n self._omdb_data[\"storyline\"] = data[\"Plot\"].encode('utf-8', 'ignore')\n self._omdb_data[\"poster_image_url\"] = data[\"Poster\"].encode('utf-8', 'ignore')\n self._omdb_data[\"age_rating\"] = data[\"Rated\"].encode('utf-8', 'ignore')\n self._omdb_data[\"imdb_rating\"] = float(data[\"imdbRating\"])\n self._omdb_data[\"genre\"] = data[\"Genre\"].encode('utf-8', 'ignore')\n self._omdb_data[\"directors\"] = data[\"Director\"].encode('utf-8', 'ignore').split(\", \")\n self._omdb_data[\"actors\"] = data[\"Actors\"].encode('utf-8', 'ignore').split(\", \")\n self._omdb_data[\"awards\"] = data[\"Awards\"].encode('utf-8', 'ignore')\n self._omdb_data[\"release_date\"] = data[\"Released\"].encode('utf-8', 'ignore')", "def mrkdata():\n data = get_mrkdata(db, MyTable)\n\n return data", "def get_basic_data(self):\n\n db = DataBase().clear_table()\n\n data = self.scraper.scrape_top_250()\n for d in data:\n title = d.find(\"td\", class_=\"titleColumn\")\n title = title.find(\"a\")\n title = re.sub(\"<.*?>\", \"\", str(title))\n\n film_id = d.find(\"td\", class_=\"watchlistColumn\")\n film_id = film_id.find(\"div\")\n film_id = film_id[\"data-tconst\"]\n\n year = d.find(\"span\", class_=\"secondaryInfo\")\n year = re.sub(\"<.*?>\", \"\", str(year)).replace(\"(\", \"\").replace(\")\", \"\")\n\n director = d.find(\"td\", class_=\"titleColumn\")\n director = director.find(\"a\")\n director = director[\"title\"]\n director, *cast = director.split(\", \")\n director = director.replace(\" (dir.)\", \"\")\n\n rating = d.find(\"td\", class_=\"ratingColumn imdbRating\")\n rating = rating.find(\"strong\")\n rating = re.sub(\"<.*?>\", \"\", str(rating))\n\n poster = d.find(\"td\", 
class_=\"posterColumn\")\n poster = poster.find(\"img\")[\"src\"]\n poster = re.sub(\"@.+\", \"@._V1_FMjpg_UY474_.jpg\", poster)\n\n DataBase().populate_table(\n (title, film_id, year, director, \", \".join(cast), rating, poster)\n )", "def fetch_data(self):\n\n data_dict = {\n 'price': self.get_current_price(),\n }\n\n return self.save_data(data_dict)", "def fetch(self, movie_id: str) -> AVInfo:\n raise NotImplementedError()", "def FromId(self, id):\n\n self.persistant = self.db.GetOneRow('select * from tracks where '\n 'id=%d;'\n % id)", "def get_audiobook(_id):\r\n return [Audiobook.audiobook_json(Audiobook.query.filter_by(id=_id).first())]\r\n # Audiobook.audiobook_json() coverts our output to the json format defined earlier\r\n # the filter_by method filters the query by the id\r\n # since our id is unique we will only get one result\r\n # the .first() method will get that first value returned\r", "def _loadData(self):\n self.d = read_ac_data.read_ac_data_wrapper(self.sc_id, self.date,\n dType='10Hz')\n return", "def fetchQuestions (self):\n # Create query and get data\n query = \"SELECT * from \" + self.dbTable + \" where main_ID = '\" + str(self.ID) + \"'\";\n data = self.sqlConnection.executeSelectQuery(query);\n \n # Convert the data into Question objects\n self.convertQuestions(data)", "async def oak_data_push(self):\n now = datetime.utcnow()\n with Sql() as cursor:\n try:\n clan = await self.bot.coc.get_clan(clans['Reddit Oak'])\n sql1 = (\"INSERT INTO coc_oak (tag, playerName, XPLevel, trophies, donations, donReceived, league, \"\n \"leagueIcon, thLevel, warStars, attackWins, defenseWins, bestTrophies, vsTrophies, \"\n \"bestVsTrophies, versusBattleWins, builderHall, timestamp) \"\n \"VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\")\n sql2 = (\"UPDATE coc_oak \"\n \"SET barbKing = ?, archQueen = ?, grandWarden = ?, royalChamp = ?, battleMachine = ?, \"\n \"clanGames = ?, wallWrecker = ?, battleBlimp = ?, stoneSlammer = ?, siegeBarracks = ? \"\n \"WHERE tag = ? 
AND timestamp = ?\")\n self.bot.logger.debug(\"Starting member loop for SQL\")\n to_google = []\n async for m in clan.get_detailed_members():\n clan_games = m.get_achievement(\"Games Champion\").value if m.get_achievement(\"Games Champion\") else 0\n barb_king = m.get_hero(\"Barbarian King\").level if m.get_hero(\"Barbarian King\") else 0\n arch_queen = m.get_hero(\"Archer Queen\").level if m.get_hero(\"Archer Queen\") else 0\n grand_warden = m.get_hero(\"Grand Warden\").level if m.get_hero(\"Grand Warden\") else 0\n royal_champ = m.get_hero(\"Royal Champion\").level if m.get_hero(\"Royal Champion\") else 0\n battle_mach = m.get_hero(\"Battle Machine\").level if m.get_hero(\"Battle Machine\") else 0\n wall_wrecker = m.siege_machines[0].level if len(m.siege_machines) > 0 else 0\n battle_blimp = m.siege_machines[1].level if len(m.siege_machines) > 1 else 0\n stone_slammer = m.siege_machines[2].level if len(m.siege_machines) > 2 else 0\n barracks = m.siege_machines[3].level if len(m.siege_machines) > 3 else 0\n cursor.execute(sql1, m.tag[1:], m.name, m.exp_level, m.trophies, m.donations, m.received,\n m.league.name, m.league.icon.url, m.town_hall, m.war_stars, m.attack_wins,\n m.defense_wins, m.best_trophies, m.versus_trophies, m.best_versus_trophies,\n m.versus_attack_wins, m.builder_hall, now)\n cursor.execute(sql2, barb_king, arch_queen, grand_warden, royal_champ, battle_mach, clan_games,\n wall_wrecker, battle_blimp, stone_slammer, barracks, m.tag[1:], now)\n # Prep dict for Google\n to_google.append({\"tag\": m.tag, \"townHall\": m.town_hall, \"warStars\": m.war_stars,\n \"attackWins\": m.attack_wins, \"defenseWins\": m.defense_wins,\n \"bestTrophies\": m.best_trophies, \"barbKing\": barb_king,\n \"archQueen\": arch_queen, \"grandWarden\": grand_warden, \"batMach\": battle_mach,\n \"builderHallLevel\": m.builder_hall, \"versusTrophies\": m.versus_trophies,\n \"bestVersusTrophies\": m.best_versus_trophies, \"versusBattleWins\": m.versus_attack_wins,\n \"clanGames\": clan_games, \"name\": m.name, \"expLevel\": m.exp_level, \"trophies\": m.trophies,\n \"donations\": m.donations, \"donationsReceived\": m.received, \"clanRank\": 0,\n \"league\": m.league.name, \"role\": m.role.name})\n except:\n self.bot.logger.exception(\"Background failed. You may need to reload. 
<@251150854571163648>\")\n self.bot.logger.debug(\"Done with SQL - Starting Google\")\n payload = {\"type\": \"players\", \"data\": to_google}\n url = \"https://script.google.com/macros/s/AKfycbzhXbO1CCcRuPzTU0mos7MowcucvclAKokkTiq91463xW1ftQEO/exec\"\n requests.post(url, data=json.dumps(payload))\n self.bot.logger.info(\"Oak data push complete.\")", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)\n loadAdquires(catalog)\n loadNacionalities(catalog)\n load2DArtworks(catalog)\n loadArtistMediumsTags(catalog)\n loadDptments(catalog)\n catalog['artists'] = sortArtists(catalog, 3)\n fillArtistMediums(catalog)\n fillMostUsedMediums(catalog)\n catalog['artists_tags'] = sortArtistTags(catalog, 3)\n sort_dptments(catalog)", "async def _get_art(self) -> Art:\n if not self.arts_cache:\n async with self.bot.pool.acquire() as con:\n query = f'SELECT url, artist_id, artist_name FROM {TABLE_ARTS} ORDER BY random() LIMIT 20'\n rows: List[asyncpg.Record] = await con.fetch(query)\n self.arts_cache = [Art(url, artist_id, artist_name) for (url, artist_id, artist_name) in rows]\n\n return self.arts_cache.pop()", "async def _fetch_data(self, ctx: commands.Context, query: str):\n params = {\n \"query\": query,\n \"maxResults\": 10,\n \"sort\": \"FavoritedTimes\",\n \"preferAccurateMatches\": \"true\",\n \"nameMatchMode\": \"Words\",\n \"fields\": \"Artists,Lyrics,Names,ThumbUrl\",\n }\n headers = {\n \"User-Agent\": f\"Red-DiscordBot/{red_version} Fixator10-cogs/VocaDB/{self.__version__}\"\n }\n try:\n async with self.session.get(BASE_API_URL, params=params, headers=headers) as resp:\n if resp.status != 200:\n return f\"https://http.cat/{resp.status}\"\n result = await resp.json()\n except asyncio.TimeoutError:\n return \"Request timed out\"\n\n all_items = result.get(\"items\")\n if not all_items:\n return None\n\n filtered_items = [x for x in all_items if x.get(\"lyrics\")]\n if not filtered_items:\n return None\n\n if len(filtered_items) == 1:\n return filtered_items[0]\n\n items = \"\\n\".join(\n f\"**`[{i}]`** {x.get('defaultName')} - {x.get('artistString')}\"\n f\" (published: {self._parse_date(x.get('publishDate'))})\"\n for i, x in enumerate(filtered_items, start=1)\n )\n\n prompt = await ctx.send(\n f\"Found below **{len(filtered_items)}** result(s). 
Pick one in 60 seconds:\\n\\n{items}\"\n )\n\n def check(msg: discord.Message) -> bool:\n return bool(\n msg.content.isdigit()\n and int(msg.content) in range(len(filtered_items) + 1)\n and msg.author.id == ctx.author.id\n and msg.channel.id == ctx.channel.id\n )\n\n try:\n choice = await self.bot.wait_for(\"message\", timeout=60.0, check=check)\n except asyncio.TimeoutError:\n choice = None\n\n if choice is None or choice.content.strip() == \"0\":\n with contextlib.suppress(discord.NotFound, discord.HTTPException):\n await prompt.edit(content=\"Cancelled.\", delete_after=5.0)\n return None\n\n choice = int(choice.content.strip()) - 1\n with contextlib.suppress(discord.NotFound, discord.HTTPException):\n await prompt.delete()\n return filtered_items[choice]", "def retrieve_data():\r\n\r\n print(\"\\n[i] Running scheduled query for page {} at {}.\".format(page, ut.get_time()))\r\n # Instanciating main class for Facebook call\r\n fbs = FacebookScrapper()\r\n\r\n # Getting hourly data from Facebook\r\n data = fbs.get_page_fan_count(page=page)\r\n\r\n # Sending data to database\r\n dba.insert_data_db(data)", "def query_by_id(self, title: str) -> dict:\n if not self.client:\n self.connect()\n return self.db.find_one({'Imdb_Title_id': title})", "def meta(id):\n db = core.connect()\n return db[id][\"meta\"]", "def get_dataset():\n\n return db.store.all()", "def _fetch_data(self):\n pass", "def read_DB(self):\n mgdb = mongodata.db\n client = MongoClient(mgdb)\n db = client.local\n db.authenticate(mongodata.user, mongodata.passwd)\n minLat, maxLat, minLon, maxLon = self.city[1]\n cityname = self.city[2]\n if type(self.application) != list:\n col = db[mongodata.collection[self.application]]\n\n c = col.find({'city': cityname,\n 'lat': {'$gt': minLat, '$lt': maxLat},\n 'lng': {'$gt': minLon, '$lt': maxLon},\n # 'time': {'$gt': intinit, '$lt': intend}\n }, {'lat': 1, 'lng': 1, 'time': 1, 'user': 1})\n\n qsize = c.count()\n self.dataset = np.zeros((qsize,), dtype='f8,f8,i4,S20')\n cnt = 0\n for val in c:\n if cnt < qsize:\n self.dataset[cnt][0] = val['lat']\n self.dataset[cnt][1] = val['lng']\n self.dataset[cnt][2] = val['time']\n self.dataset[cnt][3] = val['user']\n cnt += 1\n else:\n lcol = []\n lcount = []\n for app in self.application:\n col = db[mongodata.collection[app]]\n\n c = col.find({'city': cityname,\n 'lat': {'$gt': minLat, '$lt': maxLat},\n 'lng': {'$gt': minLon, '$lt': maxLon},\n # 'time': {'$gt': intinit, '$lt': intend}\n }, {'lat': 1, 'lng': 1, 'time': 1, 'user': 1})\n\n lcount.append(c.count())\n lcol.append(c)\n\n self.dataset = np.zeros((sum(lcount),), dtype='f8,f8,i4,S20')\n for c, qsize in zip(lcol, lcount):\n cnt = 0\n for val in c:\n if cnt < qsize:\n self.dataset[cnt][0] = val['lat']\n self.dataset[cnt][1] = val['lng']\n self.dataset[cnt][2] = val['time']\n self.dataset[cnt][3] = val['user']\n cnt += 1", "def get_movie_data(self):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM movie_table\")\n rows = cur.fetchall()\n return rows", "def _load(self):\n if not self._loaded:\n url = f\"https://api.opendota.com/api/matches/{self.id}\"\n logger.info(\"Loading match details for match id: %s from url %s\",\n self._id, url)\n self.data = requests.get(url).json()\n self._duration = self.data.get('duration')\n self._chat = self.data.get('chat')\n self._cluster = self.data.get('cluster')\n self._engine = self.data.get('engine')\n self._first_blood_time = self.data.get('first_blood_time')\n self._game_mode = self.data.get('game_mode')\n self._human_players = 
self.data.get('human_players')\n self._league_id = self.data.get('league_id')\n self._lobby_type = self.data.get('lobby_type')\n self._match_seq_num = self.data.get('match_seq_num')\n self._negative_votes = self.data.get('negative_votes')\n self._positive_votes = self.data.get('positive_votes')\n self._objectives = self.data.get('objectives')\n self._picks_bans = self.data.get('picks_bans')\n self._barracks_status_dire = self.data.get('barracks_status_dire')\n self._dire_score = self.data.get('dire_score')\n self._dire_team = self.data.get('dire_team')\n self._tower_status_dire = self.data.get('tower_status_dire')\n self._barracks_status_radiant = self.data.get('barracks_status_radiant')\n self._radiant_gold_adv = self.data.get('radiant_gold_adv')\n self._radiant_xp_adv = self.data.get('radiant_xp_adv')\n self._radiant_score = self.data.get('radiant_score')\n self._radiant_team = self.data.get('radiant_team')\n self._radiant_win = self.data.get('radiant_win')\n self._tower_status_radiant = self.data.get('tower_status_radiant')\n self._start_time = self.data.get('start_time')\n self._teamfights = self.data.get('teamfights')\n self._version = self.data.get('version')\n self._replay_salt = self.data.get('replay_salt')\n self._series_id = self.data.get('series_id')\n self._series_type = self.data.get('series_type')\n self._league = self.data.get('league')\n self._skill = self.data.get('skill')\n self._players = self.data.get('players')\n self._patch = self.data.get('patch')\n self._region = self.data.get('region')\n self._all_word_counts = self.data.get('all_word_counts')\n self._version = self.data.get('version')\n self._throw = self.data.get('throw')\n self._comeback = self.data.get('comeback')\n self._cosmetics = self.data.get('cosmetics')\n self._draft_timings = self.data.get('draft_timings')\n self._loss = self.data.get('loss')\n self._win = self.data.get('win')\n self._replay_url = self.data.get('replay_url')\n self._loaded = True", "def fetch_mrs_data(conn, file_id):\n # Fetch specified MRS data from the database.\n return _fetch_entry_from_table(conn, TABLE_NAME_BRAINSCANS, file_id)", "def fetchTAC(self):\n\n last_hour = datetime.datetime.now().date() - datetime.timedelta(hours = 1)\n last_hour = \"{}{}{}\".format(\"'\", last_hour, \"'\")\n last_hour = datetime.date(2011, 4, 5)\n\n self.hlr_cur.execute(\"SELECT id FROM Subscriber WHERE updated >= {date};\".format(date = last_hour))\n subscribers = self.hlr_cur.fetchall()\n\n parsed_data = {}\n unique_imei = {}\n #uid_count = 0\n\n for subscriber in subscribers:\n self.hlr_cur.execute(\"SELECT IMEI FROM Equipment WHERE id = (SELECT equipment_id FROM EquipmentWatch WHERE subscriber_id = {s_id});\".format(s_id = subscriber[0]))\n parsed_imei = self.hlr_cur.fetchall()\n\n if len(parsed_imei) > 0:\n for imei in parsed_imei:\n imei_number = imei[0] \n\n if imei_number not in unique_imei:\n unique_imei[imei_number] = subscriber[0]\n\n uid = unique_imei[imei_number]\n parsed_data.setdefault((uid), str(imei_number)[:8])\n\n self.saveRecords(parsed_data)", "def read_data(self):\n print 'Getting team stats...'\n self.team_stats = get_team_stats(self.recent_years)\n\n print 'Getting matches...'\n self.matches = get_matches(\n with_team_stats=True,\n duplicate_with_reversed=self.duplicate_with_reversed,\n exclude_ties=self.exclude_ties,\n recent_years=self.recent_years,\n use_these_team_stats=self.team_stats,\n )", "def readAirHumidity():\n\n id_environment = environment_coll.find_one({'description': 'Ar'}, {'_id': 1})\n id_pquantity = 
pquantity_coll.find_one({'type': 'Umidade'}, {'_id': 1})\n\n id_environment = id_environment.get('_id')\n id_pquantity = id_pquantity.get('_id')\n\n print(\"Reading and inserting HUMIDITY data into DB...\")\n read_humidity = readUnity(HUMIDITY_CHARACTER)\n if read_humidity != -1:\n print(\"The read AIR humidity is \" + str(read_humidity) + \"%\")\n # columns: id_user, id_envrmt, read_value\n measures = db.measures\n measures_coll.insert_one({'id_user': user_id,\n 'id_environment': id_environment,\n 'id_pquantity': id_pquantity,\n 'read_value': read_humidity}\n )\n print(\"Success! Data inserted into database.\\n\")\n else:\n print(\"Failed to read temperature. Try again in 5 seconds.\")" ]
[ "0.5660886", "0.54901147", "0.53773314", "0.53103805", "0.5294919", "0.5287652", "0.5280436", "0.5218586", "0.5123491", "0.51149195", "0.50887793", "0.50697964", "0.50557685", "0.50500786", "0.5047312", "0.5042681", "0.50382304", "0.5036712", "0.50281346", "0.5021935", "0.5017402", "0.50082046", "0.49967209", "0.49916947", "0.49823338", "0.49630198", "0.49590695", "0.49454883", "0.4931494", "0.49203312" ]
0.719275
0
This is the method to call and analyze text with the supplied features
def analyze(self, features, text=None, url=None, html=None, clean=True, xpath=None, fallback_to_raw=True, return_analyzed_text=False, language=None): body = { 'clean': clean, 'fallback_to_raw': fallback_to_raw, 'return_analyzed_text': return_analyzed_text, 'xpath': xpath, 'language': language, 'text': text, 'url': url, 'html': html } feature_dict = {} for feature in features: feature_dict[feature.name()] = feature.toDict() body['features'] = feature_dict if text is None and html is None and url is None: msg = "html, text, or url must have content" raise ValueError(msg) if len(features) < 1: raise ValueError("Must supply at least one feature") return self.request(method='POST', url='/v1/analyze', params={"version": self.version}, headers={'content-type': 'application/json'}, json=body, accept_json=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def get_text_features(text, word_features):\n words = word_tokenize(text)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features", "def get_dataset_features(text):\n return model.extract(text)", "def analyse_text(custom_text, classifier, Resource, threshold, language='en'):\n return [(bytes(custom_text, 'utf-8'),\n _minimal_analysis(bytes(custom_text, 'utf-8'), classifier, Resource, threshold, language))]", "def text_analyzer(*text):\n if len(text) > 1:\n print(\"ERROR\")\n return\n if len(text) == 0 or isinstance(text[0], str) == 0:\n text = []\n text.append(input(\"What is the text to analyse?\\n>> \"))\n ponctu_list = string.punctuation\n nb_upper = 0\n nb_lower = 0\n nb_ponct = 0\n nb_spaces = 0\n letters = 0\n for char in text[0]:\n letters += 1\n if char == ' ':\n nb_spaces += 1\n elif char.isupper():\n nb_upper += 1\n elif char.islower():\n nb_lower += 1\n elif char in ponctu_list:\n nb_ponct += 1\n print(\"The text contains {} characters:\" .format(letters), '\\n')\n print(\"-\", nb_upper, \"upper letters\\n\")\n print(\"-\", nb_lower, \"lower letters\\n\")\n print(\"-\", nb_ponct, \"punctuation marks\\n\")\n print(\"-\", nb_spaces, \"spaces\")", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, 
subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def _minimal_analysis(text, classifier, Resource, threshold, language='en'):\n list_text = clean_text(text, get_correct_stop_word(Resource, language))\n m_features = list()\n m_features.append(characteristic_vector(list_text, Resource))\n return classifier.predict(array(m_features), threshold), m_features", "def other_features_(tweet, cleaned_tweet):\n #print(\"WARNING>>>>>>>>>>>>>>>>> VADERSENTIMENT DISABLED\")\n sentiment = nlp.sentiment_analyzer.polarity_scores(tweet)\n\n words = cleaned_tweet #Get text only\n\n syllables = textstat.syllable_count(words) #count syllables in words\n num_chars = sum(len(w) for w in words) #num chars in words\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n\n\n twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://\n features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['compound'],\n twitter_objs[2], twitter_objs[1],]\n #features = pandas.DataFrame(features)\n return features", "def learn(self, documents, labels):\n raise NotImplementedError('FeatureExtractorBase:learn(self, text_list) is not defined')", "def analyze(self, text):\n\n # start from 0 for each Analyser variable\n self.positives = 0\n self.negatives = 0\n\n # precise self text value\n self.text = text\n\n # declare a tokenased word\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n\n # indicate the length of list tokens\n size = len(tokens)\n\n # all the word stuff to ckeck\n for word in tokens:\n\n # chaque mots est converti en mot sans majuscule\n word = str.lower(word)\n\n linespos = [line.rstrip('\\n') for line in open('positive-words.txt')]\n linesneg = [line.rstrip('\\n') for line in open('negative-words.txt')]\n\n # check for positive or negative or neutral words\n if word in linespos:\n self.positives += 1\n elif word in linesneg:\n self.negatives += 1\n else:\n continue\n\n # score calculculated and reurned\n score = self.positives - self.negatives\n\n return score", "def getTextStatsFeat(text, stemmRequired = True,\r\n excludeStopwordsRequired = True):\r\n #length = len(text)\r\n sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", 
\" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat", "def evaluate(self, featureset):\r\n #sequence, tag = featureset\r\n gs, labels = [], []\r\n for s, t in featureset:\r\n gs.append(t)\r\n label = self.tagger.choose_tag(s)\r\n labels.append(label)\r\n print (t, label)\r\n\r\n assert(len(gs) == len(labels))\r\n self.write_to_file(labels)\r\n words = self.tagger.test(self.r.test_sents, word=True)\r\n print (accuracy_score(gs, labels))", "def _extract(texts: list[str], tokens: list[list[str]], sentences: list[list[str]], /,\n avg_words=True, avg_sentences=True, pos_distribution=True,\n foreign_words_ratio=True, lexicon=True, punctuation_distribution=True,\n n_jobs=1) -> pd.DataFrame:\n\n def process(function, objects: list, feature_name: str):\n result_ = np.vstack(Parallel(n_jobs)(delayed(function)(objects_) for objects_ in objects))\n\n # Build a list of the column names to create a features DataFrame\n n_columns = result_.shape[1]\n columns_name = [feature_name + f'_{i}' for i in range(1, n_columns + 1)]\n\n return pd.DataFrame(result_, columns=columns_name)\n\n results = []\n # Average length of words\n if avg_words:\n results.append(process(funcs.avg_length, tokens, AVG_WORDS))\n # Average length of sentences\n if avg_sentences:\n results.append(process(funcs.avg_length, sentences, AVG_SENTENCES))\n # POS distribution\n if pos_distribution:\n results.append(process(funcs.pos_distribution, tokens, POS_DISTRIBUTION))\n # Lexicon size\n if lexicon:\n results.append(process(funcs.lexicon, tokens, LEXICON_SIZE))\n # Foreign words ratio\n if foreign_words_ratio:\n 
results.append(process(funcs.foreign_words_ratio, tokens, FOREIGN_RATIO))\n # Punctuations distribution\n if punctuation_distribution:\n results.append(process(funcs.punctuations_distribution, texts, PUNCTUATIONS_DISTRIBUTION))\n\n if not results:\n raise ValueError(\"At least one feature must be chosen\")\n\n return pd.concat(results, axis=1)", "def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields", "def analyze(self, text):\n #analize every word 
in the text a value -1, 1 or 0 and calculate total score\n #tokens allow us to split words in single tokens we can initialize tokens like this:\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text.lower())\n\n score = 0\n\n if tokens[0] in self.negatives:\n score =- 1\n elif tokens[0] in self.positives:\n score =+ 1\n else:\n score = 0\n\n #print('', text)\n\n return score", "def Classify_Text(self, overview):\n\n # convert text to lower case\n overview = overview.lower()\n\n path = self.path\n\n # start time\n time0 = time.process_time()\n\n # Use ensemble classifier - voting with weights\n\n # model = joblib.load(path + \"MULTINOMIAL NB_TFIDF VECTORIZER\" + \".pkl\")\n model = joblib.load(\n \"/home/do/PycharmProjects/pythonProject/information-retrival-search-engine/informationRetrival/frontend/static/frontend/text/SVM_COUNT VECTORIZER.pkl\")\n dictionary = joblib.load(path + \"_Genre_Dictionary\")\n vec = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n\n print(vec)\n # overview=\"An undercover cop and a mole in the police\"\n Y = vec.fit_transform([overview]).toarray()\n print(vec.get_feature_names())\n print(Counter(Y[0]))\n # print(Counter(Y[1]))\n print(model)\n predicted_genre = model.predict(Y)\n print(predicted_genre)\n\n # Return predicted genre and time taken for classification\n return predicted_genre, str(round(time.process_time() - time0, 3)) + \" seconds\"", "def main():\n logging.basicConfig(level=logging.WARN)\n\n text = extract()\n text, char_indices, indices_char, x, y = transform(text)\n model(text, char_indices, indices_char, x, y)\n\n pass", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def text_features_df(spark):\n # Replaces formatted text that has already been processed\n FILLER = ''\n # Parser helper column\n COLNAME = 'processed_text'\n COL = col(COLNAME)\n \n # Data loading\n post_history_df = spark.read.parquet(\"/user/***REMOVED***/StackOverflow/PostHistory.parquet\") \\\n .select(['_PostId', '_Text', '_PostHistoryTypeId']) \\\n .filter(col('_PostHistoryTypeId') == 2) \\\n .drop('_PostHistoryTypeId')\n post_df = spark.read.parquet('/user/***REMOVED***/StackOverflow/Posts.parquet') \\\n .select(['_Id', '_PostTypeId']) \\\n .filter(col('_PostTypeId') == 1) \\\n .drop(\"_PostTypeId\")\n df = post_history_df.join(post_df, post_df['_Id'] == post_history_df['_PostId'])\n\n # Remove code snippets from the Markdown formatted text\n df = df.withColumn(COLNAME, regexp_replace(col('_Text'), regex.CODE_BLOCK_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.HTML_BLOCK_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.FENCED_CODE_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.ESCAPE_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.HTML_RE, FILLER))\n\n # Calculate features\n df = df.withColumn('#characters', 
length(COL)) \\\n .withColumn('#punctuation_characters', size(split(COL, r'[-\\[\\]{}()*+?.,\\\\^$|#]')) - 1) \\\n .withColumn('punctuation_ratio', col('#punctuation_characters') / col('#characters')) \\\n .withColumn('#lines', size(split(COL, r'\\n'))) \\\n .withColumn('average_line_length', col('#characters') / col('#lines')) \\\n .withColumn('#words', size(split(COL, r'\\s+'))) \\\n .withColumn('average_word_length', col('#characters') / col('#words'))\n\n # Remove unnecessary columns, including parser helper column\n df = df.drop('_Text', '_PostHistoryTypeId', '_PostId', COLNAME)\n return df", "def sent_features(tweet):\n twitter_objs = count_twitter_objs(tweet)\n tweet=clean_tweet(tweet) \n sentiment = sentiment_analyzer.polarity_scores(tweet)\n #Get text only\n words = preprocess(tweet) \n syllables = textstat.syllable_count(words)\n num_chars = sum(len(w) for w in words)\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n \n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n \n \\\n retweet = 0\n if \"rt\" in words:\n retweet = 1\n features = [FKRA, FRE,syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],\n twitter_objs[2], twitter_objs[1],\n twitter_objs[0], retweet]\n return features", "def text_calculations(input_text, fb_name):\r\n \r\n token_text = nltk.word_tokenize(input_text)\r\n #print(token_text)\r\n nltk_text = nltk.Text(token_text)\r\n #print(nltk_text)\r\n \r\n #The number of words incl. numbers and signs.\r\n number_words = len(token_text)\r\n #print(\"number_words: %i\" % (number_words)) \r\n \r\n #The number of unique words. \r\n unique_words = len(set([each_word.lower() for each_word in nltk_text if each_word.isalpha()]))\r\n #print(\"unique_words: %i\" % (unique_words))\r\n \r\n #The number of characters (including whitespaces) in all words (incl. numbers and signs)\r\n characters_text = len(input_text)\r\n #print(\"characters_text: %i\" % (characters_text))\r\n \r\n #The number of characters (without whitespaces) in all words (incl. 
numbers and signs)\r\n characters_words = sum([len(each_word) for each_word in nltk_text])\r\n #print(\"characters_words: %i\" % (characters_words))\r\n \r\n #The average number of characters in a word in this text.\r\n average_character_length = float(characters_words) / number_words\r\n #print(\"average_character_length: %0.2f\" % (average_character_length))\r\n \r\n #number of signs\r\n signs = re.findall(r'[^\\w\\s]', input_text) # [not,( Any whitespace character, Any alphanumeric character)]\r\n #print(signs)\r\n #print(\"len(signs): %i\" % len(signs))\r\n \r\n #number of instances of multiple following signs - could be smileys, !!!!!\r\n multiple_signs = re.findall(r'[^\\w\\s]{2,}', input_text) # At least 2 repeats of signs.\r\n #print(multiple_signs)\r\n #print(\"len(multiple_signs): %i\" % len(multiple_signs))\r\n \r\n #If text contains questions based on \"?\"\r\n contain_question = re.findall(r'[?]', input_text)\r\n #print(\"len(contain_question): %i\" % len(contain_question))\r\n \r\n #if it contains statements based on \"!\"\r\n contain_exclamation = re.findall(r'[!]', input_text)\r\n #print(\"len(contain_exclamation): %i\" % len(contain_exclamation))\r\n \r\n #If the text contain the users name \r\n contain_user_name = re.findall('%s'%fb_name, input_text)\r\n #print(\"len(contain_user_name): %i\" % len(contain_user_name))\r\n \r\n return {'number_words':number_words, \r\n 'average_character_length':average_character_length, \r\n 'signs':len(signs), 'multiple_signs':len(multiple_signs), \r\n 'question':len(contain_question), 'exclamation':len(contain_exclamation), \r\n 'name':len(contain_user_name) }", "def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score", "def data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. 
question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list", "def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)", "def extract_features(self, docs_train, docs_test, word_ngram_range=(1, 3), dim_reduce=False):\n\n\t\t# Build a vectorizer that splits strings into sequences of i to j words\n\t\tword_vectorizer = TfidfVectorizer(preprocessor=self.preprocess_tweet,\n\t\t\t\t\t\t\t\t\t analyzer='word', ngram_range=word_ngram_range,\n\t\t\t\t\t\t\t\t\t min_df=2, use_idf=True, sublinear_tf=True)\n\t\t# Build a vectorizer that splits strings into sequences of 3 to 5 characters\n\t\tchar_vectorizer = TfidfVectorizer(preprocessor=self.preprocess_tweet,\n\t\t\t\t\t\t\t\t\t analyzer='char', ngram_range=(3, 5),\n\t\t\t\t\t\t\t\t\t min_df=2, use_idf=True, sublinear_tf=True)\n\n\t\t# Build a transformer (vectorizer) pipeline using the previous analyzers\n\t\t# *FeatureUnion* concatenates results of multiple transformer objects\n\t\tself.ngrams_vectorizer = Pipeline([('feats', FeatureUnion([('word_ngram', word_vectorizer),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ('char_ngram', char_vectorizer),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ])),\n\t\t\t\t\t\t\t\t # ('clff', LinearSVC(random_state=42))\n\t\t\t\t\t\t\t\t ])\n\n\t\t# Fit (learn vocabulary and IDF) and transform (transform documents to the TF-IDF matrix) the training set\n\t\tX_train_ngrams_tfidf = self.ngrams_vectorizer.fit_transform(docs_train)\n\t\t'''\n\t\t↳ Check the following attributes of each of the transformers (analyzers)—*word_vectorizer* and *char_vectorizer*:\n\t\tvocabulary_ : dict. A mapping of terms to feature indices.\n\t\tstop_words_ : set. 
Terms that were ignored\n\t\t'''\n\t\tprint(\"%.2f seconds: Finished fit_transforming the training dataset\" % time.process_time())\n\t\tprint(\"Training set word & character ngrams .shape = \", X_train_ngrams_tfidf.shape)\n\n\t\tfeature_names_ngrams = [word_vectorizer.vocabulary_, char_vectorizer.vocabulary_]\n\n\t\t'''\n\t\tExtract the features of the test set (transform test documents to the TF-IDF matrix)\n\t\tOnly transform is called on the transformer (vectorizer), because it has already been fit to the training set.\n\t\t'''\n\t\tX_test_ngrams_tfidf = self.ngrams_vectorizer.transform(docs_test)\n\t\tprint(\"%.2f seconds: Finished transforming the test dataset\" % time.process_time())\n\t\tprint(\"Test set word & character ngrams .shape = \", X_test_ngrams_tfidf.shape)\n\n\t\t# • Dimensionality reduction using truncated SVD (aka LSA)\n\t\tif dim_reduce:\n\t\t\t# Build a truncated SVD (LSA) transformer object\n\t\t\tself.svd_reducer = TruncatedSVD(n_components=300, random_state=43)\n\t\t\t# Fit the LSI model and perform dimensionality reduction\n\t\t\tX_train_ngrams_tfidf_reduced = self.svd_reducer.fit_transform(X_train_ngrams_tfidf)\n\t\t\tprint(\"@ %.2f seconds: Finished dimensionality reduction (LSA) on the training dataset\", time.process_time())\n\t\t\tX_test_ngrams_tfidf_reduced = self.svd_reducer.transform(X_test_ngrams_tfidf)\n\t\t\tprint(\"@ %.2f seconds: Finished dimensionality reduction (LSA) on the test dataset\", time.process_time())\n\n\t\t\tX_train = X_train_ngrams_tfidf_reduced\n\t\t\tX_test = X_test_ngrams_tfidf_reduced\n\t\telse:\n\t\t\tX_train = X_train_ngrams_tfidf\n\t\t\tX_test = X_test_ngrams_tfidf\n\n\t\treturn X_train, X_test, feature_names_ngrams", "def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. 
Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features", "def time_question_features(self, text):\n features = {}\n\n # A list of all words from the known sentences\n all_words = \" \".join(self.positive + self.negative).split()\n\n # A list of the first word in each of the known sentence\n all_first_words = []\n for sentence in self.positive + self.negative:\n all_first_words.append(\n sentence.split(' ', 1)[0]\n )\n\n for word in text.split():\n features['first_word({})'.format(word)] = (word in all_first_words)\n\n for word in text.split():\n features['contains({})'.format(word)] = (word in all_words)\n\n for letter in 'abcdefghijklmnopqrstuvwxyz':\n features['count({})'.format(letter)] = text.lower().count(letter)\n features['has({})'.format(letter)] = (letter in text.lower())\n\n return features", "def run_tests():\n source1 = TextModel(\"Barack Obama\")\n source1.add_file('project/source_texts/barackobama_source_text.txt')\n\n source2 = TextModel('Donald Trump')\n source2.add_file('project/source_texts/donaldtrump_source_text.txt')\n\n new1 = TextModel('More Obama')\n new1.add_file('project/source_texts/moreobama_source_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('More Trump')\n new2.add_file('project/source_texts/moretrump_source_text.txt')\n new2.classify(source1, source2)\n\n new1 = TextModel('Gucci Gang by Lil Pump')\n new1.add_file('project/source_texts/guccigang_source_text.txt')\n new1.classify(source1, source2)\n\n new1 = TextModel(\"Spongebob Transcripts\")\n new1.add_file('project/source_texts/spongebobeps_source_text.txt')\n new1.classify(source1, source2)", "def get_text_features() -> np.array:\r\n # Universal sentence encoder model\r\n # Original model by Google could be loaded from: https://tfhub.dev/google/universal-sentence-encoder/4\r\n # In this notebook the model is loaded from a public dataset on Kaggle\r\n # at https://www.kaggle.com/dimitreoliveira/universalsentenceencodermodels\r\n text_model = tf.keras.Sequential(\r\n [KerasLayer(txt_model_path, input_shape=[], dtype=tf.string, # Pretrained model\r\n output_shape=[512], trainable=False),\r\n tf.keras.layers.Layer(512, dtype='float16')] # This layer reduces precision of float numbers\r\n )\r\n\r\n # Convert all texts to vectors\r\n features = text_model.predict(data['title'],\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Text features extracted. Shape:', features.shape)\r\n\r\n return features", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y" ]
[ "0.65689343", "0.6477436", "0.64146715", "0.6368037", "0.6330938", "0.6260085", "0.6223099", "0.6171409", "0.616159", "0.61302584", "0.61197525", "0.6115434", "0.6103395", "0.6098626", "0.60901034", "0.60735404", "0.60727555", "0.6036561", "0.6018226", "0.6005281", "0.6003854", "0.6003626", "0.5992649", "0.5969228", "0.59652513", "0.5959914", "0.5951894", "0.59510285", "0.59442264", "0.5927903" ]
0.72335
0
Remove ``>`` from beginning of a line.
def clean(self, line): m = self.RE.match(line) if line.strip() == ">": return "" elif m: return m.group(2) else: return line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self, line):\r\n m = self.RE.match(line)\r\n if line.strip() == \">\":\r\n return \"\"\r\n elif m:\r\n return m.group(2)\r\n else:\r\n return line", "def dealFirstLine(line):\n\n print \"%s\" % (line.strip('\\n'))", "def _remove_beginning_newlines(lines):\n first_non_blank_line = 0\n\n for line in lines:\n if line.strip():\n break\n\n first_non_blank_line += 1\n\n return lines[first_non_blank_line:]", "def strip_start(h, s):\n if h.startswith(s):\n h = h[len(s):]\n return h", "def remove_starting_carriage_return_in_output(self, text):\n\n # Display info message\n log.info(\"remove_starting_carriage_return_in_output\")\n\n # Remove the carriage return at the beginning of the string\n output = text.lstrip(\"\\r\\n\\r\")\n\n # Display info message\n log.info(f\"remove_starting_carriage_return_in_output: output = '{output}'\")\n\n # Return the string without the starting carriage return\n return output", "def delete_first_line(string):\n lines = string.split('\\n')\n return '\\n'.join(lines[1:])", "def rstrip_line(line):\n return line.rstrip()", "def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line", "def clean_chunk(chunk):\n return '\\n'.join([x[1:] for x in chunk.split('\\n')\n if x and x[0] not in ('-', '@')])", "def _strip_position(line: str) -> str:\n line = \".py\".join(line.split(\".py:\")[1:])\n line = \" \".join(line.split(\" \")[1:])\n return line", "def strip_line(line):\n line = line.strip()\n line = line.rstrip('\\n')\n line = line.rstrip('\\t')\n line = (line.split(\"//\"))[0]\n return line", "def clean(line):\n line = line.lower().replace(\"\\n\",\" \").replace(\"\\r\",\"\").replace(',',\"\").replace(\">\",\"> \").replace(\"<\", \" <\").replace(\"|\",\" \")\n return line", "def trim(line):\n index = 0\n for i in range(len(line)):\n if line[i].isalpha():\n break\n index = index + 1\n return line[index:]", "def scratch(line):\n if line.count('~~') >= 2:\n for i in range(0, line.count('~~') - line.count('~~') % 2):\n if i % 2 == 0:\n line = line.replace('~~', '<del>', 1)\n else:\n line = line.replace('~~', '</del>', 1)\n return line", "def _PreParse(line: str) -> str:\n line = line.rstrip(\"\\n\")\n\n commentIndex = line.find(\"/\")\n\n # no comment found\n if commentIndex == - 1:\n return line\n\n # truncate\n return line[0:commentIndex]", "def strip_tags(line):\n return re.sub(r'<sup[^>]*>.*?</sup>|<a[^>]*>.*?</a>|<[^>]+>', '', unescape(line), flags=re.S)", "def _trunc_lines_prepend(self):\n\t\tp = self._edit.get_buffer()\n\t\tnLines = p.get_line_count()\n\t\twhile nLines > 0:\n\t\t\tif nLines <= self._maxLines +1:\n\t\t\t\tbreak\n\t\t\tend = p.get_end_iter()\n\t\t\tstart = p.get_end_iter()\n\t\t\tstart.backward_line()\n\t\t\tp.delete(start, end)\n\t\t\tnLines = p.get_line_count()", "def oneline(value):\r\n try:\r\n return mark_safe(newlines.sub('', inbetween.sub('><', value)))\r\n except:\r\n return value", "def precmd(self, line):\n if line == \"EOF\":\n return line\n return line.lower()", "def filter_line(self, line):\n if line.startswith(\"<\"):\n # Simply filter out all lines beginning with '<', which are metadata\n return None\n\n # Some metadata-like text is also included at the start of lines, followed by \". - \"\n if u\". - \" in line:\n __, __, line = line.partition(u\". 
- \")\n\n # Remove -s and spaces from the start of lines\n # Not sure why they're often there, but it's just how the transcripts were formatted\n line = line.lstrip(u\"- \")\n\n # Skip lines that are fully surrounded by brackets: they're typically descriptions of what happened\n # E.g. (Applause)\n if line.startswith(u\"(\") and line.endswith(u\")\"):\n return None\n\n # It's common for a speaker's first utterance to start with a marker indicating the original language\n line = language_indicator_re.sub(u\"\", line)\n return line", "def precmd(self, line):\n return line.strip()", "def redirect_leading(self):\n while len(self.cmd) >= 3 and self.cmd[0] == \"<\":\n self.stdin = safe_open(self.cmd[1], \"r\")\n self.cmd = self.cmd[2:]", "def remove_first_line(fname):\n with codecs.open(fname, 'r', 'utf-8') as fin:\n data = fin.read().splitlines(True)\n with codecs.open('temp_file.tsv', 'w','utf-8') as fout:\n fout.writelines(data[1:])\n\n fin.close()\n fout.close()\n # Delete original file and rename temp file to original name\n os.remove(fname)\n os.rename('temp_file.tsv',fname)", "def ltrim(self, name, start, end):\r\n return self.format_inline('LTRIM', name, start, end)", "def clean_hanging_newline(t):\n if t and t[-1] == \"\\n\":\n return t[:-1]\n return t", "def remove_leading_blanks(self, sentence):\n pass", "def clean_header(klass, s):\n return re.sub(r\"[\\n\\r\\t]+\", \" \", s).strip()", "def _chop_end_misc(line):\n return re.sub(r\"\\s+\\d\\d-\\w\\w\\w-\\d\\d\\s+[1-9][0-9A-Z]{3}\\s*\\Z\", \"\", line)", "def rstrip(self) -> String:\n pass", "def _remove_new_line(self, message):\n if message.endswith('\\n'):\n return message[:-1]\n return message" ]
[ "0.7322892", "0.65217817", "0.6428228", "0.64100033", "0.6187956", "0.61572856", "0.6113667", "0.6068053", "0.6057279", "0.6052678", "0.5984401", "0.59601843", "0.5782217", "0.5741116", "0.5701244", "0.5687602", "0.56819564", "0.5668284", "0.5666465", "0.5642651", "0.5623701", "0.5614793", "0.5612808", "0.5611076", "0.5607446", "0.55495036", "0.55462813", "0.5538137", "0.5527812", "0.54940754" ]
0.7390938
0
Convert ttyrec files to videos
def main(ctx, ttyrec, encoding, ibm, outfile, size, fps, font_size, font_file, bold_font_file, info, info_all): if ibm: encoding = 'cp437' fp, def_outfile = open_or_get(ttyrec) try: with fp: updates = list(read_ttyrec(fp, encoding=encoding, errors='replace')) except ShortTTYRecError as e: ctx.fail(str(e)) if info or info_all: about = ttyrec_info(updates, show_all=info_all) click.echo(json.dumps(about, sort_keys=True, indent=4)) return if len(updates) < 2: ctx.fail( 'ttyrec only has {} update{}; need at least two to make a video' .format(len(updates), 's' if len(updates) != 1 else '') ) duration = updates[-1].timestamp - updates[0].timestamp click.echo( f'ttyrec length: {duration} ({len(updates)} distinct frames)', err=True, ) imgr = ScreenRenderer( font = ImageFont.truetype(font_file, size=font_size), bold_font = ImageFont.truetype(bold_font_file, size=font_size), font_size = font_size, columns = size[0], lines = size[1], ) imageio.plugins.ffmpeg.download() if outfile is None: outfile = def_outfile click.echo(f'Writing {outfile} ...', err=True) with click.progressbar( imgr.render_updates(updates, fps, block_size=MACRO_BLOCK_SIZE), length=ceil(duration.total_seconds() * fps), ) as mov_frames: imageio.mimwrite(outfile, map(np.asarray, mov_frames), fps=fps)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seqIo_toVid(fName, ext='avi'):\n\n assert fName[-3:]=='seq', 'Not a seq file'\n sr = seqIo_reader(fName)\n N = sr.header['numFrames']\n h = sr.header['height']\n w = sr.header['width']\n fps = sr.header['fps']\n\n out = fName[:-3]+ext\n sw = skvideo.io.FFmpegWriter(out)\n # sw = cv2.VideoWriter(out, -1, fps, (w, h))\n timer = pb.ProgressBar(widgets=['Converting ', pb.Percentage(), ' -- ',\n pb.FormatLabel('Frame %(value)d'), '/',\n pb.FormatLabel('%(max)d'), ' [', pb.Timer(), '] ',\n pb.Bar(), ' (', pb.ETA(), ') '], maxval=N)\n\n for f in range(N):\n I, ts = sr.getFrame(f)\n sw.writeFrame(Image.fromarray(I))\n # sw.write(I)\n timer.update(f)\n timer.finish()\n # cv2.destroyAllWindows()\n # sw.release()\n sw.close()\n sr.close()\n print(out + ' converted')", "def video_files():\n p = parse_cmdline(get_parser=get_parser_files)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.INFO)\n vis.show_video_abi_glm(\n files=p.files,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir)\n print(\"Files written to:\", p.outdir)", "def check_video_timestamps(movie_file, desired_format='.mp4', desired_framerate=30):\n\n check_video_format(movie_file, desired_format='.mp4', original_format='.avi')\n\n new_movie_file = movie_file+'_tt'+desired_format\n if not os.path.isfile(new_movie_file):\n #Convert file to 30 fps\n cmd = ['ffmpeg', '-i', movie_file+desired_format]\n cmd += ['-r', str(desired_framerate)]\n cmd += ['-y', movie_file+'_t'+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd]) \n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()\n\n #Add timecode text to video\n cmd = 'ffmpeg -i '+movie_file+'_t'+desired_format+' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: timecode=\\'00\\:00\\:00\\:00\\':rate=30: [email protected]: x=7: y=460\\\" -an -y '+movie_file+'_tt'+desired_format\n args = shlex.split(cmd)\n #print args\n p = subprocess.Popen(args, shell=False)\n p.wait()\n\n os.remove(movie_file+'_t'+desired_format)\n\n return new_movie_file", "def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))", "def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path=\"video.mp4\"):\n if isinstance(input_files, list):\n from PIL import Image # pylint: disable=C0415\n\n with Image.open(input_files[0]) as img:\n width, height = img.size\n tmp_dir = \"tmp_ffmpeg_dir\"\n os.mkdir(tmp_dir)\n if width % 2 != 0:\n print(f\"Width ({width}) not divisible by 2\")\n width -= 1\n if height % 2 != 0:\n print(f\"Height ({width}) not divisible by 2\")\n height -= 1\n for i, inp in enumerate(input_files):\n shutil.copy(inp, os.path.join(tmp_dir, f\"{i:06d}.png\"))\n inputs = f\"{tmp_dir}/%06d.png\"\n command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed 
to generate video file.\"\n for i in range(len(input_files)):\n os.remove(os.path.join(tmp_dir, f\"{i:06d}.png\"))\n os.rmdir(tmp_dir)\n elif isinstance(input_files, str):\n assert width != 0 and height != 0\n command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n else:\n assert (\n False\n ), f'input_files should be list (of files) or str (of file template, e.g., \"%04d.png\") instead of {type(input_files)}'", "def convert(processed_dir: str, video_file: str):\n\n video_name = osp.splitext(osp.basename(video_file))[0]\n out_dir = processed_dir + video_name\n\n # create img dir\n if not osp.exists(processed_dir):\n os.mkdir(processed_dir)\n\n # Create dir for video file if not existent\n # this is where we save our images\n if not osp.exists(out_dir):\n os.mkdir(out_dir)\n\n if osp.exists(out_dir):\n os.mkdir(out_dir + \"/kermit/\")\n os.mkdir(out_dir + \"/not_kermit/\")\n\n # open video file for processing\n cap = cv.VideoCapture(video_file)\n frame_rate = cap.get(5) # frame rate\n\n sec = 0\n total_count = (60*25)+50 # just an approximation\n pbar = tqdm.tqdm(total=total_count, leave=False)\n\n count = 0\n while (cap.isOpened()):\n frame_id = cap.get(1) # current frame number\n frame_exists, curr_frame = cap.read()\n\n if not frame_exists:\n break\n else:\n if (frame_id % math.floor(frame_rate) == 0):\n # output is : video_file/<video_file>_frameNr.jpg\n cv.imwrite(osp.join(out_dir, '{}_{}.jpg'.format(video_name,count)), curr_frame)\n count = count + 1\n pbar.update(1)\n\n pbar.close()\n # release resources\n cap.release()", "def make_video(data,\n xdim, ydim, sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, xdim], dtype=np.uint8)\n frame_image[image_write_rows, image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')", "def __convert_video(self, v_dir):\r\n self.video_dir = v_dir\r\n vid_capt = cv2.VideoCapture(v_dir)\r\n 
curr_frame = 0\r\n # clear directory if it already exists, else create it\r\n if os.path.exists(self.FRAME_FOLDER):\r\n for file in os.listdir(self.FRAME_FOLDER):\r\n file_path = os.path.join(self.FRAME_FOLDER, file)\r\n try:\r\n if os.path.isfile(file_path):\r\n os.unlink(file_path)\r\n except Exception as e:\r\n print(e)\r\n else:\r\n os.makedirs(self.FRAME_FOLDER)\r\n\r\n while True:\r\n # ret is return value, once it turns False, video is over\r\n ret, frame = vid_capt.read()\r\n if not ret:\r\n break\r\n f_name = self.FRAME_FOLDER + '/' + self.vid_name + 'frame' + str(curr_frame) + '.jpg'\r\n cv2.imwrite(f_name, frame)\r\n curr_frame += 1\r\n\r\n vid_capt.release()\r\n cv2.destroyAllWindows()", "def check_video_format(movie_file, desired_format='.mp4', original_format='.avi'):\n\n if not os.path.isfile(movie_file+original_format):\n print 'Error. avi file does not exist:'+movie_file+'.avi'\n if not os.path.isfile(movie_file+desired_format):\n cmd = ['ffmpeg']\n cmd += ['-i', movie_file+original_format]\n cmd += [movie_file+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()", "def start_recording(codec, filename=time.strftime(\"%Y-%m-%d_%H-%M-%S\")):\n global video_writer\n folder = 'video_out/' # eventually replace this with the SD card folder\n # TODO: also include branch name and/or commit ID\n path = folder + filename + '.' + filetype\n print \"Saving video to: %s\" % path\n\n height = videoinput.frame_height\n if settings.sidebyside:\n width = 2*videoinput.frame_width\n else:\n width = videoinput.frame_width\n\n try:\n video_writer = cv2.VideoWriter(path, codec, 30, (width, height))\n except:\n print \"Failed to open video file for writing!\"", "def write_video(frames, filename, fps=20):\n \n # On Mac systems, copy ffmeg binaries to your PATH (http://ffmpegmac.net/)\n \n if platform.system() == 'Windows':\n err_str = 'Don\\'t know how to write a movie for %s platform' % platform.system()\n raise NotImplementedError(err_str)\n\n \n if len(frames.shape) == 4:\n pix_fmt = 'rgb24'\n else:\n pix_fmt = 'gray'\n \n # normalize\n max_pix_val = np.percentile(frames, 99.9)\n if frames.dtype in (np.bool, bool):\n frames = frames.astype(np.uint8)\n frames -= frames.min()\n frames[frames>max_pix_val] = max_pix_val\n if max_pix_val > 0:\n frames *= 255. 
/ max_pix_val\n frames = frames.astype(np.uint8)\n \n # figure out which av program is installed\n program_name = ''\n try:\n subprocess.check_call(['avconv', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'avconv'\n except OSError:\n try:\n subprocess.check_call(['ffmpeg', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'ffmpeg'\n except OSError:\n pass\n if not program_name:\n raise OSError('Can\\'t find avconv or ffmpeg')\n \n # prepare pipe to av converter program\n size_str = '%ix%i' % (frames.shape[1], frames.shape[2])\n cmd = [program_name,\n '-y', # (optional) overwrite output file if it exists\n '-f', 'rawvideo',\n '-vcodec','rawvideo',\n '-s', size_str, # size of one frame\n '-pix_fmt', pix_fmt,\n '-r', str(fps), # frames per second\n '-i', '-', # input comes from a pipe\n '-an', # no audio\n '-qscale', '1',\n '-vcodec','mjpeg',\n filename]\n \n pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=DEVNULL, stderr=subprocess.STDOUT)\n \n # write frames \n for frame in frames:\n frame = np.fliplr(frame)\n pipe.stdin.write(frame.tostring())\n pipe.stdin.close()\n pipe.wait()", "def reencode(filepath, loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n # re encode video without b frame and as mp4\n basename, ext = os.path.splitext(filepath)\n output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))\n if not os.path.isdir(os.path.dirname(output_filepath)):\n os.makedirs(os.path.dirname(output_filepath))\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,\n **{'x264opts': 'bframes=0',\n 'f': 'mp4'})\n ffmpeg.overwrite_output(stream).run()\n except Exception as e:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n output_probe = Videos.get_info(output_filepath)\n start_time = eval(output_probe['streams'][0]['start_time'])\n fps = eval(output_probe['streams'][0]['avg_frame_rate'])\n has_b_frames = output_probe['streams'][0]['has_b_frames']\n start_frame = fps * start_time\n if start_time != 0:\n logger.warning('Video start_time is not 0!')\n if has_b_frames != 0:\n logger.warning('Video still has b frames!')\n return output_filepath", "def stream_frames(video_capture):", "def play_video_file(fname : str):\n cap = cv2.VideoCapture(fname)\n fps = cap.get(5)\n font = cv2.FONT_HERSHEY_SIMPLEX\n fontScale = 1\n fontColor = (0, 0, 0)\n lineType = 2\n\n myvideo = []\n while cap.isOpened():\n ret, frame = cap.read()\n\n if ret is True:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.putText(gray, 'Time: ' + str(round(cap.get(0) / 1000, 2)),\n (10, 30),\n font,\n fontScale,\n fontColor,\n lineType)\n cv2.putText(gray, 'Frame: ' + str(int(cap.get(1))),\n (10, 70),\n font,\n fontScale,\n fontColor,\n lineType)\n myvideo.append(gray)\n #cv2.imshow('frame', gray)\n #cv2.waitKey(10)\n #if cv2.waitKey(delay=2) & 0xFF == ord('q'):\n # break\n else:\n break\n\n cap.release()\n\n if fps < 60:\n for frame in myvideo:\n cv2.imshow('frame', frame)\n cv2.waitKey(10)\n else:\n for ind, frame in enumerate(myvideo):\n if ind % 3 == 0:\n cv2.imshow('frame', frame)\n cv2.waitKey(10)\n else:\n continue\n cv2.destroyAllWindows()", "def convert_files(enumerated_src_file):\n i, src_file = enumerated_src_file\n src_file = src_file.strip()\n file_extension, acodec, 
quality = audio_codec()\n\n dst_file = '.'.join(src_file.split('.')[:-1]) + file_extension\n sys.stdout.write(str(i + 1) + ': ' + src_file + ' -> ' + dst_file + '\\n')\n subprocess.call(['ffmpeg', '-i', src_file, '-vn', '-acodec',\n acodec, '-aq', quality, dst_file, '-loglevel', 'quiet'])\n return src_file", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def create_video(input_file, output_file):\n input_video = VideoFileClip(input_file)\n output_video = input_video.fl_image(detect_lane.fit_and_plot)\n output_video.write_videofile(output_file, audio=False)", "def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")", "def convert_video(video_file, output_file_name):\n video_stream = cv2.VideoCapture(video_file)\n total_frames = video_stream.get(cv2.CAP_PROP_FRAME_COUNT)\n background = get_median_frame(video_stream)\n video_stream.release()\n #reopen for processing:\n video_stream = cv2.VideoCapture(video_file)\n #ready an output writer\n writer = cv2.VideoWriter(output_file_name, \n cv2.VideoWriter_fourcc(*\"MP4V\"), fps,(1080,1920)) #(1920,1080))\n frameCnt=0\n pos = [] #Array for the coordinates\n while(frameCnt < total_frames-1):\n frameCnt+=1\n ret, frame = video_stream.read()\n dframe = background_subtraction(frame,background)\n cnts = find_contours(dframe)\n x,y = find_lowest_contour(cnts)\n pos.append([x,y])\n if len(pos): \n cv2.polylines(frame,np.int32([pos]),False,(0, 255, 0),2)\n writer.write(cv2.resize(frame, (1080,1920))) ## size probably shoudn't be fixed.\n writer.release()\n video_stream.release()\n return pos", "def process_video(lane, fname, output):\n\tclip = VideoFileClip(fname)\n\toutput_name = output\n\toutput_clip = clip.fl_image(lane.pipeline)\n\toutput_clip.write_videofile(output_name, audio=False)\n\tprint ('Video processed successfully')", "def make_seret(processed_files_directory='files/',fps=5):\r\n # Sort files in processed images directory\r\n files = sort_files(processed_files_directory)\r\n # Create list as container for the movie.\r\n img_array = []\r\n # For each file\r\n for file in files:\r\n file_format = file.split(\".\")\r\n if file_format[-1] == 'jpg': # verify that we will include jpg files only in the movie\r\n # Read the file\r\n img = cv2.imread(file)\r\n # Extract height, width, channels from image\r\n height, width, layers = img.shape\r\n # size = (width, height)\r\n size = (width, height)\r\n # Append image to movie container\r\n img_array.append(img)\r\n # Create a video writer for the movie\r\n out = cv2.VideoWriter(processed_files_directory+'initial.avi', cv2.VideoWriter_fourcc(*'DIVX'), fps, size)\r\n # For each image in container\r\n for image in img_array:\r\n # Write image by video writer\r\n out.write(image)\r\n # Release video writer.\r\n out.release()", "def __init__(self,vid_path:str,num_frames:int=None,vid_flow_direction:str='left'):\n \n self.num_frames=num_frames\n if vid_path.split('.')[-1]=='cine' or vid_flow_direction!='left':\n #This is a cine file or needs to be rotated, convert to mp4\n 
print('Converting .cine file to mp4 (lossless)')\n #detect platform so we can correct file paths for ffmpeg\n is_win=re.compile('.*[Ww]in.*')\n if is_win.match(sys.platform):\n corrected_vid_path='\"'+vid_path+'\"'\n else:\n #Put escape characters in front of spaces in file name\n corrected_vid_path=[]\n for c in vid_path:\n if c==' ':\n corrected_vid_path.append('\\\\')\n corrected_vid_path.append(c)\n corrected_vid_path=''.join(corrected_vid_path)\n if vid_flow_direction=='up':\n rotate='-vf \"transpose=2\" '\n elif vid_flow_direction=='left':\n rotate=''\n elif vid_flow_direction=='right':\n rotate='-vf \"transpose=2,transpose=2\" '\n else:\n raise Exception(\"vid_flow_direction must be 'up', 'left' or 'right'\")\n if num_frames!=None:\n frames='-frames:v {0} '.format(num_frames)\n else:\n frames=''\n os_handle,new_file_path=tempfile.mkstemp(suffix='.mp4')\n #close file, we don't work with it directly\n os.close(os_handle)\n ffmpeg_command='ffmpeg -y -i {orig_file} {frames}{rotate}-f mp4 -crf 0 {new_file}'.format(orig_file=corrected_vid_path,rotate=rotate,new_file=new_file_path,frames=frames)\n print(ffmpeg_command)\n list(os.popen(ffmpeg_command))\n self.vid_path=new_file_path\n self.delete_file=True\n stats=os.stat(new_file_path)\n if stats.st_size==0:\n raise Exception('File conversion failed, check that ffmpeg is on PATH')\n else:\n #Not a cine\n self.vid_path=vid_path\n self.delete_file=False", "def show_video(path: str): \n video_path = sorted(glob(path + \"/*.mp4\"))[-1]\n video = io.open(video_path, 'r+b').read()\n encoded = base64.b64encode(video)\n\n return HTML(data='''<video alt=\"test\" controls>\n <source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\" /> </video>'''\n .format(encoded.decode('ascii')))", "def create_video():\n print(\"Generating output video\")\n frame_array = []\n files = [f for f in os.listdir(MODIFIED_FRAMES_DIR) if isfile(join(MODIFIED_FRAMES_DIR, f))]\n #for sorting the file names properly\n # files.sort(key = lambda x: x[3:-4])\n files = sorted(files,key=lambda x: int(os.path.splitext(x)[0]))\n for i in range(len(files)):\n filename= MODIFIED_FRAMES_DIR + files[i]\n # print(filename)\n #reading each files\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width,height)\n \n #inserting the frames into an image array\n frame_array.append(img)\n \n out = cv2.VideoWriter(OUTPUT_FILE,cv2.VideoWriter_fourcc(*'DIVX'), FRAME_RATE, size)\n for i in range(len(frame_array)):\n # writing to a image array\n out.write(frame_array[i])\n out.release()\n print(\"Output video generated successfully...\")\n\n # img_array = []\n # for filename in glob.glob(MODIFIED_FRAMES_DIR+'/*.jpg'):\n # img = cv2.imread(filename)\n # height, width, layers = img.shape\n # size = (width,height)\n # img_array.append(img)\n\n # height, width, layers = img_array[0].shape\n # size = (width,height)\n # out = cv2.VideoWriter('output.mov',cv2.VideoWriter_fourcc(*'DIVX'), 15, size) \n # for i in range(len(img_array)):\n # out.write(img_array[i])\n # out.release()", "def make_movie(processed_files_directory='files/', WITH_SUBTITLES=False, WITH_AUDIO=False):\r\n # Declare the text for sub-titles\r\n\r\n if WITH_SUBTITLES: # if the user is willing to have subtitles in the movie\r\n with open(processed_files_directory+'subtitles.txt', 'r', encoding='utf8') as f:\r\n txt = f.read() # read the subtitles file\r\n # Split text to lines.\r\n subtitles = txt.split('\\n')\r\n # Declare VideoFileClip from the movie that I already have.\r\n clip = 
VideoFileClip(processed_files_directory + \"initial.avi\")\r\n # Declare duration of one sub-title as total duration of the video divided by number of lines.\r\n duration = clip.duration/len(subtitles)\r\n # Set start to zero.\r\n start=0\r\n # Set container for the clips.\r\n videos=[]\r\n # Loop all sub-titles\r\n for line in subtitles:\r\n # Make text clip from the reversed Hebrew text\r\n txt_clip = TextClip(line[::-1], fontsize=30, color='yellow', font='Calibri')\r\n # Set position to the bottom of screen.\r\n txt_clip = txt_clip.set_position('bottom').set_duration(duration)\r\n # Make sub clip of the movie with same duration as text clip.\r\n sub_clip = clip.subclip(start,start+duration)\r\n # Set CompositeVideoClip from the text clip and sub clip.\r\n video = CompositeVideoClip([sub_clip, txt_clip])\r\n # Insert the video to the clips container\r\n videos.append(video)\r\n # Set start time for next sub-title.\r\n start+=duration\r\n # Concatenate all clips of the container.\r\n res = concatenate_videoclips(videos)\r\n clip = res # now the clip is res\r\n else:\r\n clip = VideoFileClip(processed_files_directory+ \"initial.avi\") # the clip won't have subtitles\r\n\r\n\r\n # Set audio clip from mp3 file.\r\n if WITH_AUDIO: # if the user has chosen to include soundtrack in the movie\r\n f = 'audio.mp3' # change to mp3 soundtrack file of the movie\r\n # set the duration of the audioclip to max(duration of clip), even if the audioclip is longer\r\n audioclip = AudioFileClip(processed_files_directory+f)\r\n\r\n # check if the clip length is bigger than the\r\n if clip.duration > audioclip.duration:\r\n number_of_duplicated = int(np.ceil(clip.duration/audioclip.duration))\r\n # duplicate the audioclip in order to later fit the movie's duration\r\n audioclip = concatenate_audioclips([AudioFileClip(processed_files_directory+f) for i in range(number_of_duplicated)])\r\n\r\n # Now fit the audioclip duration to the movie's\r\n audioclip = audioclip.set_duration(clip.duration)\r\n\r\n # Set audio for the container.\r\n if not WITH_SUBTITLES: # if the user wanted to have audio included without subtitles\r\n videoclip = clip.set_audio(audioclip)\r\n else: # if the user wanted to have both audio and subtitles\r\n videoclip = res.set_audio(audioclip)\r\n else:\r\n videoclip = clip # if the user didn't want audio in the movie\r\n\r\n # Write the video file.\r\n f = 'final_movie.mp4' # change to the desired movie filename\r\n videoclip.write_videofile(processed_files_directory+f)", "def readVideo(self):\n vid = cv2.VideoCapture(self.fname)\n imgstack = []\n # grab = True\n grab, img = vid.read()\n while grab:\n imgstack.append(\n Frame(\n cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),\n self.starttime\n + datetime.timedelta(seconds=self.frame_dt * self.length),\n )\n )\n self.length += 1\n grab, img = vid.read()\n self.frames = imgstack", "def write_video_ffmpeg(\n itr: Iterator[np.ndarray],\n out_file: str | Path,\n fps: int = 30,\n out_fps: int = 30,\n vcodec: str = \"libx264\",\n input_fmt: str = \"rgb24\",\n output_fmt: str = \"yuv420p\",\n quite=False\n) -> None:\n\n first_img = next(itr)\n height, width, _ = first_img.shape\n\n stream = ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=input_fmt, s=f\"{width}x{height}\", r=fps)\n stream = ffmpeg.output(stream, str(out_file), pix_fmt=output_fmt, vcodec=vcodec, r=out_fps)\n if quite:\n stream = stream.global_args('-loglevel', 'quiet')\n stream = ffmpeg.overwrite_output(stream)\n stream = ffmpeg.run_async(stream, pipe_stdin=True)\n\n def 
write_frame(img: np.ndarray) -> None:\n stream.stdin.write(as_uint8(img).tobytes())\n\n # Writes all the video frames to the file.\n write_frame(first_img)\n for img in itr:\n write_frame(img)\n\n stream.stdin.close()\n stream.wait()\n print('Done.')", "def split_video_random(file_path, start_pos, split_length, out_path):\n s_cmd = \" -i '%s'\"%(file_path) #use default CODEC\n try:\n\tfileext = file_path.split(\".\")[-1]\n except IndexError as e:\n\traise IndexError(\"No ext. in filename. Error: \" + str(e))\n\n split_start = start_pos\n split_length = split_length\n head, tail = os.path.split(file_path)\n name, ext = tail.split('.')\n filebase=name+'_'+str(start_pos)+'-'+str(split_length)\n\n dstfilebase = out_path + '/' + filebase # create output file base\n\n #split_str = \"\"\n #split_str += \" -ss \" + str(split_start) + \" -t \" + str(split_length) + \" '\"+ dstfilebase + \".\" + fileext + \"'\"\n\n s_str = \"\"\t\n #s_str += \"ffmpeg\"+\" -ss \"+str(split_start)+\" -t \"+str(split_length) + s_cmd + \" '\"+dstfilebase + \".\" + fileext + \"'\"\n s_str += \"ffmpeg\" + \" -ss \" + str(split_start) + s_cmd + \" -t \" + str(split_length) + \" '\"+ dstfilebase + \".\" + fileext + \"'\"\n print(\"########################################################\")\n #print \"About to run: \"+split_cmd+split_str\n print(\"About to run: \"+s_str)\n print(\"########################################################\")\n #output = subprocess.Popen(split_cmd+split_str, shell = True, stdout = subprocess.PIPE).stdout.read()\n output = subprocess.Popen(s_str, shell=True, stdout=subprocess.PIPE).stdout.read()", "def video_files_to_tfrecords(output_file, filepaths, label_dict, downsample_graph):\n\n if type(filepaths) != list:\n filepaths = [filepaths] # catch single inputs (not a normal case)\n\n tqkws = {\n 'total': len(filepaths),\n 'unit': ' videos',\n 'desc': 'Serializing video frames'\n }\n\n video_placeholder, downsampled = downsample_graph\n\n with tf.python_io.TFRecordWriter(output_file) as writer:\n for path in tqdm.tqdm(filepaths, **tqkws):\n video_array = video_to_array(path)\n label = label_dict[os.path.split(os.path.abspath(os.path.join(path, os.pardir)))[-1]]\n\n l = video_array.shape[0]\n w = video_array.shape[2]\n h = video_array.shape[1]\n\n if h != 240 or w != 320:\n continue\n\n downsampled_video_array = downsampled.eval({video_placeholder: video_array})\n feature_dict = {\n 'height': _int_feature(h),\n 'width': _int_feature(w),\n 'length': _int_feature(l),\n 'video': _bytes_feature(downsampled_video_array.astype(np.uint8).tostring()),\n 'label': _int_feature(label)\n }\n\n observation = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n\n writer.write(observation.SerializeToString())" ]
[ "0.67228967", "0.6123915", "0.6026224", "0.6026062", "0.6018919", "0.60161096", "0.5921599", "0.58913285", "0.58782333", "0.58761436", "0.5870621", "0.5852439", "0.58420694", "0.5806366", "0.5802016", "0.57724094", "0.57724094", "0.574259", "0.5720971", "0.5720353", "0.57061726", "0.57024807", "0.56999785", "0.56236637", "0.5607386", "0.5605964", "0.55996925", "0.55873966", "0.5584279", "0.55832636" ]
0.70133644
0
Set the "entity_class_registry" field
def set_entity_class_registry(self, entity_class_registry): self.entity_class_registry = entity_class_registry
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_class(self, entity_class):\n key = entity_class.__collection_name__\n\n if key not in self._registered_types:\n self._registered_types[key] = entity_class", "def _extract_entity_class_registry(self):\n for description in self.sa_query.column_descriptions:\n if \"entity\" in description:\n declarative_meta = description[\"entity\"]\n _class_registry = getattr(\n declarative_meta, \"_decl_class_registry\", None)\n if _class_registry is not None:\n entity_class_registry = {}\n for elmnt in _class_registry.values():\n if type(elmnt) is DeclarativeMeta:\n description = elmnt.__table__.description\n entity_class_registry[description] = elmnt\n return entity_class_registry\n return None", "def set_target_registry(args):\n if 'target_registry' not in args:\n return\n\n if args['target_registry'] == '':\n args['target_registry'] = None\n return\n\n args['target_registry'] = (\n AuthenticatedRegistry.query.filter_by(\n base_name=args['target_registry'])).first()\n\n if args['target_registry'] is None:\n raise NoModelError('Registry')", "def entity_reg(hass):\n return mock_registry(hass)", "def setEntityLoader(resolver):\n ret = libxml2mod.xmlSetEntityLoader(resolver)\n return ret", "def register_bundle(self, cls):\n return self.register_entity('bundle', cls)", "def _register(registry, cls):\n assert issubclass(cls, Registrable)\n\n reg_attr = f\"_{cls.__name__}_registered\"\n if getattr(cls, reg_attr, False):\n return cls\n\n name = cls.__fieldtype__()\n assert (\n name not in registry\n ), f\"{cls!r} cannot be registered as {name!r}: already used by {registry[name]!r}\"\n\n registry[name] = cls\n setattr(cls, reg_attr, True)\n return cls", "def register_driver(self, key, cls):\n self.drivers.update({key: cls})", "def register(cls, class_):\n cls._registered[class_.tag()] = class_", "def register_group(self, cls):\n return self.register_entity('group', cls)", "def __init__(self):\n self.registry = {}", "def _register(cls):\n clsid_path = \"Software\\\\Classes\\\\CLSID\\\\\" + cls._reg_clsid_\n progid_path = \"Software\\\\Classes\\\\\" + cls._reg_progid_\n spec = cls.__module__ + \".\" + cls.__name__\n\n # register the class information\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path, win32con.REG_SZ, cls._reg_desc_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\ProgID\", win32con.REG_SZ, cls._reg_progid_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\PythonCOM\", win32con.REG_SZ, spec)\n hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\InprocServer32\")\n win32api.RegSetValueEx(hkey, None, None, win32con.REG_SZ, pythoncom.__file__)\n win32api.RegSetValueEx(hkey, \"ThreadingModel\", None, win32con.REG_SZ, \"Both\")\n\n # and add the progid\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, progid_path, win32con.REG_SZ, cls._reg_desc_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, progid_path + \"\\\\CLSID\", win32con.REG_SZ, cls._reg_clsid_)", "def entity_type(self, entity_type):\n self._entity_type = entity_type", "def __init__(self):\n self._registry = {}", "def register(cls):\n register(cls, cls.provided_class)", "def _class(self, _class):\n\n self.__class = _class", "def _class(self, _class):\n\n self.__class = _class", "def entity_registry_enabled_default(self):\n return False", "def entity_type(self, entity_type):\n\n self._entity_type = entity_type", "def register_class(cls):\n if cls is RegisteredType:\n raise \"Please do _not_ register RegisteredType!\"\n \n cid = 
RegisteredType._reg[autoid]\n RegisteredType._reg['classes'][cls] = cid\n RegisteredType._reg['classids'][cid] = cls\n RegisteredType._reg['autoid'] += 1", "def set_entity(cls, entity):\n # Preparing auto increment\n entity_count = cls.get_entity_count()\n new_key = \"entity:\" + str(entity_count + 1)\n\n # Set key to Entity\n entity.entity_key = new_key\n\n # Execute HMSET for assigning hash structure\n result = cls.db.hmset(new_key, entity.extract())\n\n # If success, increase key\n if result:\n cls.set_entity_count(entity_count + 1)\n return result", "def entity_registry_enabled_default(self) -> bool:\n return False", "def setEntityTypes(self, value):\n return self._set(entityTypes=value)", "def entity_type(self, entity_type: str):\n\n self._entity_type = entity_type", "def entity(self, entity):\n\n self._entity = entity", "def include(self, registry):\n for cls in registry.values():\n db_to_element = {}\n\n props = sorted([(k,v) for k,v in cls.__dict__.items()\n if isinstance(v, Property)]\n , key=lambda p:p[1].instance_idx)\n for prop_name, prop_value in props:\n value_name = prop_value.name\n if value_name:\n db_to_element[value_name] = prop_name\n prop_name = value_name\n else:\n db_to_element[prop_name] = prop_name\n\n self.guard_reserved_words(prop_name, cls)\n\n self.props_from_db[cls] = self.create_props_mapping(db_to_element)\n self.init_broker_for_class(cls)\n self.registry[cls.registry_name] = cls", "def write_registry(self) -> None:\n self.manager.write_registry()", "def set(self, obj: _T) -> None:\n\n self.registry[self.scopefunc()] = obj", "def save(self, *args, **kwargs):\n self.entity_type = \"Person\"\n super().save(*args, **kwargs)", "def _register(self, aggregate_root_entity):\n if not isinstance(aggregate_root_entity, self._aggregate_root_entity_class()):\n raise TypeError(\"{!r} is not of type {} therefore cannot be store in a {}\"\n .format(aggregate_root_entity, self._aggregate_root_entity_class().__name__, self.__class__.__name__))\n self._track(aggregate_root_entity)\n self._intern(aggregate_root_entity)" ]
[ "0.6296474", "0.6145517", "0.5784777", "0.57402545", "0.5640025", "0.5552346", "0.54653853", "0.52816427", "0.5269132", "0.5262372", "0.5256132", "0.52219176", "0.5213515", "0.5207139", "0.5203314", "0.52020997", "0.52020997", "0.51808226", "0.5144154", "0.51290417", "0.51211494", "0.50926924", "0.50775343", "0.50489837", "0.5045451", "0.5003249", "0.49861822", "0.49723077", "0.49671492", "0.49576122" ]
0.8463285
0
Extract an entity class registry from one of the models of the inner SQLAlchemy query. This result of this function is used by several SQLAlchemy components during the extraction of the SQL query from a SQLAlchemy query.
def _extract_entity_class_registry(self): for description in self.sa_query.column_descriptions: if "entity" in description: declarative_meta = description["entity"] _class_registry = getattr( declarative_meta, "_decl_class_registry", None) if _class_registry is not None: entity_class_registry = {} for elmnt in _class_registry.values(): if type(elmnt) is DeclarativeMeta: description = elmnt.__table__.description entity_class_registry[description] = elmnt return entity_class_registry return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_real_object(self):\n query_string = dedent(f\"\"\"\\\n import app.config.models_importer as models_importer\n\n class_ = models_importer.all_models['{self.ref_class}']\n \n class_.query.get({self.ref_id})\"\"\")\n\n return exec(query_string)", "def _namespaced_query(session, obj_outer, cls_inner, name_inner):\n return session.query(cls_inner) \\\n .filter_by(owner = obj_outer) \\\n .filter_by(label = name_inner).first()", "def get_models_query():\n query = db.session.query(Products.model).distinct()\n return query", "def resolve_model(root: Entity, *args) -> str:\n return 'entities.entity'", "def models(self, model=None):\n for query in self.__queries:\n if isinstance(query, orb.Query):\n yield query.model(model)\n else:\n for model in query.models(model):\n yield model", "def expand(self, graph):\n for triple in self.g:\n graph.add(triple)\n entity_tags = defaultdict(set)\n res = graph.query(\n \"\"\"SELECT ?ent ?tag WHERE {\n ?ent brick:hasTag ?tag\n }\"\"\"\n )\n for ent, tag in res:\n entity_tags[ent].add(tag)\n for entity, tagset in entity_tags.items():\n tagset = list(map(lambda x: x.split(\"#\")[-1], tagset))\n lookup = self.lookup_tagset(tagset)\n if len(lookup) == 0:\n continue\n klasses = list(lookup[0][0])\n graph.add((entity, A, BRICK[klasses[0]]))", "def register_orm_base(self, base):\n for model in utils.searchable_sqlalchemy_models(base):\n self.register_type(model.es_type_name, model.es_properties, model)", "def entity_classes_to_map_over(cls):\n return [exp_models.ExplorationSnapshotMetadataModel]", "def infer_model(self, model):\n\n from .graph import Graph\n\n entities = model[\"rows\"]\n # index the entities by their ID field\n entities = {e[\"id\"].replace('\"', \"\"): {\"tags\": e} for e in entities}\n # TODO: add e['dis'] for a descriptive label?\n brickgraph = Graph(load_brick=False)\n\n # marker tag pass\n for entity_id, entity in entities.items():\n marker_tags = {\n k for k, v in entity[\"tags\"].items() if v == \"m:\" or v == \"M\"\n }\n for f in self._filters:\n marker_tags = list(filter(f, marker_tags))\n # translate tags\n entity_tagset = list(self._translate_tags(marker_tags))\n\n equip_ref = entity[\"tags\"].get(\"equipRef\")\n # infer tags for single entity\n triples, _ = self.infer_entity(\n entity_tagset, identifier=entity_id, equip_ref=equip_ref\n )\n brickgraph.add(*triples)\n self._generated_triples.extend(triples)\n\n # take a pass through for relationships\n for entity_id, entity in entities.items():\n relships = {k: v for k, v in entity[\"tags\"].items() if k.endswith(\"Ref\")}\n # equip_entity_id = entity_id.replace(' ', '_') + '_equip'\n point_entity_id = entity_id.replace(\" \", \"_\") + \"_point\"\n if \"equipRef\" not in relships:\n continue\n reffed_equip = (\n relships[\"equipRef\"].replace(\" \", \"_\").replace('\"', \"\") + \"_equip\"\n )\n if self._BLDG[point_entity_id] in brickgraph.nodes:\n triple = (\n self._BLDG[reffed_equip],\n BRICK.hasPoint,\n self._BLDG[point_entity_id],\n )\n brickgraph.add(triple)\n self._generated_triples.append(triple)\n return brickgraph", "def reverse_entity_type_subclass_map(entity_type_subclass_map):\n entity_type_superclass_map = {}\n\n for superclass, subclasses in tqdm(entity_type_subclass_map.items()):\n for subclass in subclasses:\n if subclass['id'] in entity_type_superclass_map:\n entity_type_superclass_map[subclass['id']].append(superclass)\n else:\n entity_type_superclass_map[subclass['id']] = [superclass]\n\n return entity_type_superclass_map\n\n\n#def get_type_index():\n \"\"\"\n 
Create an index s.t. each type gets an ID (e.g. person -> 1,\n organization -> 2, ...\n \"\"\"\n #return dict(enumerate(type_list))\n\n\n#def get_index_of_type(entity_type):\n \"\"\"\n Return the index of a given entity type.\n \"\"\"\n #return type_list.index(entity_type)\n\n\n#def get_type_by_index(index):\n \"\"\"\n Return the type of a given index.\n \"\"\"\n #return type_list[index - 1] # TODO re-train model with new indices", "def to_entity(cls, model_obj: \"SqlalchemyModel\"):\n item_dict = {}\n for field_name in attributes(cls.meta_.entity_cls):\n item_dict[field_name] = getattr(model_obj, field_name, None)\n return cls.meta_.entity_cls(item_dict)", "def _get_model_class_from_table(self, table):\r\n try:\r\n model_class = [m for m in get_models() if connection.introspection.table_name_converter(m._meta.db_table) in map(connection.introspection.table_name_converter,[table])][0] \r\n m2m = False \r\n except IndexError:\r\n try: \r\n # this is a many to many field \r\n model_class = [f.rel.to for m in get_models() for f in m._meta.local_many_to_many if f.m2m_db_table() == table][0] \r\n m2m = True \r\n except IndexError: \r\n # this is an inner join \r\n table = self.query.alias_map[table][0]\r\n return self._get_model_class_from_table(table)\r\n return model_class, m2m", "def fetchall(self):\n rows = self.cursor.fetchall()\n\n if self.model.single:\n for row in rows:\n yield self.__instance_from_db(self.model, row)\n else:\n for row in rows:\n yield tuple(self.__instance_from_db(m, row) for m in self.model.models)", "def all(self, cls=None):\n results = {}\n if cls is not None:\n # TODO cls always is a class\n if type(cls) == str:\n cls = eval(cls)\n for instance in self.__session.query(cls):\n key = \"{}.{}\".format(cls.__name__, instance.id)\n results[key] = instance\n return results\n else:\n for table in self.__tables:\n for instance in self.__session.query(eval(table)):\n key = \"{}.{}\".format(table, instance.id)\n results[key] = instance\n return results", "def _default_registry(self):\n registry = JSONSerializer.instance().registry.copy()\n registry.update({m.__model__: m for m in find_sql_models()})\n return registry", "def _get_related_models(self, parent_model):\r\n related_models = set()\r\n rev_reversemapping = dict([(v,k) for k,v in self._reversemapping.iteritems()])\r\n if rev_reversemapping:\r\n for attname, related in self._get_reverse_relations(parent_model):\r\n related_models.add((rev_reversemapping[attname], related.model))\r\n\r\n for field in parent_model._meta.fields:\r\n if field.rel and field.rel.to._meta.db_table in self.query.tables and field.rel.to != parent_model:\r\n related_models.add((field.attname, field.rel.to))\r\n \r\n for attname, model_class in related_models:\r\n yield attname, model_class\r\n if attname.endswith(\"_id\"):\r\n attname = attname[:-3]\r\n for join_attname, model_klass in self._get_related_models(model_class):\r\n yield LOOKUP_SEP.join((attname,join_attname)), model_klass", "def query(cls):\n query_class = cls.query_class\n return query_class(orm_class=cls)", "def all(self, cls=None):\n obj_dict = {}\n all_objs = []\n if cls:\n all_objs = self.__session.query(eval(cls)).all()\n else:\n for table in self.all_classes:\n all_objs += self.__session.query(eval(table)).all()\n obj_dict = {obj.__class__.__name__ + '.' 
+ obj.id: obj\n for obj in all_objs}\n # TODO BUG: includes <sqlalchemy> object in dict\n return obj_dict", "def expand(self, model=None, ignoreFilter=False):\n model = self.__model or model\n if not model:\n raise orb.errors.QueryInvalid('Could not traverse: {0}'.format(self.__column))\n\n schema = model.schema()\n parts = self.__column.split('.')\n\n # expand the current column\n lookup = schema.column(parts[0], raise_=False) or schema.collector(parts[0])\n\n if lookup:\n # utilize query filters to generate\n # a new filter based on this object\n query_filter = lookup.queryFilterMethod()\n if callable(query_filter) and not ignoreFilter:\n new_q = query_filter(model, self)\n if new_q:\n return new_q.expand(model, ignoreFilter=True)\n else:\n return None\n\n # otherwise, check to see if the lookup\n # has a shortcut to look through\n elif isinstance(lookup, orb.Column) and lookup.shortcut():\n parts = lookup.shortcut().split('.')\n lookup = schema.column(parts[0], raise_=False)\n\n if len(parts) == 1:\n return self\n else:\n if isinstance(lookup, orb.Collector):\n return orb.Query(model).in_(lookup.collectExpand(self, parts))\n\n elif isinstance(lookup, orb.ReferenceColumn):\n rmodel = lookup.referenceModel()\n sub_q = self.copy()\n sub_q._Query__column = '.'.join(parts[1:])\n sub_q._Query__model = rmodel\n records = rmodel.select(columns=[rmodel.schema().idColumn()], where=sub_q)\n return orb.Query(model, parts[0]).in_(records)\n\n else:\n raise orb.errors.QueryInvalid('Could not traverse: {0}'.format(self.__column))", "def resolve_entities(root, info, ids: list[int], **kwargs):\n return Entity.objects.filter(id__in=ids)", "def matching_objects(self, filter_deleted):\n from rome.core.orm.utils import get_literal_query\n from rome.lang.sql_parser import QueryParser\n from rome.core.rows.rows import construct_rows\n\n read_deleted = self.read_deleted\n if filter_deleted:\n read_deleted = \"no\"\n\n if self._autoflush:\n if self.session is not None:\n self.session.commit()\n\n if not self.query_tree:\n sql_query = get_literal_query(self.sa_query)\n parser = QueryParser()\n query_tree = parser.parse(sql_query)\n else:\n query_tree = self.query_tree\n\n if not self.entity_class_registry:\n self.entity_class_registry = self._extract_entity_class_registry()\n entity_class_registry = self.entity_class_registry\n\n # Collecting variables of sub queries\n subqueries_variables = {}\n for (variable_name, sub_query_tree) in query_tree.variables.iteritems():\n sub_query = Query()\n sub_query.set_query_tree(sub_query_tree)\n sub_query.set_entity_class_registry(entity_class_registry)\n result = sub_query.all()\n subqueries_variables[variable_name] = result\n\n rows = construct_rows(query_tree,\n entity_class_registry,\n read_deleted=read_deleted,\n subqueries_variables= subqueries_variables)\n\n def row_function(row, column_descriptions, decoder):\n from rome.core.session.utils import ObjectAttributeRefresher\n final_row = []\n one_is_an_object = False\n object_attribute_refresher = ObjectAttributeRefresher()\n for column_description in column_descriptions:\n if type(column_description[\"type\"]) in [Integer, String]:\n row_key = column_description[\"entity\"].__table__.name.capitalize(\n )\n property_name = column_description[\"name\"]\n value = None\n if row_key in row and property_name in row[row_key]:\n value = row[row_key].get(property_name, None)\n else:\n # It seems that we are parsing the result of a function call\n column_description_expr = column_description.get(\"expr\",\n None)\n if 
column_description_expr is not None:\n property_name = str(column_description_expr)\n value = row.get(property_name, None)\n if value is not None:\n final_row += [value]\n else:\n logging.error(\n \"Could not understand how to get the value of '%s' with this: '%s'\"\n % (column_description.get(\"expr\", \"??\"), row))\n elif type(column_description[\"type\"]) == DeclarativeMeta:\n one_is_an_object = True\n row_key = column_description[\"entity\"].__table__.name\n new_object = column_description[\"entity\"]()\n attribute_names = map(lambda x: x.key, list(\n column_description[\"entity\"].__table__.columns))\n for attribute_name in attribute_names:\n value = decoder.decode(row[row_key].get(attribute_name,\n None))\n setattr(new_object, attribute_name, value)\n\n if \"___version_number\" in row[row_key]:\n setattr(new_object, \"___version_number\", row[row_key][\"___version_number\"])\n\n load_options = None\n if hasattr(self.sa_query, \"_with_options\"):\n load_options = self.sa_query._with_options\n object_attribute_refresher.refresh(new_object, load_options=load_options)\n final_row += [new_object]\n else:\n logging.error(\"Unsupported type: '%s'\" %\n (column_description[\"type\"]))\n if not one_is_an_object:\n return [final_row]\n else:\n return final_row\n\n def row_function_subquery(row, attributes, decoder):\n result = []\n for attribute in attributes:\n tablename = attribute.split(\".\")[0]\n attribute_name = attribute.split(\".\")[1]\n result += [row[tablename][attribute_name]]\n return result\n\n decoder = Decoder()\n\n if len(self.sa_query.column_descriptions) > 0:\n final_rows = map(lambda r: row_function(\n r, self.sa_query.column_descriptions, decoder), rows)\n else:\n final_rows = map(lambda r: row_function_subquery(\n r, self.query_tree.attributes, decoder), rows)\n\n if len(self.sa_query.column_descriptions) <= 1:\n # Flatten the list\n final_rows = [item for sublist in final_rows for item in sublist]\n\n # Add watcher on objects\n if self.session is not None:\n for obj in final_rows:\n if hasattr(obj, \"id\"):\n self.session.watch(obj)\n\n return final_rows", "def init_model(connection):\n db = connection\n\n for obj in common.__dict__.itervalues():\n if type(obj) == type and issubclass(obj, common.Model) and hasattr(obj, '__tablename__'):\n tablename = getattr(obj, '__tablename__')\n obj._object_store = Domain(db, tablename)\n collection_to_class[obj._object_store] = obj", "def entity_classes_to_map_over(cls):\n\n return [user_models.UserSubscriptionsModel]", "def apply_model_to_query(self, query):\n pass", "def entity_classes_to_map_over(cls):\n return [exp_models.ExplorationRightsSnapshotMetadataModel]", "def get_model(self):\n return QueryS", "def _egg_module(self, query):\n return process.extractOne(query, self.egg_names,\n scorer=fuzz.ratio,\n score_cutoff=self.egg_threshold)", "def get_query(self):\n return self.query_class(self)", "def _get_model_from_table_name(table_name: str) -> Optional[Type[RDSModel]]:\n table_model = None\n try:\n if hasattr(Base, '_decl_class_registry'):\n models = Base._decl_class_registry.values() # sqlalchemy < 1.4\n else:\n models = Base.registry._class_registry.values()\n\n for model in models:\n if hasattr(model, '__tablename__') and model.__tablename__ == table_name:\n table_model = model\n except Exception as e:\n LOGGER.exception(f'Failed to get model for the table: {table_name} from rds model base')\n raise e\n\n return table_model", "def to_orm(self):\n data = {}\n for key, value in self:\n # If field is Type[ORMModel],\n # 
recursively convert to an ORM object.\n if hasattr(value, \"__orm__\"):\n data[key] = value.to_orm()\n # If the field is a dictionary, iterate over\n # values and convert any ORM models to ORM objects\n # else leave them alone.\n elif isinstance(value, dict):\n nested_data = {}\n for nested_key, nested_value in value:\n if hasattr(nested_value, \"__orm__\"):\n nested_data[key] = nested_value.to_orm()\n else:\n nested_data[key] = value\n data[key] = nested_data\n # If the field is an iterable, iterate through list\n # and convert ORM Models to ORM objects.\n #\n # There has to be a better way to write this conditional...\n elif (\n isinstance(value, collections.Iterable) and\n type(value) not in (str, bytearray, bytes)\n ):\n nested_data = []\n for nested_value in value:\n if hasattr(nested_value, \"__orm__\"):\n nested_data.append(nested_value.to_orm())\n else:\n nested_data.append(nested_value)\n # Convert iterable to the appropriate type at the\n # end.\n data[key] = type(value)(nested_data)\n # Leave the value alone if its not an ORMModel\n else:\n data[key] = value\n return self.__orm__(**data)" ]
[ "0.5555714", "0.5259711", "0.5109347", "0.50943285", "0.50772786", "0.49984002", "0.4953237", "0.49206263", "0.47721955", "0.47709483", "0.47572222", "0.47535753", "0.47392863", "0.47341767", "0.47276932", "0.46964145", "0.46887028", "0.46723166", "0.4664276", "0.4661921", "0.4645294", "0.4644923", "0.4642833", "0.4636559", "0.4629262", "0.4627161", "0.46072057", "0.4599009", "0.45899713", "0.45689103" ]
0.75623906
0
Call f on each item in seq, calling inter() in between.
def interleave(inter, f, seq): seq = iter(seq) try: f(next(seq)) except StopIteration: pass else: for x in seq: inter() f(x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sequence(f, lst: list) -> list:\n ret = []\n for ele in lst:\n ret.append(f(ele))\n return ret", "def intersperse(value, seq):\n seq = iter(seq)\n\n try:\n yield next(seq)\n except StopIteration:\n return\n\n for item in seq:\n yield value\n yield item", "def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def scanl(f, base, l):\n yield base\n for x in l:\n base = f(base, x)\n yield base", "def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def intercalate(value, seq):\n return flatten(intersperse(value, seq))", "def _fold_loop(cls, f, agg, next):\n\n while next is not None:\n (val, next) = next\n agg = f(val, agg)\n return agg", "def each(self, func):\n\n for i in self._:\n func(i)\n return self", "def sequence_side_effect(*args):\n seq = list(args)\n\n def rv_fun(*args, **kw):\n return seq.pop(0)\n return rv_fun", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc", "def imap_c(func):\n return functools.partial(imap, func)", "def enumerate_list(seq):\n return zip(xrange(len(seq)), seq)", "async def a_enumerate(seq, start=0):\n i = start\n async for val in seq:\n yield i, val\n i += 1", "def mapf(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n return (f(x) for x in C)", "def scanl(func, start, itr):\n if not callable(func):\n raise TypeError(\"First argument to scanl must be callable\")\n itr = iter(itr)\n\n return _scanl(func, start, itr)", "def coroutine(f, *a, **kw):\n i = f(*a, **kw)\n i.next()\n return i", "def foreach(func, iterable):\n\n\tfor x in iterable:\n\t\tfunc(x)", "def scan(func, iterable, start=_EMPTY, *, echo_start=True):\n it = iter(iterable)\n if start is _EMPTY:\n start = next(it)\n if echo_start:\n yield start\n for item in it:\n start = func(start, item)\n yield start", "def foldl(func, start, itr):\n return _foldl(func, start, iter(itr))", "def mapcatting(f):\n @coroutine\n def gen(target):\n while True:\n xs = yield\n for x in f(xs):\n target.send(x)\n\n return gen", "def _call_n(x, f, n, *args, **kwargs):\n return [f(i, x, *args, **kwargs) for i in range(n)]", "def apply_(self, function):\n self.sequences = [function(seq) for seq in self.sequences]\n return self", "def progression(first_item:int, amount:int,func):\n item = first_item\n count = 0\n stop = False\n while count < amount and not stop:\n stop = yield item\n item = func(item)\n count += 1", "def foreach(function):\n return partial(map, function)", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def mapg(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n for x in C:\n yield f(x)", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item" ]
[ "0.61680573", "0.6034688", "0.6013112", "0.5890083", "0.5763176", "0.5744706", "0.5691687", "0.5686545", "0.56322396", "0.5617056", "0.5564194", "0.55386853", "0.55105126", "0.5491585", "0.5479231", "0.5478667", "0.5475974", "0.54754245", "0.5453396", "0.5437749", "0.5408432", "0.53924096", "0.53812087", "0.53787", "0.5364877", "0.53592646", "0.53576183", "0.53561157", "0.5329946", "0.5317975" ]
0.77389354
0
Gets the device function name by translating a typed Python version to a templated cpp version. Python functions looks like getVariableFloatArray6 and translate to getVariable This function will detect and test against a set of known types and also extract the Array length This function returns None if the string is invalid in format but only throws an error if the format is correct but the type is invalid.
def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True): cpp_func_name = "" py_func = tree.attr # extract function name start for prefix in permitted_prefixes: if py_func.startswith(prefix): cpp_func_name = prefix py_func = py_func[len(prefix):] break # dont allow the else else: return None # check type and lengths if allow_lengths: #split to get type and Array Length (This could **potentially** be looked up from the model description but current syntax is consistent with swig bindings) type_and_length = py_func.split("Array") if type_and_length[0] not in self._fgpu_types: self.RaiseError(tree, f"'{type_and_length[0]}' is not a valid FLAME GPU type") t = self._fgpu_types[type_and_length[0]] # generate template args if (len(type_and_length) == 1): cpp_func_name += f"<{t}>" elif (len(type_and_length) == 2): cpp_func_name += f"<{t}, {type_and_length[1]}>" else: return None else: if py_func not in self._fgpu_types: self.RaiseError(tree, f"'{py_func}' is not a valid FLAME GPU type") t = self._fgpu_types[py_func] cpp_func_name += f"<{t}>" # return return cpp_func_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def function_name_to_string(func):\n if func == statistical_parity_difference:\n return \"Statistical Parity Difference\"\n if func == theil_index:\n return \"Theil Index\"\n if func == equal_opportunity_difference:\n return \"Equal Opportunity Difference\"\n if func == disparate_impact:\n return \"Disparate Impact\"\n if func == average_odds_difference:\n return \"Average Odds Difference\"\n if func == auc:\n return \"AUC\"\n if func == binary_accuracy:\n return \"Binary Accuracy\"", "def cython_functionname(self, t, cycyt=None):\n if cycyt is None:\n t = self.canon(t)\n if isinstance(t, basestring):\n return t, self.cython_functionnames[t]\n elif t[0] in self.base_types:\n return t, self.cython_functionnames[t[0]]\n return self.cython_functionname(t, self.cython_functionnames[t[0]])\n d = {}\n for key, x in zip(self.template_types[t[0]], t[1:-1]):\n if isinstance(x, basestring):\n val = self.cython_functionnames[x] if x in self.cython_functionnames \\\n else x\n elif isinstance(x, Number):\n val = str(x).replace('-', 'Neg').replace('+', 'Pos')\\\n .replace('.', 'point')\n elif x[0] in self.base_types:\n val = self.cython_functionnames[x[0]]\n else:\n _, val = self.cython_functionname(x, self.cython_functionnames[x[0]])\n d[key] = val\n return t, cycyt.format(**d)", "def _get_source_type(version_string):\n # type: (str) -> Optional[str]\n if not version_string:\n return None\n\n if \"http://\" in version_string or \"https://\" in version_string:\n return \"url\"\n elif os.path.exists(version_string) and os.path.isfile(version_string):\n return \"file\"\n elif re.match(r\"\\d+\\.\\d+\\.\\d+\", version_string) or version_string == \"current\":\n return \"install_script\"\n else:\n raise ValueError(\n 'Invalid value \"%s\" for version_string. If it\\'s a path to a file, make'\n \"sure the file exists and if it's a URL, ensure URL exists.\"\n % (version_string)\n )", "def get_type_from_str(type_str):\n try:\n # Assume the current language to be C/C++ and make a try.\n return gdb.parse_and_eval(\"(%s *)0\" % type_str).type.target()\n except RuntimeError:\n # If assumption of current language to be C/C++ was wrong, then\n # lookup the type using the API.\n try:\n return gdb.lookup_type(type_str)\n except RuntimeError:\n return None", "def _get_uniform_function_name(varinfo):\n # NOTE: varinfo == dict(vartype=vartype, ndim=ndim, size=size)\n float_suffix = {True: 'f', False: 'i'}\n array_suffix = {True: 'v', False: ''}\n \n vartype = varinfo[\"vartype\"]\n ndim = varinfo[\"ndim\"]\n size = varinfo.get(\"size\", None)\n args = ()\n \n # scalar or vector uniform\n if type(ndim) == int or type(ndim) == long:\n # find function name\n funname = \"glUniform%d%s%s\" % (ndim, \\\n float_suffix[vartype == \"float\"], \\\n array_suffix[size is not None])\n\n # find function arguments\n if size is not None:\n args += (size,)\n \n # matrix uniform\n elif type(ndim) == tuple:\n # find function name\n funname = \"glUniformMatrix%dfv\" % (ndim[0])\n args += (1, False,)\n \n return funname, args", "def _parse_function(proto_string):\n _, parsed_features = tf.parse_single_sequence_example(\n proto_string, sequence_features=_parsing_spec(input_size))\n lengths = tf.shape(parsed_features['inputs'])[0]\n # Type int32 is not supported by parsing spec, but int64 is not supported by\n # XLA. Here cast to int32. The parsing functions are executed by the C++\n # threads. 
So, casting op will not slow down the session.run call later.\n labels = tf.to_int32(parsed_features['labels'])\n return parsed_features['inputs'], lengths, labels", "def src_get_name(converter_type):\n return ffi.string(_lib.src_get_name(converter_type)).decode()", "def unparse_type(type_str):\n if not type_str.startswith('array'):\n return type_str\n arg_dim = type_str.lstrip('array')[0]\n data_type = type_str.lstrip('array')[1:]\n arg_type = \"vizgen.ndarray('\" + data_type + \"', \" + arg_dim + \")\"\n return arg_type", "def formatLookup(format_str):\n pat = '(\\d+)([A-Z])'\n match = re.search(pat, format_str)\n #print match.group()\n \n data_len = int(match.group(1))\n data_fmt = str(match.group(2))\n np_fmt = fitsFormatLookup(data_fmt)\n np_dtype = '%i%s'%(data_len, np_fmt)\n \n return np_dtype, data_len, np_fmt", "def find_type(token_string: str):\n if re.compile('\\d+').match(token_string):\n return 'number'\n elif re.compile('[a-zA-Z]').match(token_string):\n return 'id'\n elif re.compile('\\*|\\+|-|/').match(token_string):\n return 'op'\n else:\n return 'undefined'", "def sample_type_str(t):\n if t == dsl.Type.NUMBER or t == dsl.Type.DIGIT:\n return get_number()\n elif t == dsl.Type.WORD:\n return get_word()\n elif t == dsl.Type.ALPHANUM or t == dsl.Type.CHAR:\n return get_alphanumeric()\n elif t == dsl.Type.ALL_CAPS:\n return get_caps()\n elif t == dsl.Type.PROP_CASE:\n return get_proper_case()\n elif t == dsl.Type.LOWER:\n return get_lower()\n else:\n raise ValueError('Unsupported type: {}'.format(t))", "def _get_shader_type(varinfo):\n if type(varinfo[\"ndim\"]) == int or type(varinfo[\"ndim\"]) == long:\n if varinfo[\"ndim\"] == 1:\n shader_type = varinfo[\"vartype\"]\n elif varinfo[\"ndim\"] >= 2:\n shader_type = \"vec%d\" % varinfo[\"ndim\"]\n if varinfo[\"vartype\"] != \"float\":\n shader_type = \"i\" + shader_type\n # matrix: (2,2) or (3,3) or (4,4)\n elif type(varinfo[\"ndim\"]) == tuple:\n shader_type = \"mat%d\" % varinfo[\"ndim\"][0]\n return shader_type", "def generate_definition(self):\n apientry = \"\"\n if self.__name[:2] == \"gl\":\n apientry = \"DNLOAD_APIENTRY \"\n params = \"void\"\n if self.__parameters:\n params = \", \".join(self.__parameters)\n return \"%s (%s*%s)(%s)\" % (self.__returntype, apientry, self.__name, params)", "def generate_prototype(self):\n apientry = \"\"\n if self.__name[:2] == \"gl\":\n apientry = \"DNLOAD_APIENTRY \"\n params = \"void\"\n if self.__parameters:\n params = \", \".join(self.__parameters)\n return \"(%s (%s*)(%s))\" % (self.__returntype, apientry, params)", "def compatible_firmware_version(self):\n identifier = self.firmware_version.split('compiled')[0]\n buf_size = self.MAX_BUF_SIZE\n buf = (ctypes.c_char * buf_size)()\n res = self._dll.JLINKARM_GetEmbeddedFWString(identifier.encode(), buf, buf_size)\n if res < 0:\n raise errors.JLinkException(res)\n\n return ctypes.string_at(buf).decode()", "def get_jtype_string(c_type: str) -> str:\n m = STRING_PAT.match(c_type)\n if not m:\n return \"\"\n return \"jstring\"", "def get_parsed_declaration(self) -> str:\n args = self._get_arguments()\n\n func = self.node\n tu = func.tu\n\n # For functions the extent encompasses the return value, and the\n # location is the beginning of the functions name. 
So we can consume\n # all tokens in between.\n end = cindex.SourceLocation.from_offset(\n tu, func.location.file, func.location.offset - 1\n )\n extent = cindex.SourceRange.from_locations(func.extent.start, end)\n\n return_type = \" \".join(\n t.spelling for t in cindex.TokenGroup.get_tokens(tu, extent=extent)\n )\n\n return f\"{return_type} {func.spelling}({args})\"", "def _handle_string(\n *, artifacts: types.ColumnArtifacts\n) -> typing.Union[String, Binary, Date, DateTime]:\n if artifacts.open_api.format in {None, \"byte\", \"password\"}:\n if artifacts.open_api.max_length is None:\n return String\n return String(length=artifacts.open_api.max_length)\n if artifacts.open_api.format == \"binary\":\n if artifacts.open_api.max_length is None:\n return Binary\n return Binary(length=artifacts.open_api.max_length)\n if artifacts.open_api.format == \"date\":\n return Date\n if artifacts.open_api.format == \"date-time\":\n return DateTime\n raise exceptions.FeatureNotImplementedError(\n f\"{artifacts.open_api.format} format for string is not supported.\"\n )", "def parse_typename(typename):\n if typename is None:\n raise ValueError(\"function type must be provided\")\n idx = typename.rfind(\"/\")\n if idx < 0:\n raise ValueError(\"function type must be of the from namespace/name\")\n namespace = typename[:idx]\n if not namespace:\n raise ValueError(\"function type's namespace must not be empty\")\n type = typename[idx + 1:]\n if not type:\n raise ValueError(\"function type's name must not be empty\")\n return namespace, type", "def get_fun_name(line):\n match = re.match(r'(function|macro)\\s*\\((\\w+)', line)\n if not match:\n return\n return match.groups()[1]", "def get_data_name(data_func, data_type, npoints, y_error_sigma, x_error_sigma):\n data_name = '{}_{}'.format(data_func.__name__, data_type)\n if data_func.__name__ != 'get_image':\n data_name += 'funcs'\n data_name += '_{}pts_{}ye'.format(npoints, y_error_sigma)\n if x_error_sigma is not None:\n data_name += '_{}xe'.format(x_error_sigma)\n return data_name.replace('.', '_')", "def typeName (self, typecode):\n if typecode == qmf2.SCHEMA_DATA_VOID: return \"void\"\n elif typecode == qmf2.SCHEMA_DATA_BOOL: return \"bool\"\n elif typecode == qmf2.SCHEMA_DATA_INT: return \"int\"\n elif typecode == qmf2.SCHEMA_DATA_FLOAT: return \"float\"\n elif typecode == qmf2.SCHEMA_DATA_STRING: return \"string\"\n elif typecode == qmf2.SCHEMA_DATA_MAP: return \"map\"\n elif typecode == qmf2.SCHEMA_DATA_LIST: return \"list\"\n elif typecode == qmf2.SCHEMA_DATA_UUID: return \"uuid\"\n else:\n raise ValueError (\"Invalid type code: %s\" % str(typecode))", "def _get_shader_type_noint(varinfo):\n if varinfo[\"ndim\"] == 1:\n shader_type = \"float\"\n elif varinfo[\"ndim\"] >= 2:\n shader_type = \"vec%d\" % varinfo[\"ndim\"]\n # matrix: (2,2) or (3,3) or (4,4)\n elif type(varinfo[\"ndim\"]) == tuple:\n shader_type = \"mat%d\" % varinfo[\"ndim\"][0]\n return shader_type", "def GrabVariableInitializerType(line: str) -> (FClass.FiMVariableTypes,int):\n # Boolean\n if line.startswith( FGlobal.Methods['Variable Boolean Array'] ):\n length = len( _findFirst(FGlobal.Methods['Variable Boolean Array'], lambda x: line.startswith(x)) )\n return FClass.FiMVariableTypes.BOOL_ARRAY, length\n if line.startswith( FGlobal.Methods['Variable Boolean'] ):\n length = len( _findFirst(FGlobal.Methods['Variable Boolean'], lambda x: line.startswith(x)) )\n return FClass.FiMVariableTypes.BOOL, length\n # Number\n if line.startswith( FGlobal.Methods['Variable Number Array'] ):\n length 
= len( _findFirst(FGlobal.Methods['Variable Number Array'], lambda x: line.startswith(x)) )\n return FClass.FiMVariableTypes.NUMBER_ARRAY, length\n if line.startswith( FGlobal.Methods['Variable Number'] ):\n length = len( _findFirst(FGlobal.Methods['Variable Number'], lambda x: line.startswith(x)) )\n return FClass.FiMVariableTypes.NUMBER, length\n # String\n if line.startswith( FGlobal.Methods['Variable String Array'] ):\n length = len( _findFirst(FGlobal.Methods['Variable String Array'], lambda x: line.startswith(x)) )\n return FClass.FiMVariableTypes.STRING_ARRAY, length\n if line.startswith( FGlobal.Methods['Variable String'] ):\n length = len( _findFirst(FGlobal.Methods['Variable String'], lambda x: line.startswith(x)) )\n return FClass.FiMVariableTypes.STRING, length\n # Char\n if line.startswith( FGlobal.Methods['Variable Character'] ):\n length = len( _findFirst(FGlobal.Methods['Variable Character'], lambda x: line.startswith(x)) )\n return FClass.FiMVariableTypes.CHAR, length\n\n # Default value\n return FClass.FiMVariableTypes.UNKNOWN,-1", "def firmware_version(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()", "def get_function_raw_name_at(self, address):\n pass", "def extract_function_name(maybe_function_str: str) -> Optional[str]:\n match = STACK_TRACE_LINE_RE.search(maybe_function_str)\n if match is not None:\n return match.group(2)\n return None", "def funcstring(funcname):\n s = str(funcname)[10:] #chop off '<function '\n spi = s.index(' ')\n return s[:spi]", "def config_gettype(function_name,\n config_name,\n param):\n config = configparser.ConfigParser()\n config.read(path_creator(config_name))\n #config.read(path_creator(config_name))\n if config[function_name][param].split(' ## ')[1] == 'str':\n return str(config.get(function_name,param).split(' ## ')[0])\n if config[function_name][param].split(' ## ')[1] == 'int':\n return int(config.get(function_name,param).split(' ## ')[0])\n if config[function_name][param].split(' ## ')[1] == 'float':\n return float(config.get(function_name,param).split(' ## ')[0])\n if config[function_name][param].split(' ## ')[1] == 'bool':\n return bool(config.get(function_name,param).split(' ## ')[0])\n if config[function_name][param].split(' ## ')[1] == 'path':\n return path_creator(str(config.get(function_name,param).split(' ## ')[0]))\n if config[function_name][param].split(' ## ')[1] == 'NoneType':\n return None", "def get_func_type(header):\n func_type = header.functionType\n if func_type == SSE.SCALAR:\n return FunctionType.Scalar\n elif func_type == SSE.AGGREGATION:\n return FunctionType.Aggregation\n elif func_type == SSE.TENSOR:\n return FunctionType.Tensor" ]
[ "0.5464446", "0.5418039", "0.53736573", "0.5311592", "0.53082407", "0.52509713", "0.52308387", "0.52233297", "0.5139834", "0.5137596", "0.5136655", "0.51061374", "0.50472605", "0.50168824", "0.5014381", "0.49955937", "0.4979121", "0.49771407", "0.49526176", "0.4947558", "0.4932718", "0.49298552", "0.4911541", "0.49067682", "0.49021164", "0.48970383", "0.48911554", "0.4874435", "0.48539236", "0.48426372" ]
0.69010454
0
Function will handle a getMacroEnvironment function (assuming it is correctly formatted (by checking with _deviceVariableFunctionName first))
def dispatchMacroEnvFunction(self, tree, tree_parent): cpp_func_name = "getMacroProperty" py_func = tree.attr # extract type from function name py_type = py_func[len(cpp_func_name):] if py_type not in self._fgpu_types: self.RaiseError(tree, f"'{py_type}' is not a valid FLAME GPU type") # get cpp type t = self._fgpu_types[py_type] cpp_func_name += f"<{t}" # mess with the parent to extract (and remove arguments so they dont end up in the argument list) if not tree_parent.args : self.RaiseError(tree, f" Macro environment function '{py_func}' is expected to have some arguments.") # if more than one arg then the rest are bounds to translate if len(tree_parent.args) > 1: bounds = tree_parent.args[1:] # process bounds by appending to cpp function template arguments for i in bounds: if isinstance(i, ast.Num): # num required for python 3.7 if not isinstance(i.n, int): self.RaiseError(tree, f" Macro environment function argument '{i}' should be an integer value.") cpp_func_name += f", {i.n}" else: # all Python > 3.7 if not isinstance(i, ast.Constant): self.RaiseError(tree, f" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).") if not isinstance(i.value, int): self.RaiseError(tree, f" Macro environment function argument '{i}' should be an integer value.") cpp_func_name += f", {i.value}" # remove bounds from argument list (in place) del tree_parent.args[1:] cpp_func_name += ">" self.write(cpp_func_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_environment_string(self):\n pass", "def __MakeEnvironment(self):\n environment= os.environ.copy()\n\n for key, value in self.__context.items():\n if type(value) is str:\n name = \"QMV_\" + key.replace(\".\", \"__\")\n environment[name]= value\n\n return environment", "def getenv(device, variable_name):\n command = 'getenv \"%s\" \"%s\"' % (device.udid, variable_name)\n variable = _run_command(command)\n # The variable has an extra new line at the end, so remove it when returning\n return variable[:-1]", "def test_DDSim_getEnvScript_vars( self ):\n\n platform = \"Windows\"\n appname = \"ddsim\"\n appversion = \"Vista\"\n self.ddsim.ops.getOptionsDict = Mock( return_value = S_OK( dict(KNIGHTSWORD=\"Ni\",\n WHEN=\"Always\",\n G4LEDATA=\"/dev/camelot\",\n )\n )\n )\n res = self.ddsim.getEnvScript( platform, appname, appversion )\n self.assertEqual( res['Value'], os.path.abspath(\"DDSimEnv.sh\") )\n self.assertTrue( os.path.exists(os.path.abspath(\"DDSimEnv.sh\")) )\n with open(\"DDSimEnv.sh\") as script:\n scriptLines = \"\".join(script.readlines())\n self.assertIn( \"declare -x KNIGHTSWORD=Ni\", scriptLines )\n self.assertIn( \"declare -x WHEN=Always\", scriptLines )\n self.assertIn( \"declare -x G4LEDATA=/dev/camelot\", scriptLines )\n self.assertNotIn( \"declare -x G4LEDATA=$(ls -d $G4DATA/\", scriptLines )\n self.assertIn( \"declare -x G4LEVELGAMMADATA=$(ls -d $G4DATA/\", scriptLines )", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def _get_environmentdef():\n if 'environmentdef' not in env:\n abort(\"Environment needs to be configured\")\n\n environmentdef = env.environmentdef\n\n # If we're running via `fab`, we should restrict the environment\n # to the current host.\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n\n return environmentdef", "def getenv(space, var):\n e = os.environ.get(var)\n if e is None:\n return space.w_False\n return space.newstr(e)", "def get_os_env():\n env = os.environ\n# print(\"env \\n\" , env)\n return env", "def substitute_macros(text):\n f_text = text\n for (pattern,replacement) in context.environment.items():\n replacement = replacement.replace(os.path.sep,'/')\n f_text = f_text.replace('$(%s)' % pattern.upper(), replacement)\n return f_text", "def _GetEnvironmentVars(benchmark_spec):\n return ' '.join([\n 'NUM_GPUS=%s' % benchmark_spec.total_gpus,\n 'OMP_NUM_THREADS=%s' % benchmark_spec.cpus_per_rank\n ])", "def _env_switch(environment: str, prod_value: T, qa_value: T) -> T:\n if environment == PROD:\n return prod_value\n return qa_value", "def _readenv(name, ctor, default):\n value = os.environ.get(name)\n if value is None:\n return default() if callable(default) else default\n try:\n return ctor(value)\n except Exception:\n import warnings\n\n warnings.warn(\n \"environ %s defined but failed to parse '%s'\" % (name, value),\n RuntimeWarning,\n )\n return default", "def collect_env():\n env_info = mmengine_collect_env()\n\n # MMEngine does not add the hipcc compiler information when collecting\n # environment information, so it is added here. 
When MMEngine v0.3.0 is\n # released, the code here can be removed.\n cuda_available = torch.cuda.is_available()\n if cuda_available and env_info.get('NVCC') == 'Not Available':\n CUDA_HOME = env_info['CUDA_HOME']\n if CUDA_HOME is not None and osp.isdir(CUDA_HOME):\n if CUDA_HOME == '/opt/rocm':\n try:\n nvcc = osp.join(CUDA_HOME, 'hip/bin/hipcc')\n nvcc = subprocess.check_output(\n f'\"{nvcc}\" --version', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('HIP version:')\n build = nvcc.rfind('')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n else:\n try:\n nvcc = osp.join(CUDA_HOME, 'bin/nvcc')\n nvcc = subprocess.check_output(f'\"{nvcc}\" -V', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('Cuda compilation tools')\n build = nvcc.rfind('Build ')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n env_info['NVCC'] = nvcc\n\n env_info['MMCV'] = mmcv.__version__\n\n try:\n from mmcv.ops import get_compiler_version, get_compiling_cuda_version\n except ModuleNotFoundError:\n env_info['MMCV Compiler'] = 'n/a'\n env_info['MMCV CUDA Compiler'] = 'n/a'\n else:\n env_info['MMCV Compiler'] = get_compiler_version()\n env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()\n\n return env_info", "def parse(env, platform_name=None):\n\n platform_name = platform_name or PLATFORM\n\n result = {}\n for variable, value in env.items():\n\n # Platform specific values\n if isinstance(value, dict):\n value = value.get(platform_name, \"\")\n\n if not value:\n continue\n\n # Allow to have lists as values in the tool data\n if isinstance(value, (list, tuple)):\n value = \";\".join(value)\n\n result[variable] = value\n\n return result", "def getEnvironment(self):\n pass", "def _get_interpreter_values(token='ZEPPELIN_INTERPRETER') -> dict:\n env_variables = {}\n for env_variable in os.environ:\n if env_variable.startswith(token):\n env_variables[env_variable] = os.environ[env_variable]\n\n LOG.info('%s-based token environment variables: %s', token, env_variables)\n\n return env_variables", "def _default_getter(environ, metadata, prefix, name):\n ce = metadata[CNF_KEY]\n var = ce.name if ce.name is not None else \"_\".join((*prefix, name)).upper()\n log.debug(\"looking for env var '%s'.\", var)\n try:\n return environ[var]\n except KeyError:\n raise MissingEnvValueError(var) from None", "def convert_macro(macro):\n convert_dict = {\n 'CONTACTNAME' : 'user.name',\n 'CONTACTALIAS' : 'user.display_name',\n 'CONTACTEMAIL' : 'user.email',\n 'CONTACTPAGER' : 'user.pager',\n 'SERVICEDESC' : 'service.name',\n 'SERVICEDISPLAYNAME' : 'service.display_name',\n 'SERVICECHECKCOMMAND' : 'service.check_command',\n 'SERVICESTATE' : 'service.state',\n 'SERVICESTATEID' : 'service.state_id',\n 'SERVICESTATETYPE' : 'service.state_type',\n 'SERVICEATTEMPT' : 'service.check_attempt',\n 'MAXSERVICEATTEMPT' : 'service.max_check_attempts',\n 'LASTSERVICESTATE' : 'service.last_state',\n 'LASTSERVICESTATEID' : 'service.last_state_id',\n 'LASTSERVICESTATETYPE' : 'service.last_state_type',\n 'LASTSERVICESTATECHANGE' : 'service.last_state_change',\n 'SERVICEDOWNTIME' : 'service.downtime_depth',\n 'SERVICEDURATIONSEC' : 'service.duration_sec',\n 'SERVICELATENCY' : 'service.latency',\n 'SERVICEEXECUTIONTIME' : 'service.execution_time',\n 'SERVICEOUTPUT' : 'service.output',\n 'SERVICEPERFDATA' : 'service.perfdata',\n 'LASTSERVICECHECK' : 'service.last_check',\n 'SERVICENOTES' : 
'service.notes',\n 'SERVICENOTESURL' : 'service.notes_url',\n 'SERVICEACTIONURL' : 'service.action_url',\n 'HOSTNAME' : 'host.name',\n 'HOSTADDRESS' : 'host.address',\n 'HOSTADDRESS6' : 'host.address6',\n 'HOSTDISPLAYNAME' : 'host.display_name',\n 'HOSTALIAS' : 'host.display_name',\n 'HOSTCHECKCOMMAND' : 'host.check_command',\n 'HOSTSTATE' : 'host.state',\n 'HOSTSTATEID' : 'host.state_id',\n 'HOSTSTATETYPE' : 'host.state_type',\n 'HOSTATTEMPT' : 'host.check_attempt',\n 'MAXHOSTATTEMPT' : 'host.max_check_attempts',\n 'LASTHOSTSTATE' : 'host.last_state',\n 'LASTHOSTSTATEID' : 'host.last_state_id',\n 'LASTHOSTSTATETYPE' : 'host.last_state_type',\n 'LASTHOSTSTATECHANGE' : 'host.last_state_change',\n 'HOSTDOWNTIME' : 'host.downtime_depth',\n 'HOSTDURATIONSEC' : 'host.duration_sec',\n 'HOSTLATENCY' : 'host.latency',\n 'HOSTEXECUTIONTIME' : 'host.execution_time',\n 'HOSTOUTPUT' : 'host.output',\n 'HOSTPERFDATA' : 'host.perfdata',\n 'LASTHOSTCHECK' : 'host.last_check',\n 'HOSTNOTES' : 'host.notes',\n 'HOSTNOTESURL' : 'host.notes_url',\n 'HOSTACTIONURL' : 'host.action_url',\n 'TOTALSERVICES' : 'host.num_services',\n 'TOTALSERVICESOK' : 'host.num_services_ok',\n 'TOTALSERVICESWARNING' : 'host.num_services_warning',\n 'TOTALSERVICESUNKNOWN' : 'host.num_services_unknown',\n 'TOTALSERVICESCRITICAL' : 'host.num_services_critical',\n 'COMMANDNAME' : 'command.name',\n 'NOTIFICATIONTYPE' : 'notification.type',\n 'NOTIFICATIONAUTHOR' : 'notification.author',\n 'NOTIFICATIONCOMMENT' : 'notification.comment',\n 'NOTIFICATIONAUTHORNAME' : 'notification.author',\n 'NOTIFICATIONAUTHORALIAS' : 'notification.author',\n 'TIMET' : 'icinga.timet',\n 'LONGDATETIME' : 'icinga.long_date_time',\n 'SHORTDATETIME' : 'icinga.short_date_time',\n 'DATE' : 'icinga.date',\n 'TIME' : 'icinga.time',\n 'PROCESSSTARTTIME' : 'icinga.uptime',\n 'TOTALHOSTSUP' : 'icinga.num_hosts_up',\n 'TOTALHOSTSDOWN' : 'icinga.num_hosts_down',\n 'TOTALHOSTSUNREACHABLE' : 'icinga.num_hosts_unreachable',\n 'TOTALHOSTSDOWNUNHANDLED' : '-',\n 'TOTALHOSTSUNREACHABLEUNHANDLED' : '-',\n 'TOTALHOSTPROBLEMS' : 'down',\n 'TOTALHOSTPROBLEMSUNHANDLED' : 'down-(downtime+acknowledged)',\n 'TOTALSERVICESOK' : 'icinga.num_services_ok',\n 'TOTALSERVICESWARNING' : 'icinga.num_services_warning',\n 'TOTALSERVICESCRITICAL' : 'icinga.num_services_critical',\n 'TOTALSERVICESUNKNOWN' : 'icinga.num_services_unknown',\n 'TOTALSERVICESWARNINGUNHANDLED' : '-',\n 'TOTALSERVICESCRITICALUNHANDLED' : '-',\n 'TOTALSERVICESUNKNOWNUNHANDLED' : '-',\n 'TOTALSERVICEPROBLEMS' : 'ok+warning+critical+unknown',\n 'TOTALSERVICEPROBLEMSUNHANDLED' : 'warning+critical+unknown-(downtime+acknowledged)',\n 'CHANGE_CUSTOM_CONTACT_VAR' : 'CHANGE_CUSTOM_USER_VAR',\n }\n\n #The following external commands are not supported:\n unsupported = [\n 'CHANGE_*MODATTR',\n 'CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD',\n 'CHANGE_HOST_NOTIFICATION_TIMEPERIOD',\n 'CHANGE_SVC_NOTIFICATION_TIMEPERIOD',\n 'DEL_DOWNTIME_BY_HOSTGROUP_NAME',\n 'DEL_DOWNTIME_BY_START_TIME_COMMENT',\n 'DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST',\n 'DISABLE_CONTACT_HOST_NOTIFICATIONS',\n 'DISABLE_CONTACT_SVC_NOTIFICATIONS',\n 'DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS',\n 'DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS',\n 'DISABLE_FAILURE_PREDICTION',\n 'DISABLE_HOST_AND_CHILD_NOTIFICATIONS',\n 'DISABLE_HOST_FRESHNESS_CHECKS',\n 'DISABLE_NOTIFICATIONS_EXPIRE_TIME',\n 'DISABLE_SERVICE_FRESHNESS_CHECKS',\n 'ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST',\n 'ENABLE_CONTACT_HOST_NOTIFICATIONS',\n 'ENABLE_CONTACT_SVC_NOTIFICATIONS',\n 
'ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS',\n 'ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS',\n 'ENABLE_FAILURE_PREDICTION',\n 'ENABLE_HOST_AND_CHILD_NOTIFICATIONS',\n 'ENABLE_HOST_FRESHNESS_CHECKS',\n 'ENABLE_SERVICE_FRESHNESS_CHECKS',\n 'READ_STATE_INFORMATION',\n 'SAVE_STATE_INFORMATION',\n 'SET_HOST_NOTIFICATION_NUMBER',\n 'SET_SVC_NOTIFICATION_NUMBER',\n 'START_ACCEPTING_PASSIVE_HOST_CHECKS',\n 'START_ACCEPTING_PASSIVE_SVC_CHECKS',\n 'START_OBSESSING_OVER_HOST',\n 'START_OBSESSING_OVER_HOST_CHECKS',\n 'START_OBSESSING_OVER_SVC',\n 'START_OBSESSING_OVER_SVC_CHECKS',\n 'STOP_ACCEPTING_PASSIVE_HOST_CHECKS',\n 'STOP_ACCEPTING_PASSIVE_SVC_CHECKS',\n 'STOP_OBSESSING_OVER_HOST',\n 'STOP_OBSESSING_OVER_HOST_CHECKS',\n 'STOP_OBSESSING_OVER_SVC',\n 'STOP_OBSESSING_OVER_SVC_CHECKS',\n]\n # Strip the $$\n sane_macro = macro.translate(None, '$')\n #debug(sane_macro)\n # Return true when it's a global parameter\n if sane_macro in convert_dict:\n return False, '$' + convert_dict[sane_macro] + '$'\n elif sane_macro in unsupported:\n return False, 'UNSUPPORTED'\n elif not sane_macro in ['ARG1','ARG2','ARG3','ARG4','ARG5','ARG6']:\n return True, sane_macro.title()\n else:\n return False, ''", "def _setenv(self):\n tokens = {}\n tokens[\"CT_TIMESTAMP\"] = self._timestamp\n tokens[\"CT_SUBMITTER\"] = self._node.name()\n # tokens[\"CT_HIPBASE\"] = self._file[\"hipbase\"]\n tokens[\"CT_SCENE\"] = self._scene\n tokens[\"CT_PROJECT\"] = self.project_name\n\n for token in tokens:\n hou.putenv(token, tokens[token])\n\n return tokens", "def get_environment(self):\r\n return self.mcas[0].get_environment()", "def MakeEnvironment(self, context):\n\n # Start with any environment variables that are already present\n # in the environment.\n environment = os.environ.copy()\n # Copy context variables into the environment.\n for key, value in context.items():\n name = \"QMV_\" + key\n environment[name] = value\n # Extract additional environment variable assignments from the\n # 'Environment' field.\n for assignment in self.environment:\n if \"=\" in assignment:\n # Break the assignment at the first equals sign.\n variable, value = string.split(assignment, \"=\", 1)\n environment[variable] = value\n else:\n raise ValueError, \\\n qm.error(\"invalid environment assignment\",\n assignment=assignment)\n return environment", "def MakeEnvironment(self, context):\n\n # Start with any environment variables that are already present\n # in the environment.\n environment = os.environ.copy()\n # Copy context variables into the environment.\n for key, value in context.items():\n name = \"QMV_\" + key\n environment[name] = value\n return environment", "def get(self):\n if not self.__name in g_platform_variables:\n raise RuntimeError(\"unknown platform variable '%s'\" % (self.__name))\n current_var = g_platform_variables[self.__name]\n combinations = get_platform_combinations()\n for ii in combinations:\n if ii in current_var:\n return current_var[ii]\n raise RuntimeError(\"current platform %s not supported for variable '%s'\" % (str(combinations), self.__name))", "def evaluateMacro(compiled_expression):", "def constructor_env_variables(loader, node):\n value = loader.construct_scalar(node)\n match = pattern.findall(value) # to find all env variables in line\n if match:\n full_value = value\n for g in match:\n full_value = full_value.replace(\n f'${{{g}}}', os.environ.get(g, g)\n )\n return full_value\n return value", "def handle_software_environment(cwl_env: Dict[str, str], script: str) -> Dict[str, str]:\n exec_env = cwl_env.copy()\n 
exec_env[\"_CWLTOOL\"] = \"1\"\n res = subprocess.run([\"bash\", script], shell=False, env=exec_env) # nosec\n if res.returncode != 0:\n sys.stderr.write(\"Error while using SoftwareRequirements to modify environment\\n\")\n return cwl_env\n\n env = cwl_env.copy()\n with open(\"output_environment.dat\") as _:\n data = _.read().strip(\"\\0\")\n for line in data.split(\"\\0\"):\n key, val = line.split(\"=\", 1)\n if key in (\"_\", \"PWD\", \"SHLVL\", \"TMPDIR\", \"HOME\", \"_CWLTOOL\"):\n # Skip some variables that are meaningful to the shell or set\n # specifically by the CWL runtime environment.\n continue\n env[key] = val\n return env", "def testGetPortageEnvVar(self):\n stage = self.ConstructStage()\n board = self._current_board\n\n envvar = 'EXAMPLE'\n rc_mock = self.StartPatcher(cros_build_lib_unittest.RunCommandMock())\n rc_mock.AddCmdResult(['portageq-%s' % board, 'envvar', envvar],\n output='RESULT\\n')\n\n result = stage._GetPortageEnvVar(envvar, board)\n self.assertEqual(result, 'RESULT')", "def retrieve_environment(env_config, filter_config):\n global path, filter_list_from_config\n\n arena_data_server = acm.FDhDatabase[\"ADM\"].ADSNameAndPort().upper()\n configuration = acm.GetDefaultValueFromName(\n acm.GetDefaultContext(), acm.FObject, env_config)\n\n filter_list_from_config = acm.GetDefaultValueFromName(\n acm.GetDefaultContext(), acm.FList, filter_config).split(';')\n\n dom_xml = xml.parseString(configuration)\n tags = dom_xml.getElementsByTagName(\"Host\")\n for element in tags: \n if element.getAttribute(\"Name\") == arena_data_server:\n path = element.getElementsByTagName(\n \"output_path\")[0].childNodes[0].data\n print(\" path found: \", path)", "def test_DDSim_getEnvScript_noVars( self ):\n\n platform = \"Windows\"\n appname = \"ddsim\"\n appversion = \"Vista\"\n self.ddsim.ops.getOptionsDict = Mock( return_value = S_ERROR( \"No Variables Set\" ) )\n self.ddsim.ops.getValue = Mock( return_value = None )\n res = self.ddsim.getEnvScript( platform, appname, appversion )\n self.ddsim.ops.getValue.assert_called_once_with(\"/AvailableTarBalls/Windows/ddsim/Vista/InitScript\", None)\n self.assertEqual( res['Value'], os.path.abspath(\"DDSimEnv.sh\") )\n self.assertTrue( os.path.exists(os.path.abspath(\"DDSimEnv.sh\")) )\n with open(\"DDSimEnv.sh\") as script:\n scriptLines = \"\".join(script.readlines())\n self.assertIn( \"declare -x G4LEDATA=$(ls -d $G4DATA/\", scriptLines )\n self.assertNotIn( \"source \", scriptLines )", "def _Get_Env_Key(self, line0):\n #line0 = chunk[0]\n #code = '\\n'.join(chunk)\n q2 = self.p2.match(line0)\n if q2:\n envkey = q2.group(1)\n else:\n envkey = 'body'\n #codelist = code.split('\\n')\n return envkey#, code" ]
[ "0.58264863", "0.5568216", "0.53334945", "0.5327967", "0.51543343", "0.51488876", "0.51430243", "0.5069374", "0.50373465", "0.5026348", "0.50191504", "0.5002579", "0.49759382", "0.49720997", "0.49513227", "0.49360543", "0.4913783", "0.49026003", "0.48999316", "0.4882077", "0.48813444", "0.4872327", "0.4866534", "0.48442066", "0.48220772", "0.4811985", "0.48090705", "0.48069423", "0.47924456", "0.47458562" ]
0.61382043
0
Handles arguments for a FLAME GPU device function. Arguments must use type hinting to be translated to cpp.
def dispatchFGPUDeviceFunctionArgs(self, tree): # reset the locals variable stack self._locals = ["pyflamegpu"] # input message first = True annotation = None for arg in tree.args.args: # ensure that there is a type annotation if not arg.annotation: self.RaiseError(arg, "Device function argument requires type annotation") # comma if not first if not first: self.write(", ") self.dispatchType(arg.annotation) self.write(f" {arg.arg}") # add arg to local variable stack self._locals.append(arg.arg) first = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True):\n cpp_func_name = \"\"\n py_func = tree.attr\n # extract function name start\n for prefix in permitted_prefixes:\n if py_func.startswith(prefix):\n cpp_func_name = prefix\n py_func = py_func[len(prefix):]\n break # dont allow the else\n else:\n return None\n # check type and lengths\n if allow_lengths:\n #split to get type and Array Length (This could **potentially** be looked up from the model description but current syntax is consistent with swig bindings) \n type_and_length = py_func.split(\"Array\")\n if type_and_length[0] not in self._fgpu_types:\n self.RaiseError(tree, f\"'{type_and_length[0]}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[type_and_length[0]]\n # generate template args\n if (len(type_and_length) == 1):\n cpp_func_name += f\"<{t}>\"\n elif (len(type_and_length) == 2):\n cpp_func_name += f\"<{t}, {type_and_length[1]}>\"\n else:\n return None\n else:\n if py_func not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_func}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[py_func]\n cpp_func_name += f\"<{t}>\"\n # return \n return cpp_func_name", "def _FunctionDef(self, t):\n self.write(\"\\n\")\n # check decorators\n if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0], ast.Attribute):\n self.RaiseError(t, \"Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\") \n # FLAMEGPU_AGENT_FUNCTION\n if t.decorator_list[0].attr == 'agent_function' and t.decorator_list[0].value.id == 'pyflamegpu':\n if getattr(t, \"returns\", False):\n self.RaiseWarning(t, \"Function definition return type not supported on 'pyflamegpu.agent_function'\")\n self.fill(f\"FLAMEGPU_AGENT_FUNCTION({t.name}, \")\n self.dispatchFGPUFunctionArgs(t)\n self.write(\")\")\n # FLAMEGPU_DEVICE_FUNCTION\n elif t.decorator_list[0].attr == 'device_function' and t.decorator_list[0].value.id == 'pyflamegpu':\n self.fill(f\"FLAMEGPU_DEVICE_FUNCTION \")\n if t.returns:\n self.dispatchType(t.returns)\n else:\n self.write(\"void\")\n self.write(f\" {t.name}(\")\n self.dispatchFGPUDeviceFunctionArgs(t)\n self.write(\")\")\n # add to list of defined functions that can be called\n self._device_functions.append(t.name)\n # FLAMEGPU_DEVICE_FUNCTION\n elif t.decorator_list[0].attr == 'agent_function_condition' and t.decorator_list[0].value.id == 'pyflamegpu':\n # check for return annotation\n if not hasattr(t, \"returns\"):\n self.RaiseError(t, \"Agent function conditions must have a 'bool' return type specified as a return type annotation\")\n # check for return annotation type\n if not isinstance(t.returns, ast.Name):\n self.RaiseError(t, \"Agent function conditions return type must be 'bool'\")\n if t.returns.id is not 'bool':\n self.RaiseError(t, \"Agent function conditions return type must be 'bool'\")\n # check to ensure no arguments (discard any with a warning)\n if t.args.args:\n self.RaiseWarning(t, \"Agent function conditions does not support arguments. These will be discarded.\")\n # write the agent function macro\n self.fill(f\"FLAMEGPU_AGENT_FUNCTION_CONDITION({t.name})\")\n else:\n self.RaiseError(t, \"Function definition uses an unsupported decorator. 
Must use either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\")\n self.enter()\n self.dispatch(t.body)\n self.leave()", "def inner(*args, **kwargs):\n\n s = f(*args, **kwargs)\n\n if torch.cuda.is_available():\n return torch.from_numpy(s).cuda().type(dtype)\n\n return torch.from_numpy(s).type(dtype)", "def inner(*args, **kwargs):\n\n s, r, d, p = f(*args, **kwargs)\n\n if torch.cuda.is_available():\n s = torch.from_numpy(s).cuda().type(dtype)\n\n else:\n s = torch.from_numpy(s).type(dtype)\n\n return s, r, d, p", "def set_arg_types( self ):\n if self.mode == 'grad':\n self.function = terms.dw_grad\n use_method_with_name( self, self.get_fargs_grad, 'get_fargs' )\n elif self.mode == 'div':\n self.function = terms.dw_div\n use_method_with_name( self, self.get_fargs_div, 'get_fargs' )\n else:\n self.function = self.d_eval\n use_method_with_name( self, self.get_fargs_eval, 'get_fargs' )\n self.use_caches = {'state_in_volume_qp' : [['parameter_s']],\n 'div_vector' : [['parameter_v']]}", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result 
or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def __init__(self, *args, **kwargs):\n super(MadryEtAlMultiGPU, self).__init__(*args, **kwargs)\n self.structural_kwargs += ['ngpu']", "def _handle_arg(obj, arg):\n if isinstance(arg, PythonTensor):\n if arg.has_init:\n arg.init_data()\n if not arg.const_arg:\n return arg\n elif isinstance(arg, (Tensor, CSRTensor, COOTensor)):\n return arg\n elif hasattr(arg, \"__ms_mutable__\") and getattr(arg, \"__ms_mutable__\"):\n # mutable([]) will be eliminated by FuncGraphSpecializer, and empty list is not supported by backend.\n if isinstance(arg, list) and not arg:\n return None\n return arg\n elif context.get_context(\"grad_for_scalar\") and isinstance(arg, (int, float)):\n return arg\n elif hasattr(obj, \"enable_tuple_broaden\") and obj.enable_tuple_broaden and isinstance(arg, tuple) and \\\n _check_all_tensor(arg):\n return arg\n return None", "def build_func_body(func_name, arg_dict, return_type):\n body = \"\"\n arg_list = \"\"\n\n # the following are pointers to scalar outputs\n # Note: pBufferSize was renamed pBufferSizeInBytes in v6.5\n scalar_ptr_outputs = ['nnzTotalDevHostPtr',\n 'pBufferSize',\n 'pBufferSizeInBytes',\n 'resultDevHostPtr']\n\n is_creator = 'cusparseCreate' in func_name\n is_getter = 'cusparseGet' in func_name\n\n if return_type == 'cusparseStatus_t' and not (is_creator or is_getter):\n is_return = False\n else:\n is_return = True\n\n # else:\n return_str = ''\n for k, v in arg_dict.items():\n\n \"\"\"\n set 
some flags based on the name/type of the argument\n will use these flags to determine whether and how to call ffi.new or\n ffi.cast on each variable\n \"\"\"\n is_ptr = '*' in v\n is_cusparse_type = '_t' in v\n is_cusparse_ptr = is_ptr and is_cusparse_type\n is_output_scalar = k in scalar_ptr_outputs\n if k in ['alpha', 'beta']:\n is_scalar = True\n else:\n is_scalar = False\n if is_getter:\n is_gpu_array = False\n else:\n is_gpu_array = is_ptr and (not is_cusparse_ptr) and (not is_scalar)\n if 'Complex' in v:\n is_complex = True\n else:\n is_complex = False\n\n # convert variable to appropriate type for the FFI\n if is_output_scalar:\n # for scalar outputs make a new pointer\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_getter and is_ptr and (return_type == 'cusparseStatus_t'):\n # any pointers in cusparseGet* are new outputs to be created\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n elif is_gpu_array:\n # pass pointer to GPU array data (use either .ptr or .gpudata)\n body += \"%s = ffi.cast('%s', %s.ptr)\\n\" % (k, v, k)\n elif is_cusparse_ptr:\n if is_creator:\n # generate custom cusparse type\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n else:\n # cast to the custom cusparse type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_ptr and is_scalar:\n # create new pointer, with value initialized to scalar\n if is_complex:\n # complex case is a bit tricky. requires ffi.buffer\n body += \"%sffi = ffi.new('%s')\\n\" % (k, v)\n if 'cusparseC' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex64(%s).tostring()\\n\" % (k, k)\n elif 'cusparseZ' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex128(%s).tostring()\\n\" % (k, k)\n else:\n body += \"%s = ffi.new('%s', %s)\\n\" % (k, v, k)\n elif is_ptr or v == 'cudaStream_t':\n # case non-scalar pointer to appropriate type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n else:\n # don't need explicit cast for plain int, float, etc\n pass\n\n # build the list of arguments to pass to the API\n if is_ptr and is_scalar and is_complex:\n # take into account modified argument name for complex scalars\n arg_list += \"%sffi, \" % k\n else:\n arg_list += \"%s, \" % k\n\n # add the function call and optionally return the result\n last_key = k\n arg_list = arg_list[:-2] # remove trailing \", \"\n if is_getter and return_type != 'cusparseStatus_t':\n body += \"return ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n else:\n # check cusparseStatus_t state before returning\n call_str = \"status = ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n body += split_line(call_str, break_pattern=', ', nmax=76)\n body += \"cusparseCheckStatus(status)\\n\"\n if is_return:\n # len(arg_dict) == 2) is to avoid return for cusparseGetLevelInfo\n if is_creator or (is_getter and (len(arg_dict) == 2)):\n body += \"return %s[0]\\n\" % last_key\n else:\n body += \"#TODO: return the appropriate result\"\n body += '\\n\\n'\n return reindent(body, numSpaces=4, lstrip=False)", "def get_helper_c_code_args(self):\r\n return {'c_prefix': 'PyGpuArray',\r\n 'strides_mul': 1\r\n }", "def apply(op_type, device, inputs, **kwargs):\n cache = ExecutionCache.get_cache(op_type)\n run_config = cache.get_config(device, **kwargs)\n return FunctionLib._forward(inputs, run_config, **kwargs)", "def fn(*args, **kwargs):\n pass", "def function(args):\n pass", "def handle_args():\n parser = argparse.ArgumentParser(description=\"Faster-RCNN Implementation\")\n parser.add_argument(\"-handle-gpu\", action=\"store_true\", help=\"Tensorflow 2 GPU 
compatibility flag\")\n args = parser.parse_args()\n return args", "def parse_params(self, ngpu=1, **kwargs):\n\n return_status = super(MadryEtAlMultiGPU, self).parse_params(**kwargs)\n self.ngpu = ngpu\n\n return return_status", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == 
\"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. 
{ \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index", "def handle_args(args):\n\n if args.id and len(args.id) > MAX_ID_LENGTH:\n raise ValueError\n if args.payload and (len(args.payload) % 2 != 0 or len(args.payload) > MAX_PAYLOAD_LENGTH):\n raise ValueError\n\n if args.id_bitmap:\n if len(args.id_bitmap) > MAX_ID_LENGTH:\n raise ValueError\n for i in range(len(args.id_bitmap)):\n args.id_bitmap[i] = string_to_bool(args.id_bitmap[i])\n if args.payload_bitmap:\n if len(args.payload_bitmap) > MAX_PAYLOAD_LENGTH:\n raise ValueError\n for i in range(len(args.payload_bitmap)):\n args.payload_bitmap[i] = string_to_bool(args.payload_bitmap[i])\n\n if args.alg == \"random\":\n __handle_random(args)\n elif args.alg == \"linear\":\n __handle_linear(args)\n elif args.alg == \"replay\":\n __handle_replay(args)\n elif args.alg == \"ring_bf\":\n __handle_ring_bf(args)\n elif args.alg == \"mutate\":\n __handle_mutate(args)\n else:\n raise ValueError", "def call_function(self):\n try:\n arg_list = self.argument_list()\n function_dict = {}\n info = []\n for name_arg in arg_list:\n type_arg = self.arguments_type[name_arg]\n function_dict[name_arg] = utils.value_from_rpc(self.argument(name_arg)[1])\n info.append('{0}({1}): {2}'.format(name_arg, type_arg, function_dict[name_arg]))\n\n log.info('Execute command \\'{0}\\' with arguments [{1}] from device \\'{2}\\''\n .format(self.name(), '; '.join(info), self.device.id))\n self.function(self.device, **function_dict)\n\n except Exception as err:\n t = traceback.format_exc()\n log.error('Command \\'{0}\\' raise exception: {1}'.format(self.name(), decode_string(t)))", "def __call__(self, *args, backend=\"auto\", device_id=-1, ranges=None, out=None):\n\n dtype = args[0].dtype.__str__().split(\".\")[1]\n\n nx, ny = get_sizes(self.aliases, *args)\n nout, nred = (nx, ny) if self.axis == 1 else (ny, nx)\n\n if \"Arg\" in self.reduction_op:\n # when using Arg type reductions,\n # if nred is greater than 16 millions and dtype=float32, the result is not reliable\n # because we encode indices as 
floats, so we raise an exception ;\n # same with float16 type and nred>2048\n if nred > 1.6e7 and dtype in (\"float32\", \"float\"):\n raise ValueError(\n \"size of input array is too large for Arg type reduction with single precision. Use double precision.\"\n )\n elif nred > 2048 and dtype in (\"float16\", \"half\"):\n raise ValueError(\n \"size of input array is too large for Arg type reduction with float16 dtype..\"\n )\n\n out = GenredAutograd.apply(\n self.formula,\n self.aliases,\n backend,\n dtype,\n device_id,\n ranges,\n self.optional_flags,\n self.rec_multVar_highdim,\n nx,\n ny,\n out,\n *args\n )\n\n return postprocess(out, \"torch\", self.reduction_op, nout, self.opt_arg, dtype)", "def handle_input(self, data, feature_names, feature_types):\n interface = data.__cuda_array_interface__\n if 'mask' in interface:\n interface['mask'] = interface['mask'].__cuda_array_interface__\n interface_str = bytes(json.dumps(interface, indent=2), 'utf-8')\n\n handle = ctypes.c_void_p()\n _check_call(\n _LIB.XGDMatrixCreateFromArrayInterface(\n interface_str,\n ctypes.c_float(self.missing),\n ctypes.c_int(self.nthread),\n ctypes.byref(handle)))\n return handle, feature_names, feature_types", "def compile_function(self, function, arguments):", "def _set_function_parameters(self, p_args=None) -> bool:\n if self.get_type() == self.C_UNIT_CONV_LENGTH:\n self.units = {\n 'fm' : 1000000000000000,\n 'pm' : 1000000000000,\n 'nm' : 1000000000,\n 'um' : 1000000,\n 'mm' : 1000,\n 'cm' : 100,\n 'm' : 1.0,\n 'dam' : 0.1,\n 'hm' : 0.01,\n 'km' : 0.001,\n 'Mm' : 0.000001,\n 'Gm' : 0.000000001,\n 'Tm' : 0.000000000001,\n 'Pm' : 0.000000000000001,\n 'inch' : 39.3701,\n 'ft' : 3.28084,\n 'yd' : 1.09361,\n 'mi' : 0.000621371,\n 'nautMi' : 1.0/1852.0,\n 'lightYear' : 1.0/(9.4607304725808*(10**15))\n }\n \n elif self.get_type() == self.C_UNIT_CONV_PRESSURE:\n self.units = {\n 'Pa' : 100000.0,\n 'hPa' : 1000.0,\n 'kPa' : 100.0,\n 'MPa' : 0.1,\n 'bar' : 1.0,\n 'mbar' : 1000.0,\n 'ubar' : 1000000.0,\n 'kgcm2' : 1.01972,\n 'atm' : 0.986923,\n 'mmHg' : 750.062,\n 'mmH2O' : 10197.162129779,\n 'mH2O' : 10.197162129779,\n 'ftH2O' : 33.455256555148,\n 'inH2O' : 401.865,\n 'inHg' : 29.53,\n 'psi' : 14.5038\n }\n \n elif self.get_type() == self.C_UNIT_CONV_CURRENT:\n self.units = {\n 'fA' : 1000000000000000,\n 'pA' : 1000000000000,\n 'nA' : 1000000000,\n 'uA' : 1000000,\n 'mA' : 1000,\n 'cA' : 100,\n 'A' : 1.0,\n 'daA' : 0.1,\n 'hA' : 0.01,\n 'kA' : 0.001,\n 'MA' : 0.000001,\n 'GA' : 0.000000001,\n 'TA' : 0.000000000001,\n 'PA' : 0.000000000000001,\n }\n \n elif self.get_type() == self.C_UNIT_CONV_FORCE:\n self.units = {\n 'fN' : 1000000000000000,\n 'pN' : 1000000000000,\n 'nN' : 1000000000,\n 'uN' : 1000000,\n 'mN' : 1000,\n 'cN' : 100,\n 'N' : 1.0,\n 'daN' : 0.1,\n 'hN' : 0.01,\n 'kN' : 0.001,\n 'MN' : 0.000001,\n 'GN' : 0.000000001,\n 'TN' : 0.000000000001,\n 'PN' : 0.000000000000001,\n 'shortTonF' : 1.124045e-4,\n 'longTonF' : 1.003611e-4,\n 'kipf' : 2.248089e-4,\n 'lbf' : 2.248089431e-1,\n 'ozf' : 3.5969430896,\n 'pdf' : 7.2330138512,\n 'gf' : 1.019716213e+2,\n 'kgf' : 1.019716213e-1,\n 'dyn' : 1e+5,\n 'J/m' : 1.0,\n 'J/cm' : 100.0\n }\n \n elif self.get_type() == self.C_UNIT_CONV_POWER:\n self.units = {\n 'fW' : 1000000000000000*1e3,\n 'pW' : 1000000000000*1e3,\n 'nW' : 1000000000*1e3,\n 'uW' : 1000000*1e3,\n 'mW' : 1000*1e3,\n 'cW' : 100*1e3,\n 'W' : 1.0*1e3,\n 'daW' : 0.1*1e3,\n 'hW' : 0.01*1e3,\n 'kW' : 0.001*1e3,\n 'MW' : 0.000001*1e3,\n 'GW' : 0.000000001*1e3,\n 'TW' : 0.000000000001*1e3,\n 'PW' : 
0.000000000000001*1e3,\n 'BTU/hr' : 3412.14,\n 'BTU/min' : 56.869,\n 'BTU/sec' : 0.94781666666,\n 'cal/sec' : 238.85,\n 'cal/min' : 238.85*60,\n 'cal/hr' : 238.85*60*60,\n 'erg/sec' : 10e9,\n 'erg/min' : 10e9*60,\n 'erg/hr' : 10e9*60*60,\n 'ftlb/sec' : 737.56,\n 'kCal/sec' : 0.24,\n 'kCal/min' : 0.24*60,\n 'kCal/hr' : 0.24*60*60,\n 'VA' : 1e3,\n 'metric_ton_ref' : 0.259,\n 'US_ton_ref' : 0.2843451361,\n 'J/sec' : 1000.0,\n 'J/min' : 1000.0*60,\n 'J/hr' : 1000.0*60*60,\n 'kgf-m/sec' : 101.97162129779,\n 'hp_mech' : 1.3410220888,\n 'hp_ele' : 1.3404825737,\n 'hp_metric' : 1.359621617304\n }\n \n elif self.get_type() == self.C_UNIT_CONV_MASS:\n self.units = {\n 'fg' : 1000000000000000*1e3,\n 'pg' : 1000000000000*1e3,\n 'ng' : 1000000000*1e3,\n 'ug' : 1000000*1e3,\n 'mg' : 1000*1e3,\n 'cg' : 100*1e3,\n 'g' : 1.0*1e3,\n 'dag' : 0.1*1e3,\n 'hg' : 0.01*1e3,\n 'kg' : 0.001*1e3,\n 'Mg' : 0.000001*1e3,\n 'Gg' : 0.000000001*1e3,\n 'Tg' : 0.000000000001*1e3,\n 'Pg' : 0.000000000000001*1e3,\n 'metricTon' : 1.0/1000.0,\n 'shortTon' : 1.0/907.185,\n 'longTon' : 1.0/1016.047,\n 'slug' : 1.0/14.5939029,\n 'lb' : 2.2046226218,\n 'oz' : 35.274,\n 'grain' : 2.2046226218*7000.0\n }\n \n elif self.get_type() == self.C_UNIT_CONV_TIME:\n self.units = {\n 'fs' : 1000000000000000,\n 'ps' : 1000000000000,\n 'ns' : 1000000000,\n 'us' : 1000000,\n 'ms' : 1000,\n 'cs' : 100,\n 's' : 1.0,\n 'das' : 0.1,\n 'hs' : 0.01,\n 'ks' : 0.001,\n 'Ms' : 0.000001,\n 'Gs' : 0.000000001,\n 'Ts' : 0.000000000001,\n 'Ps' : 0.000000000000001,\n 'min' : 1.0/60.0,\n 'hr' : 1.0/60.0/60.0,\n 'day' : 1.0/60.0/60.0/24.0\n }\n \n elif self.get_type() == self.C_UNIT_CONV_TEMPERATURE:\n self.units = {\n 'K' : 'Kelvin',\n 'R' : 'Rankine',\n 'F' : 'Fahrenheit',\n 'C' : 'Celcius',\n }\n \n if self.units.get(self._unit_in) is not None and self.units.get(self._unit_out) is not None:\n return True\n else:\n raise NotImplementedError('The input and/or output units do not exist!')", "def run(self, fn, args=(), kwargs=None, options=None): # pylint: disable=useless-super-delegation\n return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)", "def ubercam(*args, **kwargs)->AnyStr:\n pass", "def __call__(self, *args, **kwargs):\n dprint(2, \"FunctionMetadata::__call__\", self.func.__name__, args, kwargs, self.numba_args)\n atypes = tuple([type(x) for x in args])\n try_again = True\n count = 0\n if not self.numba_pfunc:\n if len(self.numba_args) == 0 and not self.no_global_cache:\n self.numba_pfunc = get_fm(FillerFunc(self.func), True)\n self.numba_func = get_fm(FillerFunc(self.func), False)\n else:\n self.numba_pfunc = numba.njit(parallel=True, **self.numba_args)(self.func)\n self.numba_func = numba.njit(**self.numba_args)(self.func)\n\n if gpu_present:\n dprint(1, \"using gpu context\")\n\n with dpctl.device_context(\"level0:gpu\"):\n while try_again and count < 2:\n count += 1\n try_again = False\n if self.ngfunc.get(atypes, True):\n try:\n ret = self.numba_pfunc(*args, **kwargs)\n self.ngfunc[atypes] = True\n return ret\n except numba.core.errors.TypingError as te:\n tetxt = str(te)\n tesplit = tetxt.splitlines()\n for teline in tesplit:\n if \"Untyped global name\" in teline and \"ramba.StencilMetadata\" in teline:\n try_again = True\n # Name of global that is of type ramba.StencilMetadata\n tes = teline[21:].split()[0][:-2]\n outer_globals = self.func.__globals__\n outer_locals = {}\n etes = eval(tes, outer_globals, outer_locals)\n etes.compile() # Converts to a Numba StencilFunc\n outer_globals[tes] = etes.sfunc # Rewrite the global to the 
Numba StencilFunc\n self.numba_pfunc = numba.njit(parallel=True, **self.numba_args)(self.func)\n self.numba_func = numba.njit(**self.numba_args)(self.func)\n if not try_again:\n self.ngfunc[atypes] = False\n dprint(1, \"Numba GPU ParallelAccelerator attempt failed.\")\n except:\n self.ngfunc[atypes] = False\n dprint(1, \"Numba GPU ParallelAccelerator attempt failed.\")\n\n while try_again and count < 2:\n count += 1\n try_again = False\n if self.npfunc.get(atypes, True):\n try:\n ret = self.numba_pfunc(*args, **kwargs)\n self.npfunc[atypes] = True\n return ret\n except numba.core.errors.TypingError as te:\n tetxt = str(te)\n tesplit = tetxt.splitlines()\n for teline in tesplit:\n if \"Untyped global name\" in teline and \"ramba.StencilMetadata\" in teline:\n try_again = True\n # Name of global that is of type ramba.StencilMetadata\n tes = teline[21:].split()[0][:-2]\n outer_globals = self.func.__globals__\n outer_locals = {}\n etes = eval(tes, outer_globals, outer_locals)\n etes.compile() # Converts to a Numba StencilFunc\n outer_globals[tes] = etes.sfunc # Rewrite the global to the Numba StencilFunc\n self.numba_pfunc = numba.njit(parallel=True, **self.numba_args)(self.func)\n self.numba_func = numba.njit(**self.numba_args)(self.func)\n if not try_again:\n self.npfunc[atypes] = False\n dprint(1, \"Numba ParallelAccelerator attempt failed.\")\n except:\n self.npfunc[atypes] = False\n dprint(1, \"Numba ParallelAccelerator attempt failed.\")\n\n if self.nfunc.get(atypes, True):\n try:\n ret = self.numba_func(*args, **kwargs)\n self.nfunc[atypes] = True\n dprint(3, \"Numba attempt succeeded.\")\n return ret\n except numba.core.errors.TypingError as te:\n print(\"Ramba TypingError:\", te, type(te))\n self.npfunc[atypes] = False\n dprint(1, \"Numba attempt failed.\")\n except:\n self.nfunc[atypes] = False\n dprint(1, \"Numba attempt failed.\")\n raise\n\n return self.func(*args, **kwargs)", "def run(fn, *input_values, **kwds):\n \n ee = kwds.get('ee', shared_exec_engine)\n input_types = [arg.type for arg in fn.args]\n gv_inputs = [gv_from_python(x, t) \n for (x,t) in \n zip(input_values, input_types)]\n \n return run_with_generic_values(fn, gv_inputs, ee)", "def cudify(fn):\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n result = fn(*args, **kwargs)\n return cuda_if_gpu(result)\n\n return wrapper", "def fsig(arg_types: ArgTypes, name: Text, span: Span, ctx: DeduceCtx,\n _: Optional[ParametricBindings]) -> ConcreteType:\n checker = _Checker(arg_types, name, span).len(2).is_bits(0).is_array(1)\n\n arg0 = arg_types[0]\n arg1 = arg_types[1]\n assert isinstance(arg1, ArrayType), arg1\n assert isinstance(arg1.size.value, int), arg1\n return_type = arg1.element_type\n checker.check_is_bits(return_type,\n 'Want arg 1 element type to be bits; got {0}')\n checker.check_is_len(arg1, arg0.size,\n 'bit width {target} must match {t} array size {t.size}')\n return FunctionType(arg_types, return_type)", "def feed_arg(self, ws, name, value, dtype):\n ws.FeedTensor(name, numpy.array(value, dtype), self._arg_device)", "def handle_func_command(cls, command):\n cmd, _, args, kwargs = command\n\n try: # will work if tensors are wrappers\n\n # Replace all TensorFlow tensor with their child attribute\n # Note that we return also args_type which helps handling case 3 in the docstring\n new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(\n cmd, args, kwargs, return_args_type=True\n )\n # This handles case 3: it redirects the command to the appropriate class depending\n # of the 
syft type of the arguments and returns\n if args_type not in FrameworkTensor:\n return args_type.handle_func_command(command)\n\n # build the new command\n new_command = (cmd, None, new_args, new_kwargs)\n # Send it to the appropriate class and get the response\n response = new_type.handle_func_command(new_command)\n # Put back the wrappers where needed\n response = hook_args.hook_response(cmd, response, wrap_type=args_type)\n except PureFrameworkTensorFoundError: # means that it's not a wrapper but a pure tensor\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n command = cls.rgetattr(cls, cmd)\n return command(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: clean this line\n cmd_split = cmd.split(\".\")\n cmd_path = cmd_split[:-1]\n cmd_name = cmd_split[-1]\n cmd = \"syft.local_worker.hook.\" + \".\".join(cmd_path) + \".native_\" + cmd_name\n\n # Run the native function with the new args\n # Note the the cmd should already be checked upon reception by the worker\n # in the execute_command function\n if isinstance(args, tuple):\n response = eval(cmd)(*args, **kwargs)\n else:\n response = eval(cmd)(args, **kwargs)\n\n return response" ]
[ "0.6057609", "0.5804018", "0.5786905", "0.5687931", "0.5632874", "0.55926454", "0.55586314", "0.54775053", "0.5402661", "0.53772503", "0.5370256", "0.5299433", "0.5298682", "0.5296351", "0.52958226", "0.52583855", "0.52490014", "0.5243063", "0.52260655", "0.51923186", "0.5190427", "0.5190425", "0.51797056", "0.5172453", "0.5171298", "0.5169099", "0.51653165", "0.51564175", "0.5156381", "0.5146902" ]
0.77259445
0
Message iterator call may be a simple one (e.g. message_in(x, y, z)) or a call to a member (e.g. message_in.wrap()). Using this function avoids the global call handler, which may accept member function calls to things that are not iterators.
def dispatchMessageIteratorCall(self, tree): # simple case not a member function just an iterator with arguments if isinstance(tree.func, ast.Name): self.write(f"FLAMEGPU->{tree.func.id}") if isinstance(tree.func, ast.Attribute) : if isinstance(tree.func.value, ast.Name): # check that the iterator is supported if not tree.func.attr in self.fgpu_input_msg_iter_funcs: self.RaiseError(tree, f"Message input loop iterator '{tree.func.attr}' is not supported.") self.write(f"FLAMEGPU->{tree.func.value.id}.{tree.func.attr}") else: self.RaiseError(tree, "Message input loop iterator format incorrect.") # handle function arguments self.write("(") self._CallArguments(tree) self.write(")")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call(self, message: Message) -> None:\n self.fn(message)", "def _call(self, x):\n return x.inner(x)", "def test_dispatch_inbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_inbound(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [msg])", "def __iter__(self):\n yield from self.calls", "def __iter__(self):\n return iter([self.format_message(record) for record in self._messages])", "def iter_call(self, name, *args, **kwargs):\r\n return self.client.iter_call(self.name, name, *args, **kwargs)", "def test_dispatch_inbound_no_connector(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper(connector_name='fooconn')\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_inbound(msg)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [msg])", "def test_dispatch_raw(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_raw('fooconn.foo', msg)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [msg])", "def __next__(self):\n return self.read_message()", "def call(self, msg, cb=None):\r\n self._call(msg, cb or self._cb)", "def callable(func, message):\n return func, message", "def dispatchMessageLoop(self, tree):\n self.fill(\"for (const auto& \")\n self.dispatch(tree.target)\n self.write(\" : \")\n # if simple message iterator\n if isinstance(tree.iter, ast.Name):\n if not tree.iter.id == self._input_message_var:\n self.RaiseError(t, f\"Message input loop requires use of '{self._input_message_var}' as iterator.\")\n # write with prefix\n self.write(f\"FLAMEGPU->{self._input_message_var}\")\n # if it is a call then handle the different cases\n elif isinstance(tree.iter, ast.Call):\n self.dispatchMessageIteratorCall(tree.iter)\n #otherwise not supported\n else :\n self.RaiseError(tree, f\"Message input loop iterator in unsupported format\")\n self.write(\")\")\n self._message_iterator_var = tree.target.id\n self.enter()\n self.dispatch(tree.body)\n self.leave()\n self._message_iterator_var = None", "def nextinline(self, irc, msg, args):\n channel = self.registryValue('checkOpsInChannel')\n if channel == '':\n self.log.error('checkOpsInChannel not set!')\n return\n if channel not in irc.state.channels:\n self.log.warn('not in %s' % channel)\n return\n if msg.nick not in irc.state.channels[channel].ops:\n self.log.warn('denying access to non-chanop user %r' % msg.nick)\n return\n if len(self._queue) > 0:\n nick, notice = self._queue.pop(0)\n response = \"Next in line is %s\" % nick\n if notice is not None:\n response += \" with notice: %s\" % notice\n self._dump_queue()\n irc.reply(response)\n else:\n irc.reply(\"There's nobody queued up right now.\")", "def __call__(self, *args, **kwargs):\n return self.method(self.receiver, *args, **kwargs)", "def _iter_call_meth(self, method, *args, **kwargs):\n for obj in self:\n if hasattr(obj, method):\n f = op.methodcaller(method, *args, **kwargs)\n f(obj)", "def get_message(self, i):\n pass", "def _dispatch_messages(self):\n while True:\n 
select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def __call__(self):\r\n return self.next()", "def _call(self, msg, cb, *args):\r\n if not self._status:\r\n raise InterfaceDisabledError('A disabled interface should not be '\r\n 'called.')\r\n\r\n if not callable(cb):\r\n raise TypeError('Callback has to be callable.')\r\n\r\n uid = uuid4().hex\r\n deferred = Deferred()\r\n deferred.addCallback(cb, *args)\r\n self._responses[uid] = deferred\r\n\r\n self._conn.sendMessage(self._iTag, self._clsName, msg, uid)", "def processMessage(self, *args, **kwargs):\r\n pass", "def send_method(\n self, call: Callable[[Any, str], str]\n ) -> Callable[[Any, str], str]:\n assert self._send_raw_message_call is None\n self._send_raw_message_call = call\n return call", "def __call__(self, *args, **kwargs):\n return self.call(*args, **kwargs)", "def __call__(self, func, *args, **kwargs):\n\n @wraps(func) # To keep its own namespace\n def wrapper(*args, **kwargs):\n gener = self.__iter__()\n return func(gener, *args, **kwargs)\n return wrapper", "def process(self, message: Message, **kwargs: Any) -> None:", "def process(self, msg):\n raise NotImplemented", "def _incoming_handler(self, context, message, fake_reply):\r\n return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)", "def imap_c(func):\n return functools.partial(imap, func)", "def sender_iter(self):\n while 1:\n yield self.send_next()", "def __call__(self, *args, **kw):\n return self.callable(*args, **kw)", "def call_all(self, msg_tag, message):\n return self.hub.call_all(self.get_private_key(), msg_tag, message)" ]
[ "0.5965189", "0.5781531", "0.5567268", "0.5560412", "0.5555918", "0.5474647", "0.53749025", "0.5369496", "0.5356947", "0.532416", "0.53201944", "0.52734697", "0.5214312", "0.51924616", "0.51724297", "0.51710767", "0.51597893", "0.51278067", "0.5124339", "0.5106569", "0.509719", "0.5078223", "0.5075271", "0.505164", "0.5017414", "0.49991345", "0.49987322", "0.4972996", "0.4969931", "0.496608" ]
0.73008895
0
Only a very limited set of function calls to members is supported, so these are fully evaluated here. t_parent is the Call ast object, required if the arguments need to be modified (i.e. in the case of macro environment properties). Function calls permitted are: pyflamegpu.function, a supported function call, e.g. pyflamegpu.getVariableFloat(), which will be translated into a typed Cpp call; message_input.function, a call to the message input variable (the name of which is specified in the function definition); msg.function, a call to the message input iterator object variable (the name of which is specified in the message function loop); message_output.function, a call to the message output variable (the name of which is specified in the function definition); pyflamegpu.environment.function, the only nested attribute type, which will be translated into a typed Cpp call; math.function, any function call from Python `math` is translated to a raw function call, e.g. `math.sin()` becomes `sin()`; numpy.type, any numpy types are translated to static casts
def dispatchMemberFunction(self, t, t_parent): # it could be possible that the Call object has no value property e.g. a()() if not hasattr(t, "value"): self.RaiseError(t, f"Function call is in an unsupported format.") # Nested member functions (e.g. x.y.z()) if isinstance(t.value, ast.Attribute): # store some information about the source of this function call in parent as this may be useful for validation in whatever has called this function t_parent.call_type = None # only nested attribute type is environment if not isinstance(t.value.value, ast.Name): self.RaiseError(t, "Unknown or unsupported nested attribute") # pyflamegpu.environment if t.value.value.id == "pyflamegpu" and t.value.attr == "environment": # check it is a supported environment function self.write("FLAMEGPU->environment.") if t.attr in self.fgpu_env_funcs: # proceed self.write(t.attr) else: # simple getProperty type function if t.attr.startswith('getProperty') : # possible getter setter type function py_func = self._deviceVariableFunctionName(t, ["getProperty"]) if not py_func: self.RaiseError(t, f"Function '{t.attr}' is not a supported pyflamegpu.environment property function.") # write the getProperty type function self.write(py_func) t_parent.call_type = "Environment" # need to catch case of getMacroProperty as arguments need to be translated into template parameters in cpp (and py_func can be ignored) elif t.attr.startswith("getMacroProperty"): # possible getter setter type function (Note: getMacroProperty only supports a subset of types but type checking is not performed. This is best left to the compiler.) # no not permit lengths (e.g. Float4) as these will be passed as arguments py_func = self._deviceVariableFunctionName(t, ["getMacroProperty"], allow_lengths=False) if not py_func: self.RaiseError(t, f"Function '{t.attr}' is not a supported pyflamegpu.environment macro property function.") # handle case self.dispatchMacroEnvFunction(t, t_parent) t_parent.call_type = "MacroEnvironment" else: self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu.environment object") # pyflamegpu.random elif t.value.value.id == "pyflamegpu" and t.value.attr == "random": # check it is a supported random function self.write("FLAMEGPU->random.") if t.attr in self.fgpu_rand_funcs: # proceed self.write(t.attr) else: # possible getter setter type function py_func = self._deviceVariableFunctionName(t, ["uniform", "normal", "logNormal"], allow_lengths=False) if not py_func: self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu.random object") # proceed self.write(py_func) t_parent.call_type = "Random" elif t.value.value.id == "pyflamegpu" and t.value.attr == "agent_out": # check it is a supported agent_out function self.write("FLAMEGPU->agent_out.") if t.attr in self.fgpu_agent_out_msg_funcs: # proceed self.write(t.attr) else: # possible getter setter type function py_func = self._deviceVariableFunctionName(t, ["setVariable"]) if not py_func: self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu.agent_out object") # proceed self.write(py_func) t_parent.call_type = "AgentOut" else: self.RaiseError(t, f"Unknown or unsupported nested attribute in {t.value.value.id}") # Non nested member functions (e.g. 
x.y()) elif isinstance(t.value, ast.Name): # pyflamegpu singleton if t.value.id == "pyflamegpu": # check for legit FGPU function calls self.write("FLAMEGPU->") if t.attr in self.fgpu_funcs: # proceed self.write(t.attr) else: # possible getter setter type function py_func = self._deviceVariableFunctionName(t, ["getVariable", "setVariable"]) if not py_func: self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu object") # proceed self.write(py_func) # message_in function using whatever variable was named in function declaration (e.g radius) elif t.value.id == self._input_message_var: # only process functions on message_in that are not iterators if t.attr in self.fgpu_input_msg_funcs: self.write(f"FLAMEGPU->{self._input_message_var}.") self.write(t.attr) else: self.RaiseError(t, f"Message input variable '{self._input_message_var}' does not have a supported function '{t.attr}'") # message input iterator arg elif self._message_iterator_var and t.value.id == self._message_iterator_var: self.write(f"{self._message_iterator_var}.") # check for legit FGPU function calls and translate if t.attr in self.fgpu_input_msg_iter_var_funcs: # proceed self.write(t.attr) else: # possible getter setter type function py_func = self._deviceVariableFunctionName(t, ["getVariable"]) if not py_func: self.RaiseError(t, f"Function '{t.attr}' does not exist in '{self._message_iterator_var}' message input iterable object") # proceed self.write(py_func) # message output arg elif t.value.id == self._output_message_var: # check for legit FGPU function calls and translate self.write("FLAMEGPU->message_out.") if t.attr in self.fgpu_output_msg_funcs: # proceed self.write(t.attr) else: # possible getter setter type function py_func = self._deviceVariableFunctionName(t, ["setVariable"]) if not py_func: self.RaiseError(t, f"Function '{t.attr}' does not exist in '{self._output_message_var}' message output object") # proceed self.write(py_func) # math functions (try them in raw function call format) or constants elif t.value.id == "math": self.write(t.attr) # numpy types elif t.value.id == "numpy" or t.value.id == "np": if t.attr in self.numpytypes: self.write(f"static_cast<{self.numpytypes[t.attr]}>") else: self.RaiseError(t, f"Unsupported numpy type {t.attr}") # allow any call on any locals (too many cases to enforce without type checking) elif t.value.id in self._locals: self.write(f"{t.value.id}.{t.attr}") else: self.RaiseError(t, f"Global '{t.value.id}' identifier not supported") # Call is a very nested situation which can occur only on macro environment properties. E.g. 'pyflamegpu.environment.getMacroPropertyInt('a').exchange(10)' elif isinstance(t.value, ast.Call): # handle the call by recursively calling this function to do the depth first execution of pyflamegpu.environment.getMacroPropertyInt('a') self.dispatchMemberFunction(t.value.func, t.value) # check that the handler was actually for macro environment if t.value.call_type != "MacroEnvironment" : self.RaiseError(t, f"Function call {t.attr} is not supported") # now append the outer call by making sure the thing been called is a valid macro env function if not t.attr in self.fgpu_env_macro_funcs: self.RaiseError(t, f"Function {t.attr} is not a valid macro environment function") # write inner call args self.write("(") self._CallArguments(t.value) self.write(")") # write outer function (call args will be completed by _Call) self.write(f".{t.attr}") else: self.RaiseError(t, "Unsupported function call syntax")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _Call(self, t):\n # check calls but let attributes check in their own dispatcher\n funcs = self._device_functions + self.pythonbuiltins + [self._input_message_var] # message_input variable is a valid function name as certain message types have arguments on iterator\n if isinstance(t.func, ast.Name):\n if (t.func.id not in funcs):\n self.RaiseWarning(t, \"Function call is not a defined FLAME GPU device function or a supported python built in.\")\n # dispatch even if warning raised\n self.dispatch(t.func)\n elif isinstance(t.func, ast.Lambda):\n self.dispatch(t.func) # not supported\n else:\n # special handler for dispatching member function calls\n # This would otherwise be an attribute\n self.dispatchMemberFunction(t.func, t) \n self.write(\"(\")\n self._CallArguments(t)\n self.write(\")\")", "def call_module(\n self,\n node: torch.fx.Node,\n parent_onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n root_fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n ) -> None:\n assert isinstance(\n node.target, str\n ), f\"node.target must be a str, not {type(node.target)} for node {node}.\"\n\n sub_module = root_fx_graph_module.get_submodule(node.target)\n\n assert isinstance(\n sub_module, torch.fx.GraphModule\n ), f\"sub_module must be a torch.fx.GraphModule, not {type(sub_module)} for node {node}.\"\n\n sub_onnxscript_graph = self.run(\n sub_module, onnxfunction_dispatcher, op_level_debug, parent_onnxscript_graph\n )\n\n onnx_args, _ = _wrap_fx_args_as_onnxscript_args(\n list(node.args), {}, fx_name_to_onnxscript_value, tracer\n )\n\n # TODO: We may want to consider other naming styles. The goal is to be stable and\n # unique such that it can be easily identified in case of kernel substitution.\n # Example for current style is combination of qualified module class name and\n # module attribute name: `torch_nn_modules_conv_Conv2d_conv1`.\n # Other naming styles such as qualified module class name made unique can also\n # be considered.\n unique_module_name = f\"{sub_module._get_name()}_{node.target}\"\n\n outputs: Union[ # type: ignore[no-redef]\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ] = parent_onnxscript_graph.add_module_call(\n unique_module_name, sub_onnxscript_graph, onnx_args\n )\n\n assert isinstance(\n outputs, (onnxscript_graph_building.TorchScriptTensor, tuple)\n ), f\"Unexpected outputs type {type(outputs)} for node {node}.\"\n\n _fill_tensor_shape_type(outputs, node.name, node.meta[\"val\"])\n fx_name_to_onnxscript_value[node.name] = outputs\n\n # Skip op_level_validation for call_module. 
Subgraph nodes are validated individually.", "def func_call(self, t):\n func, params = t\n func_name = func.value\n func.value = \"({}({}))\".format(func_name, params)\n return func", "def dispatchMacroEnvFunction(self, tree, tree_parent):\n cpp_func_name = \"getMacroProperty\"\n py_func = tree.attr\n # extract type from function name\n py_type = py_func[len(cpp_func_name):]\n if py_type not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_type}' is not a valid FLAME GPU type\")\n # get cpp type\n t = self._fgpu_types[py_type]\n cpp_func_name += f\"<{t}\"\n # mess with the parent to extract (and remove arguments so they dont end up in the argument list)\n if not tree_parent.args :\n self.RaiseError(tree, f\" Macro environment function '{py_func}' is expected to have some arguments.\")\n # if more than one arg then the rest are bounds to translate\n if len(tree_parent.args) > 1:\n bounds = tree_parent.args[1:]\n # process bounds by appending to cpp function template arguments\n for i in bounds:\n if isinstance(i, ast.Num): # num required for python 3.7\n if not isinstance(i.n, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.n}\"\n else: # all Python > 3.7 \n if not isinstance(i, ast.Constant):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).\")\n if not isinstance(i.value, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.value}\"\n # remove bounds from argument list (in place)\n del tree_parent.args[1:]\n cpp_func_name += \">\"\n self.write(cpp_func_name)", "def _(self, node: Call):\n\n args = []\n for n in node.arguments:\n args.append(self.visit(n))\n\n func_args = \" \".join(args)\n\n return f\"( call {node.func.name} {func_args} )\"", "def visit_Call(self, node):\n assert hasattr(node, 'args')\n if node.args:\n assert isinstance(node.args[0], gast.Starred)\n # modify args\n if isinstance(node.args[0].value, gast.Name):\n node.args[0].value.id += '_new'\n\n assert hasattr(node, 'keywords')\n if node.keywords:\n assert isinstance(node.keywords[0], gast.keyword)\n self.generic_visit(node)\n return node", "def call_statement(env, node):\n fun = env['f'][node.name]\n func_env = Environment(env).create(env['f'])\n args = fun['args'].interpret(env)\n call_args_interpretuated = node.args.interpret(env)\n args_counter = 0\n for arg in args:\n func_env['v'][arg] = call_args_interpretuated[args_counter].interpret(env)\n args_counter += 1\n fun['body'].interpret(func_env)\n return func_env['r']", "def visit_Call(self, node: ast.Call) -> None:\n self._check_wrong_function_called(node)\n self._check_boolean_arguments(node)\n self._check_isinstance_call(node)\n\n if functions.given_function_called(node, {'super'}):\n self._check_super_context(node)\n self._check_super_arguments(node)\n\n self.generic_visit(node)", "def _FunctionDef(self, t):\n self.write(\"\\n\")\n # check decorators\n if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0], ast.Attribute):\n self.RaiseError(t, \"Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\") \n # FLAMEGPU_AGENT_FUNCTION\n if t.decorator_list[0].attr == 'agent_function' and t.decorator_list[0].value.id == 'pyflamegpu':\n if getattr(t, \"returns\", False):\n self.RaiseWarning(t, \"Function 
definition return type not supported on 'pyflamegpu.agent_function'\")\n self.fill(f\"FLAMEGPU_AGENT_FUNCTION({t.name}, \")\n self.dispatchFGPUFunctionArgs(t)\n self.write(\")\")\n # FLAMEGPU_DEVICE_FUNCTION\n elif t.decorator_list[0].attr == 'device_function' and t.decorator_list[0].value.id == 'pyflamegpu':\n self.fill(f\"FLAMEGPU_DEVICE_FUNCTION \")\n if t.returns:\n self.dispatchType(t.returns)\n else:\n self.write(\"void\")\n self.write(f\" {t.name}(\")\n self.dispatchFGPUDeviceFunctionArgs(t)\n self.write(\")\")\n # add to list of defined functions that can be called\n self._device_functions.append(t.name)\n # FLAMEGPU_DEVICE_FUNCTION\n elif t.decorator_list[0].attr == 'agent_function_condition' and t.decorator_list[0].value.id == 'pyflamegpu':\n # check for return annotation\n if not hasattr(t, \"returns\"):\n self.RaiseError(t, \"Agent function conditions must have a 'bool' return type specified as a return type annotation\")\n # check for return annotation type\n if not isinstance(t.returns, ast.Name):\n self.RaiseError(t, \"Agent function conditions return type must be 'bool'\")\n if t.returns.id is not 'bool':\n self.RaiseError(t, \"Agent function conditions return type must be 'bool'\")\n # check to ensure no arguments (discard any with a warning)\n if t.args.args:\n self.RaiseWarning(t, \"Agent function conditions does not support arguments. These will be discarded.\")\n # write the agent function macro\n self.fill(f\"FLAMEGPU_AGENT_FUNCTION_CONDITION({t.name})\")\n else:\n self.RaiseError(t, \"Function definition uses an unsupported decorator. Must use either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\")\n self.enter()\n self.dispatch(t.body)\n self.leave()", "def dispatchFGPUDeviceFunctionArgs(self, tree):\n # reset the locals variable stack\n self._locals = [\"pyflamegpu\"]\n # input message\n first = True\n annotation = None\n for arg in tree.args.args:\n # ensure that there is a type annotation\n if not arg.annotation:\n self.RaiseError(arg, \"Device function argument requires type annotation\")\n # comma if not first\n if not first:\n self.write(\", \")\n self.dispatchType(arg.annotation)\n self.write(f\" {arg.arg}\")\n # add arg to local variable stack\n self._locals.append(arg.arg)\n first = False", "def call_top_interface_args_with_func_def(self, node: AnnCastCall):\n # call container is used to scope parameters\n call_con_name = call_container_name(node)\n\n # create argument and parameter variables\n # argument variables are inputs to the top interface\n # paramter variables are outputs of the top interface\n for i, n in enumerate(node.arguments):\n # argument name and scope str\n arg_name = call_argument_name(node, i)\n arg_con_scopestr = con_scope_to_str(node.func.con_scope)\n\n # parameter name and scopestr\n func_def = self.pipeline_state.func_def_node_from_id(node.func.id)\n param = func_def.func_args[i]\n assert(isinstance(param, AnnCastVar))\n param_name = param.val.name\n param_con_scopestr = con_scope_to_str(node.func.con_scope + [call_con_name])\n\n # argument and parameter share id, and start with initial version\n id = self.pipeline_state.next_collapsed_id()\n version = VAR_INIT_VERSION\n\n # build and store GrFN variables for argument and parameter\n arg_grfn_var = create_grfn_var(arg_name, id, version, arg_con_scopestr)\n arg_fullid = build_fullid(arg_name, id, version, arg_con_scopestr)\n self.pipeline_state.store_grfn_var(arg_fullid, arg_grfn_var)\n # store arg_fullid\n 
node.arg_index_to_fullid[i] = arg_fullid\n # create From Source metadata for the GrFN var\n from_source = False\n from_source_mdata = generate_from_source_metadata(from_source, VariableCreationReason.FUNC_ARG)\n add_metadata_to_grfn_var(arg_grfn_var, from_source_mdata)\n\n param_grfn_var = create_grfn_var(param_name, id, version, param_con_scopestr)\n param_fullid = build_fullid(param_name, id, version, param_con_scopestr)\n self.pipeline_state.store_grfn_var(param_fullid, param_grfn_var)\n # store param_fullid\n node.param_index_to_fullid[i] = param_fullid\n # create From Source metadata for the GrFN var\n add_metadata_from_name_node(param_grfn_var, param.val)\n\n # link argument and parameter through top interface\n node.top_interface_in[id] = arg_fullid\n node.top_interface_out[id] = param_fullid\n\n # DEBUG printing\n if self.pipeline_state.PRINT_DEBUGGING_INFO:\n print(\"After create_call_args_and_params():\")\n print(f\"\\ttop_interface_in = {node.top_interface_in}\")\n print(f\"\\ttop_interface_out = {node.top_interface_out}\")", "def eval_function_call(func_call, motif_node_dict):\n print(\"\\x1b[6;30;42m\" + 'Evaluating ' + func_call.name.name + ' function...' + '\\x1b[0m')\n # CamFlow \"alloc_provenance\" take two arguments but only the first is needed for modeling.\n if func_call.name.name == 'alloc_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n return provenance.alloc_provenance(arg_names[0], None)\n # CamFlow \"task_cred_xxx\" take two arguments but no argument is needed for modeling.\n elif func_call.name.name == 'task_cred_xxx':\n return provenance.task_cred_xxx(None, None)\n # CamFlow \"branch_mmap\" take two arguments but no argument is needed for modeling.\n elif func_call.name.name == 'branch_mmap':\n return provenance.branch_mmap(None, None)\n # CamFlow \"uses_two\" function takes five arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'uses_two':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second and third arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in uses_two must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in uses_two must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in uses_two must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in uses_two must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.uses_two(arg_names[0], val1, val2, None, None, motif_node_dict)\n # CamFlow \"informs\" function takes five arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'informs':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second and third arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in informs must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + 
'[error][eval_function_call]: ' + arg1 + ' in informs must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in informs must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in informs must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.informs(arg_names[0], val1, val2, None, None, motif_node_dict)\n # CamFlow \"record_terminate\" function takes two arguments.\n elif func_call.name.name == 'record_terminate':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second arguments must be converted to MotifNode object first.\n arg1 = arg_names[1]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_terminate must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_terminate must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_terminate(arg_names[0], val1, motif_node_dict)\n # CamFlow \"generates\" function takes six arguments but only the first four are needed for modeling.\n elif func_call.name.name == 'generates':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second, third, and fourth arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n arg3 = arg_names[3]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in generates must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in generates must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in generates must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in generates must have values in the dictionary.\\033[0m')\n exit(1)\n if arg3 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in generates must exist in the dictionary.\\033[0m')\n exit(1)\n val3 = getLastValueFromKey(motif_node_dict, arg3)\n if not val3:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in generates must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.generates(arg_names[0], val1, val2, val3, None, None, motif_node_dict)\n # CamFlow \"get_task_provenance\" takes no arguments.\n elif func_call.name.name == 'get_task_provenance':\n return provenance.get_task_provenance()\n # CamFlow \"get_cred_provenance\" takes no arguments.\n elif func_call.name.name == 'get_cred_provenance':\n return provenance.get_cred_provenance(motif_node_dict)\n # CamFlow \"uses\" takes six arguments but only the first four are needed for modeling.\n elif func_call.name.name == 'uses':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second, third, and fourth arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n arg3 = arg_names[3]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + 
'[error][eval_function_call]: ' + arg1 + ' in uses must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in uses must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in uses must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in uses must have values in the dictionary.\\033[0m')\n exit(1)\n if arg3 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in uses must exist in the dictionary.\\033[0m')\n exit(1)\n val3 = getLastValueFromKey(motif_node_dict, arg3)\n if not val3:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in uses must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.uses(arg_names[0], val1, val2, val3, None, None, motif_node_dict)\n # CamFlow \"refresh_inode_provenance\" takes two arguments but only the second one is needed for modeling.\n elif func_call.name.name == 'refresh_inode_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second argument must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in refresh_inode_provenance must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in refresh_inode_provenance must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.refresh_inode_provenance(None, val1, motif_node_dict)\n # CamFlow \"get_inode_provenance\" takes two arguments but only the second argument is needed for modeling.\n elif func_call.name.name == 'get_inode_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n if arg_names[1] == 'false':\n arg1 = False\n elif arg_names[1] == 'true':\n arg1 = True\n else:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg_names[1] + ' in get_inode_provenance is unknown.\\033[0m')\n exit(1)\n return provenance.get_inode_provenance(None, arg1, motif_node_dict)\n # CamFlow \"get_dentry_provenance\" takes two arguments but only the second argument is needed for modeling. 
\n elif func_call.name.name == 'get_dentry_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n if arg_names[1] == 'false':\n arg1 = False\n elif arg_names[1] == 'true':\n arg1 = True\n else:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg_names[1] + ' in get_dentry_provenance is unknown.\\033[0m')\n exit(1)\n return provenance.get_dentry_provenance(None, arg1, motif_node_dict)\n # CamFlow \"record_inode_name_from_dentry\" takes three arguments, but only the second and the third arguments are needed for modeling.\n elif func_call.name.name == 'record_inode_name_from_dentry':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second argument must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_inode_name_from_dentry must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_inode_name_from_dentry must have values in the dictionary.\\033[0m')\n exit(1)\n if arg_names[2] == 'false':\n arg2 = False\n elif arg_names[2] == 'true':\n arg2 = True\n else:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_inode_name_from_dentry is unknown.\\033[0m')\n exit(1)\n return provenance.record_inode_name_from_dentry(None, val1, arg2, motif_node_dict)\n # CamFlow \"record_node_name\" takes three arguments, but only the first and the third arguments are needed for modeling.\n elif func_call.name.name == 'record_node_name':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second argument must be converted to MotifNode objects first.\n arg0 = arg_names[0]\n if arg0 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_node_name must exist in the dictionary.\\033[0m')\n exit(1)\n val0 = getLastValueFromKey(motif_node_dict, arg0)\n if not val0:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_node_name must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_node_name(val0, None, arg_names[2], motif_node_dict)\n # CamFlow \"derives\" function takes five arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'derives':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second and third arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in derives must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in derives must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in derives must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in derives must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.derives(arg_names[0], val1, val2, None, None, motif_node_dict)\n # CamFlow \"record_write_xattr\" function takes eight arguments but only the first four are needed for modeling.\n elif func_call.name.name == 
'record_write_xattr':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second, third, and fourth arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n arg3 = arg_names[3]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_write_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_write_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_write_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_write_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n if arg3 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in record_write_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val3 = getLastValueFromKey(motif_node_dict, arg3)\n if not val3:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in record_write_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_write_xattr(arg_names[0], val1, val2, val3, None, None, None, None, motif_node_dict)\n # CamFlow \"record_read_xattr\" function takes four arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'record_read_xattr':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n arg0 = arg_names[0]\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg0 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_read_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val0 = getLastValueFromKey(motif_node_dict, arg0)\n if not val0:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_read_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_read_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_read_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_read_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_read_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_read_xattr(val0, val1, val2, None, motif_node_dict)\n # CamFlow \"get_file_provenance\" takes two arguments but only the second argument is needed for modeling. 
\n elif func_call.name.name == 'get_file_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n if arg_names[1] == 'false':\n arg1 = False\n elif arg_names[1] == 'true':\n arg1 = True\n else:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg_names[1] + ' in get_file_provenance is unknown.\\033[0m')\n exit(1)\n return provenance.get_file_provenance(None, arg1, motif_node_dict)\n # CamFlow \"influences_kernel\" function takes four arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'influences_kernel':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second and third arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in influences_kernel must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in influences_kernel must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in influences_kernel must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in influences_kernel must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.influences_kernel(arg_names[0], val1, val2, None, motif_node_dict)\n # CamFlow \"get_socket_inode_provenance\" takes one argument but it is not needed for modeling.\n elif func_call.name.name == 'get_socket_provenance':\n return provenance.get_socket_provenance(None, motif_node_dict)\n # CamFlow \"get_socket_inode_provenance\" takes one argument but it is not needed for modeling. \n elif func_call.name.name == 'get_socket_inode_provenance':\n return provenance.get_socket_inode_provenance(None, motif_node_dict)\n # CamFlow \"record_address\" takes three arguments but only the last argument is needed for modeling. \n elif func_call.name.name == 'record_address':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n arg2 = arg_names[2]\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_address must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_address must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_address(None, None, val2, motif_node_dict)\n # CamFlow \"get_sk_inode_provenance\" takes one argument but it is not needed for modeling. \n elif func_call.name.name == 'get_sk_inode_provenance':\n return provenance.get_sk_inode_provenance(None, motif_node_dict)\n # CamFlow \"get_sk_provenance\" takes one argument but it is not needed for modeling.\n elif func_call.name.name == 'get_sk_provenance':\n return provenance.get_sk_provenance(None, motif_node_dict)\n # CamFlow \"record_packet_content\" takes two arguments but only the second argument is needed for modeling. 
\n elif func_call.name.name == 'record_packet_content':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n arg1 = arg_names[1]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_packet_content must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_packet_content must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_packet_content(None, val1, motif_node_dict)\n # CamFlow \"record_args\" takes two arguments but only the first argument is needed for modeling.\n elif func_call.name.name == 'record_args':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n arg0 = arg_names[0]\n if arg0 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_args must exist in the dictionary.\\033[0m')\n exit(1)\n val0 = getLastValueFromKey(motif_node_dict, arg0)\n if not val0:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_args must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_args(val0, None, motif_node_dict)\n else:\n return None, None", "def _partition_call_operator(self, inputs, attr):\n\n try:\n from tensorflow.python.framework import function_def_to_graph\n except ImportError as e:\n raise ImportError(f\"Unable to import tensorflow which is required {e}\")\n\n main_graph_proto = self._main_graph_proto\n outer_graph_def = main_graph_proto._graph\n\n node_func_name = attr.get(\"f\").name\n func = next(\n (f for f in outer_graph_def.library.function if f.signature.name == node_func_name),\n None,\n )\n if func:\n devices = set(node.device for node in func.node_def)\n if len(devices) > 1:\n raise Exception(\n \"Found inconsistent Device assignment in the \"\n \"Stateful Partitioned SubGraph. 
Rejecting \"\n \"the subgraph \"\n )\n # Convert function definition to graph\n func_input_shapes = func.attr[\"_input_shapes\"].list.shape\n subgraph, _ = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes)\n\n # Computing subgraph's input shape dictionary\n subgraph_shape_dict, input_expr_dict = {}, {}\n for f_arg, input in zip(func.signature.input_arg, inputs):\n input_expr_dict[f_arg.name] = input\n subgraph_shape_dict[f_arg.name] = _infer_shape(input, main_graph_proto._mod)\n\n func_name = f\"func_{func.signature.name}\"\n try:\n global_func = main_graph_proto._mod[func_name]\n sub_func = global_func\n sub_params = main_graph_proto._params\n except ValueError:\n # Construct relay nodes from the subgraph\n g1 = SubGraphProto(main_graph_proto)\n sub_func, sub_params = g1.from_tensorflow(subgraph, shape=subgraph_shape_dict)\n main_graph_proto._params.update(sub_params)\n func_expr = _function.Function(sub_func.params, sub_func.body)\n global_func = tvm.relay.GlobalVar(func_name)\n main_graph_proto._mod[global_func] = func_expr\n main_graph_proto._mod = InferType()(main_graph_proto._mod)\n\n param_exprs = []\n for param_expr in sub_func.params:\n # sub_params is subset of sub_func.params\n param_name = param_expr.vid.name_hint\n if param_name in input_expr_dict.keys():\n param_exprs.append(input_expr_dict[param_name])\n elif param_name in sub_params.keys():\n param_exprs.append(param_expr)\n else:\n raise Exception(f\"Input parameter {param_name} not found\")\n\n sb = tvm.relay.scope_builder.ScopeBuilder()\n loop_ret = global_func(*param_exprs)\n sb.ret(loop_ret)\n ret = sb.get()\n else:\n raise Exception(f\"Function not found - {node_func_name}\")\n return ret", "def call_top_interface_args_with_no_func_def(self, node: AnnCastCall):\n # call container is used to scope parameters\n call_con_name = call_container_name(node)\n\n # create argument and parameter variables\n # argument variables are inputs to the top interface\n # paramter variables are outputs of the top interface\n for i, n in enumerate(node.arguments):\n # argument name and scope str\n arg_name = call_argument_name(node, i)\n arg_con_scopestr = con_scope_to_str(node.func.con_scope)\n\n # parameter name and scopestr\n param_name = call_param_name(node, i)\n param_con_scopestr = con_scope_to_str(node.func.con_scope + [call_con_name])\n \n # argument and parameter share id, and start with initial version\n id = self.pipeline_state.next_collapsed_id()\n version = VAR_INIT_VERSION\n\n # build and store GrFN variables for argument and parameter\n arg_grfn_var = create_grfn_var(arg_name, id, version, arg_con_scopestr)\n arg_fullid = build_fullid(arg_name, id, version, arg_con_scopestr)\n self.pipeline_state.store_grfn_var(arg_fullid, arg_grfn_var)\n # store arg_fullid\n node.arg_index_to_fullid[i] = arg_fullid\n # create From Source metadata for the GrFN var\n from_source = False\n from_source_mdata = generate_from_source_metadata(from_source, VariableCreationReason.FUNC_ARG)\n add_metadata_to_grfn_var(arg_grfn_var, from_source_mdata)\n\n param_grfn_var = create_grfn_var(param_name, id, version, param_con_scopestr)\n param_fullid = build_fullid(param_name, id, version, param_con_scopestr)\n self.pipeline_state.store_grfn_var(param_fullid, param_grfn_var)\n # store param_fullid\n node.param_index_to_fullid[i] = param_fullid\n # create From Source metadata for the GrFN var\n # when we don't have the function def, we create a paramter with a default name\n add_metadata_to_grfn_var(param_grfn_var, 
from_source_mdata)\n\n # link argument and parameter through top interface\n node.top_interface_in[id] = arg_fullid\n node.top_interface_out[id] = param_fullid\n\n # DEBUG printing\n if self.pipeline_state.PRINT_DEBUGGING_INFO:\n print(\"After create_call_args_and_params():\")\n print(f\"\\ttop_interface_in = {node.top_interface_in}\")\n print(f\"\\ttop_interface_out = {node.top_interface_out}\")", "def visit_Call(self, node: ast.Call) -> None:\n self._check_wrong_function_called(node)\n self._check_boolean_arguments(node)\n self._check_super_call(node)\n self.generic_visit(node)", "def get_method_calls(function_node, object_name):\n calls = {}\n for node in ast.walk(function_node):\n # These series of if's check that the node is an assignment node that\n # follows the pattern:\n # object_name.attribute(arg, otherarg, finalarg, keyword=keywordarg, keyword2=otherkeywordarg)\n if isinstance(node, ast.Expr):\n expression = node\n if isinstance(expression.value, ast.Call):\n call = expression.value\n if isinstance(call.func, ast.Attribute):\n attribute = call.func # this is a type not the name of the attribute\n if isinstance(attribute.value, ast.Name):\n name = attribute.value\n if name.id == object_name:\n attr = attribute.attr # attr is the actual atribute name the name of the method called\n raw_args = call.args\n args = []\n for arg in raw_args:\n args.append(convert_arg(arg))\n\n keyword_args = {}\n keywords = call.keywords\n for keyword in keywords:\n key = keyword.arg\n raw_value = keyword.value\n value = convert_arg(raw_value)\n keyword_args[key] = value\n\n call = MethodCall(object_name=object_name, method_name=attr, args=args, keywords=keyword_args)\n \n calls[attr] = call \n return calls", "def eval_node(node, env):\n global genv\n global result\n node_type = node_name(node)\n\n if node_type == 'Expr':\n return eval_node(node.value, env)\n elif node_type == 'Assign':\n val = eval_node(node.value, env)\n\n while type(val) is tuple and len(val) == 2 and (type(val[1]) == GlobalEnv or type(val[1]) == LocalEnv):\n val = val[0]\n\n # extract the variable name, evaluate the RHS, then extend the environment.\n return 0, env.extend([node.targets[0].id], [val])\n elif node_type == 'BinOp':\n # get the left and right operands (we use only single operands) and the operator.\n # evaluate the operands and apply the operator. return the number, env.\n\n left = eval_node(node.left, env)[0]\n right = eval_node(node.right, env)[0]\n\n left = left[0] if type(left) is tuple else left\n right = right[0] if type(right) is tuple else right\n\n op = node_name(node.op)\n\n if op == \"Add\":\n return (left + right), env\n elif op == \"Sub\":\n return (left - right), env\n elif op == \"Mult\":\n return (left * right), env\n elif op == \"Div\":\n return (left / right), env\n elif op == \"Mod\":\n return (left % right), env\n return 0, env\n elif node_type == 'FunctionDef':\n # need the function id (name), args, and body. 
Extend the environment.\n # you can leave the args wrapped in the ast class and the body and unpack them\n # when the function is called.\n\n return 0, env.extend([node.name], [(node.args, node.body)])\n elif node_type == 'Call':\n # get any values passed in to the function from the Call object.\n # get the fxn name and look up its parameters, if any, and body from the env.\n # get lists for parameter names and values and extend a LocalEnv with those bindings.\n # evaluate the body in the local env, return the value, env.\n\n func = eval_node(node.func, env)[0]\n local_env = LocalEnv(None, env)\n\n args = func[0].args\n body = func[1]\n\n index = 0\n for val in node.args:\n local_env = local_env.extend([args[index].arg], [eval_node(val, local_env)[0]])\n index += 1\n\n for node in body:\n val = eval_node(node, local_env)\n\n if node_name(node) == \"Return\":\n output_val = val[0]\n local_env = val[1]\n return output_val, env\n elif node_type == 'Return':\n # evaluate the node, return the value, env.\n return eval_node(node.value, env)\n elif node_type == 'Name':\n # Name(identifier id)- lookup the value binding in the env\n # return the value, env\n return env.lookup(node.id), env\n # Num(object n) -- a number, return the number, env.\n elif node_type == 'Num':\n return node.n, env", "def visit_Call(self, node: ast.Call) -> None:\n self._check_buggy_super_context(node)\n self.generic_visit(node)", "def get_call(call_node):\n if not isinstance(call_node, ast.Call):\n # print(\"this node is \" + str(type(call_node)) + \" node, not call node\")\n return None\n\n elif isinstance(call_node.func, ast.Name):\n return call_node.func.id\n\n elif isinstance(call_node.func, ast.Attribute):\n if isinstance(call_node.func.value, ast.Name):\n return call_node.func.value.id + '.' 
+ call_node.func.attr\n else:\n get_call(call_node.func.value)\n\n elif isinstance(call_node.func, ast.Call):\n get_call(call_node.func)", "def get_node_target(submodules: Mapping[str, torch.nn.Module], node: pippy.fx.Node) -> str:\n\n assert node.op in CALLABLE_NODE_OPS, (\n \"Expect op types of \" + \", \".join(CALLABLE_NODE_OPS) + f\", but found {node.op}\"\n )\n\n if node.op == \"call_module\":\n assert isinstance(node.target, str)\n submod = submodules[node.target]\n submod_type = getattr(submod, \"_base_class_origin\", type(submod))\n return get_acc_ops_name(submod_type)\n elif node.op == \"call_function\":\n target: Any = node.target\n return (\n f\"acc_ops.{target.__name__}\"\n if target.__module__ is not None and \"acc_ops\" in target.__module__\n else _get_qualified_name(target)\n )\n else:\n assert isinstance(node.target, str)\n return node.target", "def run_node(\n self,\n node,\n fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,\n onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n ):\n # Record stack trace of node in diagnostic.\n node_stack_trace = node.stack_trace\n if node_stack_trace:\n diagnostic = self.diagnostic_context.inflight_diagnostic(\n rule=diagnostics.rules.fx_node_to_onnx\n )\n diagnostic.with_additional_message(\n f\"### PyTorch source information\\n```\\n{node_stack_trace}\\n```\"\n )\n location = _location_from_fx_stack_trace(node_stack_trace)\n if location is not None:\n diagnostic.with_location(location)\n\n if node.op == \"placeholder\":\n self.placeholder(node, onnxscript_graph, fx_name_to_onnxscript_value)\n elif node.op == \"get_attr\":\n self.get_attr(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n fx_graph_module,\n )\n elif node.op == \"call_function\":\n self.call_function(\n node,\n onnxscript_tracer,\n fx_name_to_onnxscript_value,\n onnxfunction_dispatcher,\n op_level_debug,\n fx_graph_module,\n )\n elif node.op == \"call_method\":\n self.call_method(node)\n elif node.op == \"call_module\":\n self.call_module(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n onnxscript_tracer,\n fx_graph_module,\n onnxfunction_dispatcher,\n op_level_debug,\n )\n elif node.op == \"output\":\n self.output(node, onnxscript_graph, fx_name_to_onnxscript_value)\n else:\n raise RuntimeError(f\"Found node type not defined in torch.fx: {node.op}\")", "def __call__(self, *args):\n\n func_env = Environment(self.parent)\n self.define_args(func_env, *args)\n return evaluate(self.body, func_env)", "def dispatchMessageIteratorCall(self, tree):\n # simple case not a member function just an iterator with arguments\n if isinstance(tree.func, ast.Name):\n self.write(f\"FLAMEGPU->{tree.func.id}\")\n if isinstance(tree.func, ast.Attribute) :\n if isinstance(tree.func.value, ast.Name):\n # check that the iterator is supported\n if not tree.func.attr in self.fgpu_input_msg_iter_funcs:\n self.RaiseError(tree, f\"Message input loop iterator '{tree.func.attr}' is not supported.\")\n self.write(f\"FLAMEGPU->{tree.func.value.id}.{tree.func.attr}\")\n else:\n self.RaiseError(tree, \"Message input loop iterator format incorrect.\")\n\n # handle function arguments \n self.write(\"(\")\n self._CallArguments(tree)\n 
self.write(\")\")", "def visit_Call(self, node: ast.Call) -> None:\n self._check_open_call_context(node)\n self._check_type_compare(node)\n self._check_range_len(node)\n self.generic_visit(node)", "def visit_Call(self, node: ast.Call) -> None:\n self._check_floating_nan(node)\n self.generic_visit(node)", "def __call__(fun_name):", "def transform_call(call):\n return {\n 'type': 'call',\n 'chain': [str(fn.name) for fn in call.names()],\n 'arguments': [str(arg) for arg in call.arguments()],\n 'body': transform_block(call.body())\n }", "def _(self, node: FunctionDef):\n body_nodes = []\n for n in node.body:\n curr_piece = self.visit(n)\n if len(curr_piece) > 0:\n body_nodes.append(curr_piece)\n\n func_body = \" \".join(body_nodes)\n\n return f\"( {node.name} {func_body} )\"", "def generic_function(self, node, ordered_functions):\n for generic in node.fortran_generic:\n new = node.clone()\n ordered_functions.append(new)\n self.append_function_index(new)\n new._generated = \"fortran_generic\"\n fmt = new.fmtdict\n # XXX append to existing suffix\n if generic.fmtdict:\n fmt.update(generic.fmtdict)\n fmt.function_suffix = fmt.function_suffix + generic.function_suffix\n new.fortran_generic = {}\n new.wrap.assign(fortran=True)\n new.ast.declarator.params = generic.decls\n\n # Try to call original C function if possible.\n # All arguments are native scalar.\n need_wrapper = False\n if new.ast.declarator.is_indirect():\n need_wrapper = True\n \n for arg in new.ast.declarator.params:\n if arg.declarator.is_indirect():\n need_wrapper = True\n break\n elif arg.typemap.sgroup == \"native\":\n pass\n else:\n need_wrapper = True\n break\n\n if need_wrapper:\n # The C wrapper is required to cast constants.\n # generic.yaml: GenericReal\n new.C_force_wrapper = True\n new.wrap.c = True\n new._PTR_C_CXX_index = node._function_index\n else:\n new._PTR_F_C_index = node._function_index\n \n # Do not process templated node, instead process\n # generated functions above.\n # node.wrap.c = False\n node.wrap.fortran = False", "def visit_function(self, node):\n # ignore actual functions or method within a new style class\n if not node.is_method():\n return\n klass = node.parent.frame()\n for stmt in node.nodes_of_class(astroid.CallFunc):\n if node_frame_class(stmt) != node_frame_class(node):\n # Don't look down in other scopes.\n continue\n expr = stmt.func\n if not isinstance(expr, astroid.Getattr):\n continue\n call = expr.expr\n # skip the test if using super\n if isinstance(call, astroid.CallFunc) and \\\n isinstance(call.func, astroid.Name) and \\\n call.func.name == 'super':\n confidence = (INFERENCE if has_known_bases(klass)\n else INFERENCE_FAILURE)\n if not klass.newstyle:\n # super should not be used on an old style class\n self.add_message('super-on-old-class', node=node,\n confidence=confidence)\n else:\n # super first arg should be the class\n if not call.args and sys.version_info[0] == 3:\n # unless Python 3\n continue\n\n try:\n supcls = (call.args and next(call.args[0].infer())\n or None)\n except astroid.InferenceError:\n continue\n\n if supcls is None:\n self.add_message('missing-super-argument', node=call,\n confidence=confidence)\n continue\n\n if klass is not supcls:\n name = None\n # if supcls is not YES, then supcls was infered\n # and use its name. 
Otherwise, try to look\n # for call.args[0].name\n if supcls is not astroid.YES:\n name = supcls.name\n else:\n if hasattr(call.args[0], 'name'):\n name = call.args[0].name\n if name is not None:\n self.add_message('bad-super-call',\n node=call,\n args=(name, ),\n confidence=confidence)" ]
[ "0.6362595", "0.58942914", "0.57930183", "0.5696235", "0.56648827", "0.5555311", "0.5387862", "0.52408487", "0.5235522", "0.52275836", "0.51776576", "0.5120119", "0.51014704", "0.509886", "0.506699", "0.50667155", "0.5059268", "0.50211924", "0.49914703", "0.4978251", "0.49437723", "0.4930713", "0.4911009", "0.49019656", "0.48932612", "0.48795524", "0.4822934", "0.4794427", "0.47923595", "0.47719607" ]
0.790886
0
Checks the decorators of the function definition, which must be either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'. Each is then processed in a different way using a specific dispatcher. Function calls are checked, and only permitted (or user-defined) function calls are supported.
def _FunctionDef(self, t):
    self.write("\n")
    # check decorators
    if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0], ast.Attribute):
        self.RaiseError(t, "Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'")
    # FLAMEGPU_AGENT_FUNCTION
    if t.decorator_list[0].attr == 'agent_function' and t.decorator_list[0].value.id == 'pyflamegpu':
        if getattr(t, "returns", False):
            self.RaiseWarning(t, "Function definition return type not supported on 'pyflamegpu.agent_function'")
        self.fill(f"FLAMEGPU_AGENT_FUNCTION({t.name}, ")
        self.dispatchFGPUFunctionArgs(t)
        self.write(")")
    # FLAMEGPU_DEVICE_FUNCTION
    elif t.decorator_list[0].attr == 'device_function' and t.decorator_list[0].value.id == 'pyflamegpu':
        self.fill(f"FLAMEGPU_DEVICE_FUNCTION ")
        if t.returns:
            self.dispatchType(t.returns)
        else:
            self.write("void")
        self.write(f" {t.name}(")
        self.dispatchFGPUDeviceFunctionArgs(t)
        self.write(")")
        # add to list of defined functions that can be called
        self._device_functions.append(t.name)
    # FLAMEGPU_AGENT_FUNCTION_CONDITION
    elif t.decorator_list[0].attr == 'agent_function_condition' and t.decorator_list[0].value.id == 'pyflamegpu':
        # check for return annotation
        if not hasattr(t, "returns"):
            self.RaiseError(t, "Agent function conditions must have a 'bool' return type specified as a return type annotation")
        # check for return annotation type
        if not isinstance(t.returns, ast.Name):
            self.RaiseError(t, "Agent function conditions return type must be 'bool'")
        if t.returns.id != 'bool':
            self.RaiseError(t, "Agent function conditions return type must be 'bool'")
        # check to ensure no arguments (discard any with a warning)
        if t.args.args:
            self.RaiseWarning(t, "Agent function conditions does not support arguments. These will be discarded.")
        # write the agent function macro
        self.fill(f"FLAMEGPU_AGENT_FUNCTION_CONDITION({t.name})")
    else:
        self.RaiseError(t, "Function definition uses an unsupported decorator. Must use either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'")
    self.enter()
    self.dispatch(t.body)
    self.leave()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_FunctionDef(self, node):\n self.functions[node.name] = self._generate_pytest_decorators(node.decorator_list)\n self.generic_visit(node)", "def isValidFunction(self):\n for token in self.value:\n if token.type == 'defFunction' or token.type == 'callFunction':\n if token.value.split('(')[0] == self.name:\n return False\n return True", "def frontend_access_required(function=None):\n access_denied_info_url = reverse_lazy('excerptexport:access_denied')\n actual_decorator = user_passes_test(\n _may_user_access_osmaxx_frontend,\n login_url=access_denied_info_url\n )\n if function:\n return actual_decorator(function)\n return actual_decorator", "def check_arguments_are_mass_functions(function):\n @functools.wraps(function)\n def wrapped_function(*args):\n if len(args) < 2:\n raise TypeError(\n \"Not enough mass functions provided, it should receive at least one!\"\n )\n for i in range(len(args)):\n if not isinstance(args[i], MassFunction):\n raise TypeError(\n \"This method accept only mass functions as arguments!\"\n )\n return function(*args)\n return wrapped_function", "def test_tolerate_return_function_decorator():\n decorator = tolerate()\n ok_(inspect.isfunction(decorator))\n\n args, varargs, keywords, defaults = inspect.getargspec(decorator)\n eq_(len(args), 1, 'Return function should take one argument for function')", "def check_mass_functions_compatibility(function):\n @functools.wraps(function)\n def wrapped_function(*args):\n for i in range(len(args)):\n for j in range(len(args)):\n if i != j and not args[i].is_compatible(args[j]):\n raise IncompatibleMassFunctionsError(args[i], args[j])\n return function(*args)\n return wrapped_function", "def _func_only(func):\n if inspect.isfunction(func):\n return\n else:\n raise Exception(\"Only functions can be tasks\")", "def add_check_function(check_function: Callable):\n\n def decorator(func: Callable):\n @wraps(func)\n def wrapper(*args, **kwargs):\n check_function(*args, *kwargs.values())\n return func(*args, **kwargs)\n\n return wrapper\n\n name = getattr(check_function, '__name__', '`func`')\n decorator.__doc__ = f\"Check the function's arguments via `{name}` before calling it.\"\n return decorator", "def validate(host_calls):\n\n for name, host_call in host_calls.items():\n if not isinstance(host_call, (tuple, list)):\n raise ValueError('{} should be tuple or list'.format(name))\n if len(host_call) != 2:\n raise ValueError('{} should have two elements.'.format(name))\n if not callable(host_call[0]):\n raise TypeError('{}[0] should be callable.'.format(name))\n if not isinstance(host_call[1], (tuple, list, dict)):\n raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))\n\n if isinstance(host_call[1], (tuple, list)):\n fullargspec = tf_inspect.getfullargspec(host_call[0])\n fn_args = util.fn_args(host_call[0])\n # wrapped_hostcall_with_global_step uses varargs, so we allow that.\n if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):\n raise RuntimeError(\n 'In TPUEstimatorSpec.{}, length of tensors {} does not match '\n 'method args of the function, which takes {}.'.format(\n name, len(host_call[1]), len(fn_args)))", "def _parse_functions(self, locals: dict):\n functions_dict = dict(filter(self._isfunction, locals.items()))\n functions = []\n if not self.args:\n functions.append(next(iter(functions_dict.values())))\n else:\n for i in range(len(self.args)):\n if functions_dict.get(self.args[0]):\n functions.append(functions_dict[self.args.pop(0)])\n else:\n if not functions:\n msg = f'ezmake 
command args: {self.args} did not ' + \\\n 'match any functions defined in Makefile.py: %s' %\\\n list(functions_dict.keys())\n raise TypeError(msg)\n break\n self.functions = functions", "def _check_invocation_requirements(\n solid_def: \"OpDefinition\", context: Optional[\"UnboundOpExecutionContext\"]\n) -> None:\n # Check resource requirements\n if (\n solid_def.required_resource_keys\n and cast(\"DecoratedOpFunction\", solid_def.compute_fn).has_context_arg()\n and context is None\n ):\n node_label = solid_def.node_type_str # string \"solid\" for solids, \"op\" for ops\n raise DagsterInvalidInvocationError(\n f'{node_label} \"{solid_def.name}\" has required resources, but no context was provided.'\n f\" Use the `build_{node_label}_context` function to construct a context with the\"\n \" required resources.\"\n )\n\n # Check config requirements\n if not context and solid_def.config_schema.as_field().is_required:\n node_label = solid_def.node_type_str # string \"solid\" for solids, \"op\" for ops\n raise DagsterInvalidInvocationError(\n f'{node_label} \"{solid_def.name}\" has required config schema, but no context was'\n f\" provided. Use the `build_{node_label}_context` function to create a context with\"\n \" config.\"\n )", "def admin_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):\n actual_decorator = user_passes_test(\n lambda u: u.is_admin,\n login_url=login_url,\n redirect_field_name=redirect_field_name\n )\n if function:\n return actual_decorator(function)\n return actual_decorator", "def is_unsupported(func):\n\n for m in BUILTIN_LIKELY_MODULES:\n for v in m.__dict__.values():\n if not callable(v):\n continue\n if func is v:\n translator_logger.log(\n 2,\n \"Whitelist: {} is part of built-in module and does not have to be transformed.\".format(\n func\n ),\n )\n return True\n\n # NOTE: should be placed before `is_paddle_func`\n # The api(s) should be considered as plain function and convert\n # them into static layer code.\n from paddle.nn import Sequential\n\n PADDLE_NEED_CONVERT_APIS = [Sequential]\n if type(func) in PADDLE_NEED_CONVERT_APIS:\n return False\n\n if is_paddle_func(func):\n translator_logger.log(\n 2,\n \"Whitelist: {} is part of Paddle module and does not have to be transformed.\".format(\n func\n ),\n )\n return True", "def decorate_with_checker(func: CallableT) -> CallableT:\n assert not hasattr(func, \"__preconditions__\"), \\\n \"Expected func to have no list of preconditions (there should be only a single contract checker per function).\"\n\n assert not hasattr(func, \"__postconditions__\"), \\\n \"Expected func to have no list of postconditions (there should be only a single contract checker per function).\"\n\n assert not hasattr(func, \"__postcondition_snapshots__\"), \\\n \"Expected func to have no list of postcondition snapshots (there should be only a single contract checker \" \\\n \"per function).\"\n\n sign = inspect.signature(func)\n if '_ARGS' in sign.parameters:\n raise TypeError(\n 'The arguments of the function to be decorated with a contract checker include \"_ARGS\" which is '\n 'a reserved placeholder for positional arguments in the condition.')\n\n if '_KWARGS' in sign.parameters:\n raise TypeError(\n 'The arguments of the function to be decorated with a contract checker include \"_KWARGS\" which is '\n 'a reserved placeholder for keyword arguments in the condition.')\n\n param_names = list(sign.parameters.keys())\n\n # Determine the default argument values\n kwdefaults = resolve_kwdefaults(sign=sign)\n\n 
id_func = id(func)\n\n # (mristin, 2021-02-16)\n # Admittedly, this branching on sync/async is absolutely monstrous.\n # However, I couldn't find out an easier way to refactor the code so that it supports async.\n # Python expects us to explicitly colour functions as sync/async so we can not just put in an if-statement and\n # introduce an \"await\".\n #\n # The two wrappers need to be manually maintained in parallel.\n # Whenever you make a change, please inspect manually that both sync and async code exercises equivalent behavior.\n # For example, copy/paste the two blocks of code in separate files and perform a diff.\n\n if inspect.iscoroutinefunction(func):\n\n async def wrapper(*args, **kwargs): # type: ignore\n \"\"\"Wrap func by checking the preconditions and postconditions.\"\"\"\n kwargs_error = _assert_no_invalid_kwargs(kwargs)\n if kwargs_error:\n raise kwargs_error\n\n # We need to create a new in-progress set if it is None as the ``ContextVar`` does not accept\n # a factory function for the default argument. If we didn't do this, and simply set an empty\n # set as the default, ``ContextVar`` would always point to the same set by copying the default\n # by reference.\n in_progress = _IN_PROGRESS.get()\n if in_progress is None:\n in_progress = set()\n _IN_PROGRESS.set(in_progress)\n\n # Use try-finally instead of ExitStack for performance.\n try:\n # If the wrapper is already checking the contracts for the wrapped function, avoid a recursive loop\n # by skipping any subsequent contract checks for the same function.\n if id_func in in_progress:\n return await func(*args, **kwargs)\n\n in_progress.add(id_func)\n\n (preconditions, snapshots, postconditions) = _unpack_pre_snap_posts(wrapper)\n\n resolved_kwargs = kwargs_from_call(\n param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs)\n\n type_error = _assert_resolved_kwargs_valid(postconditions, resolved_kwargs)\n if type_error:\n raise type_error\n\n violation_error = await _assert_preconditions_async(\n preconditions=preconditions, resolved_kwargs=resolved_kwargs)\n if violation_error:\n raise violation_error\n\n # Capture the snapshots\n if postconditions and snapshots:\n resolved_kwargs['OLD'] = await _capture_old_async(\n snapshots=snapshots, resolved_kwargs=resolved_kwargs)\n\n # Ideally, we would catch any exception here and strip the checkers from the traceback.\n # Unfortunately, this can not be done in Python 3, see\n # https://stackoverflow.com/questions/44813333/how-can-i-elide-a-function-wrapper-from-the-traceback-in-python-3\n result = await func(*args, **kwargs)\n\n if postconditions:\n resolved_kwargs['result'] = result\n\n violation_error = await _assert_postconditions_async(\n postconditions=postconditions, resolved_kwargs=resolved_kwargs)\n if violation_error:\n raise violation_error\n\n return result\n finally:\n in_progress.discard(id_func)\n else:\n\n def wrapper(*args, **kwargs): # type: ignore\n \"\"\"Wrap func by checking the preconditions and postconditions.\"\"\"\n kwargs_error = _assert_no_invalid_kwargs(kwargs)\n if kwargs_error:\n raise kwargs_error\n\n # We need to create a new in-progress set if it is None as the ``ContextVar`` does not accept\n # a factory function for the default argument. 
If we didn't do this, and simply set an empty\n # set as the default, ``ContextVar`` would always point to the same set by copying the default\n # by reference.\n in_progress = _IN_PROGRESS.get()\n if in_progress is None:\n in_progress = set()\n _IN_PROGRESS.set(in_progress)\n\n # Use try-finally instead of ExitStack for performance.\n try:\n # If the wrapper is already checking the contracts for the wrapped function, avoid a recursive loop\n # by skipping any subsequent contract checks for the same function.\n if id_func in in_progress:\n return func(*args, **kwargs)\n\n in_progress.add(id_func)\n\n (preconditions, snapshots, postconditions) = _unpack_pre_snap_posts(wrapper)\n\n resolved_kwargs = kwargs_from_call(\n param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs)\n\n type_error = _assert_resolved_kwargs_valid(\n postconditions=postconditions, resolved_kwargs=resolved_kwargs)\n if type_error:\n raise type_error\n\n violation_error = _assert_preconditions(\n preconditions=preconditions, resolved_kwargs=resolved_kwargs, func=func)\n if violation_error:\n raise violation_error\n\n # Capture the snapshots\n if postconditions and snapshots:\n resolved_kwargs['OLD'] = _capture_old(\n snapshots=snapshots, resolved_kwargs=resolved_kwargs, func=func)\n\n # Ideally, we would catch any exception here and strip the checkers from the traceback.\n # Unfortunately, this can not be done in Python 3, see\n # https://stackoverflow.com/questions/44813333/how-can-i-elide-a-function-wrapper-from-the-traceback-in-python-3\n result = func(*args, **kwargs)\n\n if postconditions:\n resolved_kwargs['result'] = result\n\n violation_error = _assert_postconditions(\n postconditions=postconditions, resolved_kwargs=resolved_kwargs, func=func)\n if violation_error:\n raise violation_error\n\n return result\n finally:\n in_progress.discard(id_func)\n\n # Copy __doc__ and other properties so that doctests can run\n functools.update_wrapper(wrapper=wrapper, wrapped=func)\n\n assert not hasattr(wrapper, \"__preconditions__\"), \"Expected no preconditions set on a pristine contract checker.\"\n assert not hasattr(wrapper, \"__postcondition_snapshots__\"), \\\n \"Expected no postcondition snapshots set on a pristine contract checker.\"\n assert not hasattr(wrapper, \"__postconditions__\"), \"Expected no postconditions set on a pristine contract checker.\"\n\n # Precondition is a list of condition groups (i.e. 
disjunctive normal form):\n # each group consists of AND'ed preconditions, while the groups are OR'ed.\n #\n # This is necessary in order to implement \"require else\" logic when a class weakens the preconditions of\n # its base class.\n setattr(wrapper, \"__preconditions__\", [])\n setattr(wrapper, \"__postcondition_snapshots__\", [])\n setattr(wrapper, \"__postconditions__\", [])\n\n return wrapper # type: ignore", "def visit_any_function(self, node: AnyFunctionDef) -> None:\n self._check_unused_variables(node)\n self._check_generator(node)\n self._check_descriptor_decorators(node)\n self.generic_visit(node)", "def accept_funcs(func):\n @wraps(func)\n def wrapper(funcs, *args, **kwargs):\n if hasattr(funcs[0], '__name__'):\n funcs = [(f.__name__, f) for f in funcs]\n return func(funcs, *args, **kwargs)\n return wrapper", "def check_chief(function_to_decorate):\r\n @wraps(function_to_decorate)\r\n def decorated_function(*args, **kwargs):\r\n \tif g.my['rank'] > 15:\r\n \t\tabort(401)\r\n \treturn function_to_decorate(*args, **kwargs)\r\n return decorated_function", "def check_admin(function_to_decorate):\r\n @wraps(function_to_decorate)\r\n def decorated_function(*args, **kwargs):\r\n if g.my['rank'] > 25:\r\n abort(401)\r\n return function_to_decorate(*args, **kwargs)\r\n return decorated_function", "def check_fcn_attrs(self, node):\n options = node.options\n\n ast = node.ast\n declarator = ast.declarator\n node._has_found_default = False\n\n for attr in declarator.attrs:\n if attr[0] == \"_\": # internal attribute\n continue\n if attr not in [\n \"api\", # arguments to pass to C wrapper.\n \"allocatable\", # return a Fortran ALLOCATABLE\n \"cdesc\",\n \"deref\", # How to dereference pointer\n \"dimension\",\n \"free_pattern\",\n \"len\",\n \"name\",\n \"owner\",\n \"pure\",\n \"rank\",\n ]:\n raise RuntimeError(\n \"Illegal attribute '{}' for function '{}' define at line {}\".format(\n attr, node.ast.name, node.linenumber\n )\n )\n\n meta = declarator.metaattrs\n if ast.typemap is None:\n print(\"XXXXXX typemap is None\")\n if ast.typemap.sgroup == \"shadow\":\n if options.C_shadow_result:\n meta[\"api\"] = \"capptr\"\n else:\n meta[\"api\"] = \"capsule\"\n if declarator.is_ctor():\n meta[\"intent\"] = \"ctor\"\n elif declarator.is_dtor():\n meta[\"intent\"] = \"dtor\"\n else:\n meta[\"intent\"] = declarator.get_subprogram()\n self.check_deref_attr_func(node)\n self.check_common_attrs(node.ast)\n\n for arg in declarator.params:\n if arg.declarator is None:\n raise RuntimeError(\"Argument must have name in {} at line {}\".format(\n node.decl, node.linenumber))\n self.check_arg_attrs(node, arg)\n\n if node.fortran_generic:\n for generic in node.fortran_generic:\n for garg in generic.decls:\n generic._has_found_default = False\n self.check_arg_attrs(generic, garg, node.options)\n check_implied_attrs(node, generic.decls)\n else:\n check_implied_attrs(node, declarator.params)\n\n self.parse_attrs(node, ast)", "def _visit_decorators_and_check_asynq(self, decorator_list):\n async_kind = AsyncFunctionKind.non_async\n is_classmethod = False\n is_decorated_coroutine = False\n is_staticmethod = False\n decorators = []\n for decorator in decorator_list:\n # We have to descend into the Call node because the result of\n # asynq.asynq() is a one-off function that we can't test against.\n # This means that the decorator will be visited more than once, which seems OK.\n if isinstance(decorator, ast.Call):\n decorator_value = self.visit(decorator)\n callee = self.visit(decorator.func)\n if 
isinstance(callee, KnownValue):\n if safe_in(callee.val, self.config.ASYNQ_DECORATORS):\n if any(kw.arg == \"pure\" for kw in decorator.keywords):\n async_kind = AsyncFunctionKind.pure\n else:\n async_kind = AsyncFunctionKind.normal\n elif safe_in(callee.val, self.config.ASYNC_PROXY_DECORATORS):\n # @async_proxy(pure=True) is a noop, so don't treat it specially\n if not any(kw.arg == \"pure\" for kw in decorator.keywords):\n async_kind = AsyncFunctionKind.async_proxy\n decorators.append((callee, decorator_value))\n else:\n decorator_value = self.visit(decorator)\n if decorator_value == KnownValue(classmethod):\n is_classmethod = True\n elif decorator_value == KnownValue(staticmethod):\n is_staticmethod = True\n elif asyncio is not None and decorator_value == KnownValue(\n asyncio.coroutine\n ):\n is_decorated_coroutine = True\n decorators.append((decorator_value, decorator_value))\n return FunctionInfo(\n async_kind=async_kind,\n is_decorated_coroutine=is_decorated_coroutine,\n is_classmethod=is_classmethod,\n is_staticmethod=is_staticmethod,\n decorators=decorators,\n )", "def _is_function(self, words):\n if words[0] == 'function':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_FUNCTION command.\".format(self._file_line))\n return True\n else:\n return False", "def test_check_fn_validity():\n with pytest.raises(TypeError, match=r'.*did not pass a function.*'):\n s7.check_doc_len(\"abc\")\n s7.check_doc_len(100)\n\n def test_doc_len_multi():\n \"\"\"Checks if the passed function have more than 50 characters in doc string \"\"\"\n\n def helper(doc):\n if doc:\n return True if len(doc) > 50 else False\n else:\n return 0\n\n def f():\n pass\n\n for item in [100, 1.0, \"abcd\", f, s7.add, s7.mul, s7.div]:\n try:\n if isinstance(item, types.FunctionType):\n doc = item.__doc__\n f = s7.check_doc_len(item)\n assert f() is helper(doc)\n except Exception as e:\n assert e.__class__.__name__ == TypeError.__name__", "def test_require_at_least_one_and_several_provided(self):\n _func = at_least_one_of('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('ahoy', 'there'), 'foo')\n self.assertEqual(_func(arg1='ahoy', arg2='there'), 'foo')\n self.assertEqual(_func('ahoy', arg2='there', arg3='matey'), 'foo')", "def _OHE_checker(func):\n\n @wraps(func)\n def wrapper_checker(database, features_list=None):\n _CheckInput._check_database_input(database)\n if features_list:\n for column in features_list:\n _CheckInput._check_column_in_database(column,database)\n return func(database, features_list)\n return wrapper_checker", "def _validate_args(\n autologging_integration,\n function_name,\n user_call_args,\n user_call_kwargs,\n autologging_call_args,\n autologging_call_kwargs,\n):\n\n def _validate_new_input(inp):\n \"\"\"\n Validates a new input (arg or kwarg) introduced to the underlying / original ML function\n call during the execution of a patched ML function. 
The new input is valid if:\n\n - The new input is a function that has been decorated with\n `exception_safe_function_for_class` or `pickalable_exception_safe_function`\n - OR the new input is a class with the `ExceptionSafeClass` metaclass\n - OR the new input is a list and each of its elements is valid according to the\n these criteria\n \"\"\"\n if type(inp) == list:\n for item in inp:\n _validate_new_input(item)\n elif callable(inp):\n assert getattr(inp, _ATTRIBUTE_EXCEPTION_SAFE, False), (\n f\"New function argument '{inp}' passed to original function is not exception-safe.\"\n \" Please decorate the function with `exception_safe_function` or \"\n \"`pickalable_exception_safe_function`\"\n )\n else:\n assert hasattr(inp, \"__class__\") and type(inp.__class__) in [\n ExceptionSafeClass,\n ExceptionSafeAbstractClass,\n ], (\n f\"Invalid new input '{inp}'. New args / kwargs introduced to `original` function \"\n \"calls by patched code must either be functions decorated with \"\n \"`exception_safe_function_for_class`, instances of classes with the \"\n \"`ExceptionSafeClass` or `ExceptionSafeAbstractClass` metaclass safe or lists of \"\n \"such exception safe functions / classes.\"\n )\n\n def _assert_autologging_input_positional_args_are_superset(\n autologging_call_input, user_call_input\n ):\n length_diff = len(autologging_call_input) - len(user_call_input)\n assert (\n length_diff >= 0\n ), f\"{length_diff} expected inputs are missing from the call to the original function.\"\n\n def _assert_autologging_input_kwargs_are_superset(autologging_call_input, user_call_input):\n assert set(user_call_input.keys()).issubset(set(autologging_call_input.keys())), (\n \"Keyword or dictionary arguments to original function omit\"\n \" one or more expected keys: '{}'\".format(\n set(user_call_input.keys()) - set(autologging_call_input.keys())\n )\n )\n\n def _validate(autologging_call_input, user_call_input=None):\n \"\"\"\n Validates that the specified `autologging_call_input` and `user_call_input`\n are compatible. If `user_call_input` is `None`, then `autologging_call_input`\n is regarded as a new input added by autologging and is validated using\n `_validate_new_input`. 
Otherwise, the following properties must hold:\n\n - `autologging_call_input` and `user_call_input` must have the same type\n (referred to as \"input type\")\n - if the input type is a tuple, list or dictionary, then `autologging_call_input` must\n be equivalent to `user_call_input` or be a superset of `user_call_input`\n - for all other input types, `autologging_call_input` and `user_call_input`\n must be equivalent by reference equality or by object equality\n\n :param autologging_call_input: call input from autologging\n :param user_call_input: call input from user\n \"\"\"\n\n if user_call_input is None and autologging_call_input is not None:\n _validate_new_input(autologging_call_input)\n return\n\n assert type(autologging_call_input) == type(\n user_call_input\n ), \"Type of input to original function '{}' does not match expected type '{}'\".format(\n type(autologging_call_input), type(user_call_input)\n )\n\n if type(autologging_call_input) in [list, tuple]:\n _assert_autologging_input_positional_args_are_superset(\n autologging_call_input, user_call_input\n )\n # If the autologging call input is longer than the user call input, we `zip_longest`\n # will pad the user call input with `None` values to ensure that the subsequent calls\n # to `_validate` identify new inputs added by the autologging call\n for a, u in itertools.zip_longest(autologging_call_input, user_call_input):\n _validate(a, u)\n elif type(autologging_call_input) == dict:\n _assert_autologging_input_kwargs_are_superset(autologging_call_input, user_call_input)\n for key in autologging_call_input.keys():\n _validate(autologging_call_input[key], user_call_input.get(key, None))\n else:\n assert (\n autologging_call_input is user_call_input\n or autologging_call_input == user_call_input\n ), (\n \"Input to original function does not match expected input.\"\n f\" Original: '{autologging_call_input}'. 
Expected: '{user_call_input}'\"\n )\n\n # Similar validation logic found in _validate, unraveling the list of arguments to exclude\n # checks for any validation exempt positional arguments.\n _assert_autologging_input_positional_args_are_superset(autologging_call_args, user_call_args)\n for index, autologging_call_arg, user_call_arg in itertools.zip_longest(\n range(len(user_call_args)), autologging_call_args, user_call_args\n ):\n if not _is_arg_exempt_from_validation(\n autologging_integration,\n function_name,\n user_call_arg,\n argument_index=index,\n ):\n _validate(autologging_call_arg, user_call_arg)\n\n # Similar validation logic found in _validate, unraveling the dictionary of arguments to exclude\n # checks for any validation exempt keyword arguments.\n _assert_autologging_input_kwargs_are_superset(autologging_call_kwargs, user_call_kwargs)\n for key in autologging_call_kwargs.keys():\n if not _is_arg_exempt_from_validation(\n autologging_integration,\n function_name,\n user_call_kwargs.get(key, None),\n argument_name=key,\n ):\n _validate(\n autologging_call_kwargs[key],\n user_call_kwargs.get(key, None),\n )", "def __call__(self, fn):\n fn.handler = True\n fn.function = True\n\n # Circuits properties\n fn.names = self.names\n fn.priority = self.kwargs.get(\"priority\", 0)\n fn.channel = \"functions.{0}\".format(self.names[0])\n fn.override = self.kwargs.get(\"override\", False)\n fn.event = True\n\n @wraps(fn)\n def app_function_decorator(itself, event, *args, **kwargs):\n \"\"\"\n The decorated function\n\n :param itself: The function to decorate\n :type itself: resilient_circuits.ResilientComponent\n :param event: The Event with the StompFrame and the Message read off the Message Destination\n :type event: resilient_circuits.action_message.FunctionMessage\n \"\"\"\n function_inputs = event.message.get(\"inputs\", {})\n\n def _invoke_app_function(evt, **kwds):\n \"\"\"\n The code to call when a function with the decorator `@app_function(api_name)`\n is invoked.\n\n Returns result_list when function with the decorator `@app_function(api_name)` is\n finished processing.\n\n A method that has this handler should yield a StatusMessage or a FunctionResult\n - When a StatusMessage is yield'ed a StatusMessageEvent is fired with the text of the StatusMessage\n - When a FunctionResult is yield'ed it calls resilient-lib.ResultPayload.done() with the parameters of\n FunctionResult being passed to it and appends the result to result_list. 
E.g:\n `yield FunctionResult({\"key\":\"value\"})`\n `yield FunctionResult({\"key\": \"value\"}, success=False, reason=\"Bad call\")`\n\n :param evt: The Event with the StompFrame and the Message read off the Message Destination\n :type fn: resilient_circuits.action_message.FunctionMessage\n \"\"\"\n LOG.debug(\"Running _invoke_app_function in Thread: %s\", threading.currentThread().name)\n\n result_list = []\n\n # Validate the fn_inputs in the Message\n fn_inputs = validate_fields([], kwds)\n LOG.info(\"[%s] Validated function inputs\", evt.name)\n LOG.debug(\"[%s] fn_inputs: %s\", evt.name, fn_inputs)\n\n rp = ResultPayload(itself.PACKAGE_NAME, version=constants.APP_FUNCTION_PAYLOAD_VERSION, **fn_inputs)\n\n fn_inputs_tuple = namedtuple(\"fn_inputs\", fn_inputs.keys())(*fn_inputs.values())\n\n # Set evt.message in local thread storage\n itself.set_fn_msg(evt.message)\n\n # Invoke the actual Function\n fn_results = fn(itself, fn_inputs_tuple)\n\n for r in fn_results:\n if isinstance(r, StatusMessage):\n LOG.info(\"[%s] StatusMessage: %s\", evt.name, r)\n itself.fire(StatusMessageEvent(parent=evt, message=r.text))\n\n elif isinstance(r, FunctionResult):\n r.name = evt.name\n if not r.custom_results:\n r.value = rp.done(\n content=r.value,\n success=r.success,\n reason=r.reason)\n LOG.info(\"[%s] Returning results\", r.name)\n result_list.append(r)\n\n elif isinstance(r, Exception):\n raise r\n\n else:\n # Whatever this is, add it to the results\n LOG.debug(r)\n result_list.append(r)\n\n return result_list\n\n invoke_app_function = task(_invoke_app_function, event, **function_inputs)\n fn_result = yield itself.call(invoke_app_function, \"functionworker\")\n yield fn_result.value\n\n return app_function_decorator", "def check_implemented_functions(_class):\n mandatory_functions_to_implement = [('generate', 2), ('__init__', 6)]\n implemented_class_function_names = get_implemented_class_functions(_class)\n for function in mandatory_functions_to_implement:\n function_name = function[0]\n number_function_mandatory_params = function[1]\n # check if the method is implemented in the class\n if function_name not in implemented_class_function_names:\n logger.error(f\"Method {function_name} not implemented in class {_class.__name__}\")\n raise SystemExit(0)\n ref_function = getattr(_class, function_name)\n # check if the method is expecting the mandatory number of arguments\n if not len(inspect.getfullargspec(ref_function).args) == number_function_mandatory_params:\n logger.error(\n f\"Method {function_name} implemented in class {_class.__name__} \"\n f\"is not expecting {number_function_mandatory_params} passed arguments\")\n raise SystemExit(0)", "def test_function_definition_with_decorator(self):\n self.script(\"# script.py\\n\"\n \"def g(x):\\n\"\n \" return x\\n\"\n \"@g\\n\"\n \"def f():\\n\"\n \" 'fdoc'\\n\"\n \" pass\\n\")\n self.compile()\n\n script = self.find_code_component(name=\"script.py\")\n function_def = self.find_code_component(name=\"f\")\n\n self.assertEqual(function_def.type, \"function_def\")\n self.assertEqual(function_def.mode, \"w\")\n self.assertEqual(function_def.first_char_line, 4)\n self.assertEqual(function_def.first_char_column, 0)\n self.assertEqual(function_def.last_char_line, 7)\n self.assertEqual(function_def.last_char_column, 8)\n self.assertEqual(function_def.container_id, script.id)\n\n function_def_block = self.metascript.code_blocks_store[function_def.id]\n self.assertEqual(function_def_block.code,\n \"@g\\n\"\n \"def f():\\n\"\n \" 'fdoc'\\n\"\n \" pass\")\n 
self.assertEqual(function_def_block.docstring, \"fdoc\")\n self.assertTrue(bool(function_def_block.code_hash))", "def eval_function_call(func_call, motif_node_dict):\n print(\"\\x1b[6;30;42m\" + 'Evaluating ' + func_call.name.name + ' function...' + '\\x1b[0m')\n # CamFlow \"alloc_provenance\" take two arguments but only the first is needed for modeling.\n if func_call.name.name == 'alloc_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n return provenance.alloc_provenance(arg_names[0], None)\n # CamFlow \"task_cred_xxx\" take two arguments but no argument is needed for modeling.\n elif func_call.name.name == 'task_cred_xxx':\n return provenance.task_cred_xxx(None, None)\n # CamFlow \"branch_mmap\" take two arguments but no argument is needed for modeling.\n elif func_call.name.name == 'branch_mmap':\n return provenance.branch_mmap(None, None)\n # CamFlow \"uses_two\" function takes five arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'uses_two':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second and third arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in uses_two must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in uses_two must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in uses_two must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in uses_two must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.uses_two(arg_names[0], val1, val2, None, None, motif_node_dict)\n # CamFlow \"informs\" function takes five arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'informs':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second and third arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in informs must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in informs must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in informs must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in informs must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.informs(arg_names[0], val1, val2, None, None, motif_node_dict)\n # CamFlow \"record_terminate\" function takes two arguments.\n elif func_call.name.name == 'record_terminate':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second arguments must be converted to MotifNode object first.\n arg1 = arg_names[1]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_terminate must 
exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_terminate must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_terminate(arg_names[0], val1, motif_node_dict)\n # CamFlow \"generates\" function takes six arguments but only the first four are needed for modeling.\n elif func_call.name.name == 'generates':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second, third, and fourth arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n arg3 = arg_names[3]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in generates must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in generates must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in generates must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in generates must have values in the dictionary.\\033[0m')\n exit(1)\n if arg3 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in generates must exist in the dictionary.\\033[0m')\n exit(1)\n val3 = getLastValueFromKey(motif_node_dict, arg3)\n if not val3:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in generates must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.generates(arg_names[0], val1, val2, val3, None, None, motif_node_dict)\n # CamFlow \"get_task_provenance\" takes no arguments.\n elif func_call.name.name == 'get_task_provenance':\n return provenance.get_task_provenance()\n # CamFlow \"get_cred_provenance\" takes no arguments.\n elif func_call.name.name == 'get_cred_provenance':\n return provenance.get_cred_provenance(motif_node_dict)\n # CamFlow \"uses\" takes six arguments but only the first four are needed for modeling.\n elif func_call.name.name == 'uses':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second, third, and fourth arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n arg3 = arg_names[3]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in uses must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in uses must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in uses must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in uses must have values in the dictionary.\\033[0m')\n exit(1)\n if arg3 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in uses must exist in the dictionary.\\033[0m')\n exit(1)\n val3 = getLastValueFromKey(motif_node_dict, arg3)\n if not val3:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in uses must have 
values in the dictionary.\\033[0m')\n exit(1)\n return provenance.uses(arg_names[0], val1, val2, val3, None, None, motif_node_dict)\n # CamFlow \"refresh_inode_provenance\" takes two arguments but only the second one is needed for modeling.\n elif func_call.name.name == 'refresh_inode_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second argument must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in refresh_inode_provenance must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in refresh_inode_provenance must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.refresh_inode_provenance(None, val1, motif_node_dict)\n # CamFlow \"get_inode_provenance\" takes two arguments but only the second argument is needed for modeling.\n elif func_call.name.name == 'get_inode_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n if arg_names[1] == 'false':\n arg1 = False\n elif arg_names[1] == 'true':\n arg1 = True\n else:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg_names[1] + ' in get_inode_provenance is unknown.\\033[0m')\n exit(1)\n return provenance.get_inode_provenance(None, arg1, motif_node_dict)\n # CamFlow \"get_dentry_provenance\" takes two arguments but only the second argument is needed for modeling. \n elif func_call.name.name == 'get_dentry_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n if arg_names[1] == 'false':\n arg1 = False\n elif arg_names[1] == 'true':\n arg1 = True\n else:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg_names[1] + ' in get_dentry_provenance is unknown.\\033[0m')\n exit(1)\n return provenance.get_dentry_provenance(None, arg1, motif_node_dict)\n # CamFlow \"record_inode_name_from_dentry\" takes three arguments, but only the second and the third arguments are needed for modeling.\n elif func_call.name.name == 'record_inode_name_from_dentry':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second argument must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_inode_name_from_dentry must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_inode_name_from_dentry must have values in the dictionary.\\033[0m')\n exit(1)\n if arg_names[2] == 'false':\n arg2 = False\n elif arg_names[2] == 'true':\n arg2 = True\n else:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_inode_name_from_dentry is unknown.\\033[0m')\n exit(1)\n return provenance.record_inode_name_from_dentry(None, val1, arg2, motif_node_dict)\n # CamFlow \"record_node_name\" takes three arguments, but only the first and the third arguments are needed for modeling.\n elif func_call.name.name == 'record_node_name':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second argument must be converted to MotifNode objects first.\n arg0 = arg_names[0]\n if arg0 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_node_name must exist in the dictionary.\\033[0m')\n exit(1)\n val0 = 
getLastValueFromKey(motif_node_dict, arg0)\n if not val0:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_node_name must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_node_name(val0, None, arg_names[2], motif_node_dict)\n # CamFlow \"derives\" function takes five arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'derives':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second and third arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in derives must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in derives must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in derives must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in derives must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.derives(arg_names[0], val1, val2, None, None, motif_node_dict)\n # CamFlow \"record_write_xattr\" function takes eight arguments but only the first four are needed for modeling.\n elif func_call.name.name == 'record_write_xattr':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second, third, and fourth arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n arg3 = arg_names[3]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_write_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_write_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_write_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_write_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n if arg3 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in record_write_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val3 = getLastValueFromKey(motif_node_dict, arg3)\n if not val3:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg3 + ' in record_write_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_write_xattr(arg_names[0], val1, val2, val3, None, None, None, None, motif_node_dict)\n # CamFlow \"record_read_xattr\" function takes four arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'record_read_xattr':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n arg0 = arg_names[0]\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg0 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_read_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val0 = 
getLastValueFromKey(motif_node_dict, arg0)\n if not val0:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_read_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_read_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_read_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_read_xattr must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_read_xattr must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_read_xattr(val0, val1, val2, None, motif_node_dict)\n # CamFlow \"get_file_provenance\" takes two arguments but only the second argument is needed for modeling. \n elif func_call.name.name == 'get_file_provenance':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n if arg_names[1] == 'false':\n arg1 = False\n elif arg_names[1] == 'true':\n arg1 = True\n else:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg_names[1] + ' in get_file_provenance is unknown.\\033[0m')\n exit(1)\n return provenance.get_file_provenance(None, arg1, motif_node_dict)\n # CamFlow \"influences_kernel\" function takes four arguments but only the first three are needed for modeling.\n elif func_call.name.name == 'influences_kernel':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n # The second and third arguments must be converted to MotifNode objects first.\n arg1 = arg_names[1]\n arg2 = arg_names[2]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in influences_kernel must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in influences_kernel must have values in the dictionary.\\033[0m')\n exit(1)\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in influences_kernel must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in influences_kernel must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.influences_kernel(arg_names[0], val1, val2, None, motif_node_dict)\n # CamFlow \"get_socket_inode_provenance\" takes one argument but it is not needed for modeling.\n elif func_call.name.name == 'get_socket_provenance':\n return provenance.get_socket_provenance(None, motif_node_dict)\n # CamFlow \"get_socket_inode_provenance\" takes one argument but it is not needed for modeling. \n elif func_call.name.name == 'get_socket_inode_provenance':\n return provenance.get_socket_inode_provenance(None, motif_node_dict)\n # CamFlow \"record_address\" takes three arguments but only the last argument is needed for modeling. 
\n elif func_call.name.name == 'record_address':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n arg2 = arg_names[2]\n if arg2 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_address must exist in the dictionary.\\033[0m')\n exit(1)\n val2 = getLastValueFromKey(motif_node_dict, arg2)\n if not val2:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg2 + ' in record_address must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_address(None, None, val2, motif_node_dict)\n # CamFlow \"get_sk_inode_provenance\" takes one argument but it is not needed for modeling. \n elif func_call.name.name == 'get_sk_inode_provenance':\n return provenance.get_sk_inode_provenance(None, motif_node_dict)\n # CamFlow \"get_sk_provenance\" takes one argument but it is not needed for modeling.\n elif func_call.name.name == 'get_sk_provenance':\n return provenance.get_sk_provenance(None, motif_node_dict)\n # CamFlow \"record_packet_content\" takes two arguments but only the second argument is needed for modeling. \n elif func_call.name.name == 'record_packet_content':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n arg1 = arg_names[1]\n if arg1 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_packet_content must exist in the dictionary.\\033[0m')\n exit(1)\n val1 = getLastValueFromKey(motif_node_dict, arg1)\n if not val1:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg1 + ' in record_packet_content must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_packet_content(None, val1, motif_node_dict)\n # CamFlow \"record_args\" takes two arguments but only the first argument is needed for modeling.\n elif func_call.name.name == 'record_args':\n args = func_call.args.exprs\n arg_names = get_arg_name(args)\n arg0 = arg_names[0]\n if arg0 not in motif_node_dict:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_args must exist in the dictionary.\\033[0m')\n exit(1)\n val0 = getLastValueFromKey(motif_node_dict, arg0)\n if not val0:\n print('\\33[101m' + '[error][eval_function_call]: ' + arg0 + ' in record_args must have values in the dictionary.\\033[0m')\n exit(1)\n return provenance.record_args(val0, None, motif_node_dict)\n else:\n return None, None", "def accepts(*types):\n\n def check_accepts(f):\n \"\"\"Check the types.\"\"\"\n spec = tf_inspect.getargspec(f)\n\n num_function_arguments = len(spec.args)\n if len(types) != num_function_arguments:\n raise Error(\n \"Function %r has %d arguments but only %d types were provided in the \"\n \"annotation.\" % (f, num_function_arguments, len(types)))\n\n if spec.defaults:\n num_defaults = len(spec.defaults)\n for (name, a, t) in zip(spec.args[-num_defaults:],\n spec.defaults,\n types[-num_defaults:]):\n allowed_type = _replace_forward_references(t, f.__globals__)\n if not isinstance(a, allowed_type):\n raise Error(\"default argument value %r of type %r is not an instance \"\n \"of the allowed type %s for the %s argument to %r\"\n % (a, type(a), _type_repr(allowed_type), name, f))\n\n @functools.wraps(f)\n def new_f(*args, **kwds):\n \"\"\"A helper function.\"\"\"\n for (a, t) in zip(args, types):\n allowed_type = _replace_forward_references(t, f.__globals__)\n if not isinstance(a, allowed_type):\n raise Error(\"%r of type %r is not an instance of the allowed type %s \"\n \"for %r\" % (a, type(a), _type_repr(allowed_type), f))\n return f(*args, 
**kwds)\n\n return new_f\n\n return check_accepts" ]
[ "0.6134093", "0.5936042", "0.57259756", "0.56994855", "0.5666439", "0.5570822", "0.5568335", "0.55557805", "0.5536746", "0.55267", "0.5503715", "0.55015457", "0.54883873", "0.5477766", "0.5459286", "0.54585314", "0.5451225", "0.543271", "0.54145896", "0.5368069", "0.53297734", "0.5303614", "0.53034496", "0.5297371", "0.52900183", "0.52811486", "0.52656263", "0.5252748", "0.5241029", "0.5239762" ]
0.7230727
0
Two types of for loop are supported. Either: 1) a message for loop, in which case the format requires an iterator using the named pyflamegpu function argument 'message_in'; or 2) a range-based for loop with 1 to 3 arguments, which is converted into a C-style loop.
def _For(self, t):
    # if message loop then process differently
    if isinstance(t.iter, ast.Name):
        if t.iter.id == self._input_message_var:
            self.dispatchMessageLoop(t)
        else:
            self.RaiseError(t, "Range based for loops only support message iteration using 'message_in' iterator")
    # do not support for else
    elif t.orelse:
        self.RaiseError(t, "For else not supported")
    # allow calls but only to range function
    elif isinstance(t.iter, ast.Call):
        # simple function call e.g. message_in() or range()
        if isinstance(t.iter.func, ast.Name):
            # catch case of message_input with arguments (e.g. spatial messaging)
            if t.iter.func.id == self._input_message_var:
                self.dispatchMessageLoop(t)
            # otherwise permit only range based for loops
            elif t.iter.func.id == "range":
                # switch on different uses of range based on number of arguments
                if len(t.iter.args) == 1:
                    self.fill(f"for (int ")
                    self.dispatch(t.target)
                    self.write("=0;")
                    self.dispatch(t.target)
                    self.write("<")
                    self.dispatch(t.iter.args[0])
                    self.write(";")
                    self.dispatch(t.target)
                    self.write("++)")
                elif len(t.iter.args) == 2:
                    self.fill(f"for (int ")
                    self.dispatch(t.target)
                    self.write("=")
                    self.dispatch(t.iter.args[0])
                    self.write(";")
                    self.dispatch(t.target)
                    self.write("<")
                    self.dispatch(t.iter.args[1])
                    self.write(";")
                    self.dispatch(t.target)
                    self.write("++)")
                elif len(t.iter.args) == 3:
                    self.fill(f"for (int ")
                    self.dispatch(t.target)
                    self.write("=")
                    self.dispatch(t.iter.args[0])
                    self.write(";")
                    self.dispatch(t.target)
                    self.write("<")
                    self.dispatch(t.iter.args[1])
                    self.write(";")
                    self.dispatch(t.target)
                    self.write("+=")
                    self.dispatch(t.iter.args[2])
                    self.write(")")
                else:
                    self.RaiseError(t, "Range based for loops requires use of 'range' function with arguments and not keywords")
                self.enter()
                self.dispatch(t.body)
                self.leave()
            else:
                self.RaiseError(t, "Range based for loops only support calls to the 'range' function")
        # member function call can only be on message_in.func() type call.
        elif isinstance(t.iter.func, ast.Attribute):
            # must be an attribute (e.g. calling a member of message_in)
            if t.iter.func.value.id == self._input_message_var:
                self.dispatchMessageLoop(t)
            else:
                self.RaiseError(t, "Range based for loops only support calling members of message input variable")
        else:
            self.RaiseError(t, "Range based for loops only support message iteration or use of 'range'")
    else:
        self.RaiseError(t, "Range based for loops only support message iteration or use of 'range'")
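To make the two accepted loop shapes concrete, here is a minimal sketch of a function body that uses both forms. It is not taken from the dataset: how 'message_in' is supplied inside a real pyflamegpu agent function is assumed, and any Python iterable stands in for it so the snippet runs on its own. The C++ forms in the comments are read off the write() calls in _For above and the dispatchMessageLoop helper quoted among the negatives below, not captured from actual translator output.

# Minimal sketch: the two loop shapes _For accepts. 'loop_shapes' is a made-up
# name; 'message_in' normally names the message-input argument of an agent
# function, but here any iterable works so the example is self-contained.
def loop_shapes(message_in):
    count = 0

    # 1) Message loop over the named 'message_in' argument, handled by
    #    dispatchMessageLoop and emitted roughly as:
    #    for (const auto& msg : FLAMEGPU->message_in) { ... }
    for msg in message_in:
        count += 1

    # 2) Range-based loops with 1 to 3 positional arguments become C-style loops:
    #    range(4)       -> for (int i=0;i<4;i++)
    #    range(1, 4)    -> for (int i=1;i<4;i++)
    #    range(1, 4, 2) -> for (int i=1;i<4;i+=2)
    for i in range(1, 4, 2):
        count += i

    return count

print(loop_shapes([10, 20, 30]))  # 7: three messages counted, plus 1 + 3 from the range loop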
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dispatchMessageLoop(self, tree):\n self.fill(\"for (const auto& \")\n self.dispatch(tree.target)\n self.write(\" : \")\n # if simple message iterator\n if isinstance(tree.iter, ast.Name):\n if not tree.iter.id == self._input_message_var:\n self.RaiseError(t, f\"Message input loop requires use of '{self._input_message_var}' as iterator.\")\n # write with prefix\n self.write(f\"FLAMEGPU->{self._input_message_var}\")\n # if it is a call then handle the different cases\n elif isinstance(tree.iter, ast.Call):\n self.dispatchMessageIteratorCall(tree.iter)\n #otherwise not supported\n else :\n self.RaiseError(tree, f\"Message input loop iterator in unsupported format\")\n self.write(\")\")\n self._message_iterator_var = tree.target.id\n self.enter()\n self.dispatch(tree.body)\n self.leave()\n self._message_iterator_var = None", "def dispatchMessageIteratorCall(self, tree):\n # simple case not a member function just an iterator with arguments\n if isinstance(tree.func, ast.Name):\n self.write(f\"FLAMEGPU->{tree.func.id}\")\n if isinstance(tree.func, ast.Attribute) :\n if isinstance(tree.func.value, ast.Name):\n # check that the iterator is supported\n if not tree.func.attr in self.fgpu_input_msg_iter_funcs:\n self.RaiseError(tree, f\"Message input loop iterator '{tree.func.attr}' is not supported.\")\n self.write(f\"FLAMEGPU->{tree.func.value.id}.{tree.func.attr}\")\n else:\n self.RaiseError(tree, \"Message input loop iterator format incorrect.\")\n\n # handle function arguments \n self.write(\"(\")\n self._CallArguments(tree)\n self.write(\")\")", "def multiple_eval_for_loops_v2():", "def multiple_eval_for_loops_v1():", "def convert_for(self, variable, start, stop, step, array):\n\n # Run super definition\n variable, start, stop, step, array = super().convert_for(\n variable, start, stop, step, array\n )\n\n # Remove data type from variable(duck typing in Python)\n variable = variable.split(\" \")[-1]\n\n # Create for template\n for_template = \"for {} in {}:\"\n\n # Define loop condition\n if array:\n # If array if given, loop through array\n loop_cond = array\n\n # Check if array slicing is required\n if step != \"1\" or stop != \"Array.length\" or start != \"0\":\n\n # Make template for array slicing\n loop_cond = \"{}[{{}}]\".format(array)\n\n if start == \"0\":\n start = \"\"\n\n if stop == \"Array.length\":\n stop = \"\"\n\n if step == \"1\":\n step = \"\"\n\n # If step is default, omit step\n if not step:\n\n # Else add start to range call\n loop_cond = loop_cond.format(start + \":\" + stop)\n else:\n # Add all three parameters if step is provided\n loop_cond = loop_cond.format(start + \":\" + stop + \":\" + step)\n\n else:\n # Else make range template\n loop_cond = \"range({})\"\n\n # If step if default, omit step\n if step == \"1\":\n\n # If start is default, omit start\n if start == \"0\":\n loop_cond = loop_cond.format(stop)\n\n else:\n # Else add start to range call\n loop_cond = loop_cond.format(start + \", \" + stop)\n else:\n # Add all three parameters if step is provided\n loop_cond = loop_cond.format(start + \", \" + stop + \", \" + step)\n\n # Return converted for statement\n return [for_template.format(variable, loop_cond)], []", "def LoopBody(i, *input_arrays):\n # Outfeed ops execute on each JF node, so they must be located on the\n # nodes.\n outfeed_devices = []\n device_assignment = py_utils.GetTpuDeviceAssignment()\n assert device_assignment\n for replica in range(device_assignment.num_replicas):\n num_cores_per_replica = 1 if self.spmd else (\n 
device_assignment.num_cores_per_replica)\n for core in range(num_cores_per_replica):\n with tf.device(device_assignment.host_device(replica, core)):\n outfeed_devices.append(\n tpu_ops.outfeed_dequeue_tuple(\n tensor_types,\n tensor_shapes,\n device_ordinal=device_assignment.tpu_ordinal(replica,\n core)))\n offset = i * num_devices\n output_arrays = list(input_arrays)\n # Each output_array holds a different per-example tensor. We get results\n # for each tensor from each TPU for each TpuTrainStep call.\n for j in range(len(output_arrays)):\n for k in range(len(outfeed_devices)):\n output_arrays[j] = output_arrays[j].write(offset + k,\n outfeed_devices[k][j])\n\n return tuple([i + 1] + output_arrays)", "def multi_c_enumerate():\n for a,(b,(c,d)) in enumerate(enumerate(enumerate(range(1,5)))):\n print(a,b,c,d)", "def visit_for(self: Parser, node: doc.For) -> None:\n for_frame = self.eval_expr(node.iter)\n if not isinstance(for_frame, T.frame.ForFrame):\n self.report_error(\n node.iter,\n \"Expect the for loop to be one of the following: \"\n \"range, T.serial, T.grid, T.parallel, T.vectorized, T.unroll, T.thread_binding\",\n )\n with self.var_table.with_frame():\n with for_frame as iters:\n self.eval_assign(target=node.target, source=iters, bind_value=bind_for_value)\n self.visit_body(node.body)", "def for_loop(num_iters, body, initial_args):\n for i in range(num_iters):\n if i == 0:\n outputs = body(*initial_args)\n else:\n outputs = body(*outputs)\n return outputs", "def makeloop(keyword, G, *args):\n if not args:\n return []\n Nargs = len(args)\n lis = []\n for arg in args:\n lis.append(makeiter(G(\"%s%s\" % (keyword, arg))))\n try:\n Nlis = lis[0].count()\n except TypeError:\n Nlis = len(lis[0])\n olist = [[] for i in range(Nargs)]\n for i in range(Nlis):\n for k in range(Nargs):\n try:\n olist[k].append(lis[k][i])\n except Exception:\n olist[k].append(\"\")\n return olist", "def test_element_loop_1cell():\n\n # Mesh object\n mesh = MeshMat()\n mesh.cell[\"triangle3\"] = CellMat(nb_node_per_cell=3)\n mesh.node = NodeMat()\n\n mesh.node.add_node(np.array([0, 0]))\n mesh.node.add_node(np.array([1, 0]))\n mesh.node.add_node(np.array([0, 1]))\n\n nodes_test = np.array([0, 1, 2])\n mesh.add_cell(nodes_test, \"triangle3\")\n\n indice = [0]\n\n # Physical quantities\n dim = 2\n Nt_tot = 1\n\n mu = 1\n Be = np.array([[[mu / 2, 0]]])\n He = np.array([[[-1 / 2, 0]]])\n mue = np.array([[mu]])\n\n Me = np.reshape(Be / mue - He, (dim, 1, Nt_tot))\n\n alphaij = [[1, 0, 0], [1, 0, 0]]\n\n alpha1 = 1\n alpha2 = 1\n\n # Computation\n tensor = ForceTensor()\n\n f, connect = tensor.element_loop(mesh, Be, He, mue, indice, dim, Nt_tot, alphaij)\n\n f1_analytic = 1 / 2 * mu * np.array([alpha1 + alpha2, alpha2])\n f2_analytic = 1 / 2 * mu * np.array([-(alpha1 + alpha2), 0])\n f3_analytic = 1 / 2 * mu * np.array([0, -alpha2])\n\n assert (f[0, :, 0] == f1_analytic).all()\n assert (f[1, :, 0] == f2_analytic).all()\n assert (f[2, :, 0] == f3_analytic).all()\n\n print(\"test_element_loop succeeded\")\n\n return True", "def _loop_raw(raw):\n raise NotImplemented", "def syntax_for():\n for i in range(5):\n print(i)\n\n ## Output\n # 0\n # 1\n # 2\n # 3\n # 4\n\n ## Notes\n # range()\n # Rather than being a function, range is actually an immutable sequence type.\n # The range type represents an immutable sequence of numbers and is commonly used for\n # looping a specific number of times in for loops.", "def test_39_for(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\tfor a:=1+1.5 to -2 do 
begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: For(Id(a)BinaryOp(+,IntLiteral(1),FloatLiteral(1.5)),UnaryOp(-,IntLiteral(2)),True,[])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,439))", "def go_c_enumerate_step():\n for i,k in enumerate(range(1,7,2)):\n print(i, k)", "def visit_for(self, flags, scope, token, parent):\r\n\r\n arglist, body = token.children\r\n\r\n if body.type != Token.T_BLOCK:\r\n # the parser should be run in python mode\r\n raise TransformError(body, \"expected block in for loop body\")\r\n\r\n # the extra block scope, and finalize, allows for declaring\r\n # variables inside of a for arg list\r\n scope.pushBlockScope(\"loop\")\r\n self._push_finalize(scope, token, parent)\r\n\r\n self._push_children(scope, body, flags)\r\n self._push_children(scope, arglist, flags)", "def vectorized_loops(self, data):\n\n # TODO: finish this.\n return np.add(np.multiply(data,data), data)", "def test_for_loop(self, modes, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"for int m in {}\\n\\tMeasureFock() | m\".format(modes)\n )\n assert np.all(\n bb._forvar[\"m\"] == np.array(modes)\n )\n assert bb.operations == [\n {'op': 'MeasureFock', 'args': [], 'kwargs': {}, 'modes': [modes[0]]},\n {'op': 'MeasureFock', 'args': [], 'kwargs': {}, 'modes': [modes[1]]},\n {'op': 'MeasureFock', 'args': [], 'kwargs': {}, 'modes': [modes[2]]}\n ]", "def go_c_enumerate():\n for i,k in enumerate(range(1,5)):\n print(i, k)", "def iteration(self) -> global___Statement.Iteration:", "def iteration(self) -> global___Statement.Iteration:", "def internal_loop(p, i, j, k, l):\n return _RNAstructure_wrap.internal_loop(p, i, j, k, l)", "def _analyse_stmt_For(self, statement: ast.For, *, next: CFNode) -> CFNode:\n return self._analyse_loop(statement, next=next)", "def multibranch_loop(i, j):\n return _RNAstructure_wrap.multibranch_loop(i, j)", "def _batch_iter(self, source, target, i: int):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n # the result and loss\n result = self.model(source)\n loss = self.criterion(result, target)\n\n # optimization and backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # update the loss\n self.epoch_loss.update(loss.item(), source.size(0))\n\n # print the information\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Batch: { i } | loss: { self.epoch_loss.avg }\", end=\"\")\n\n # clean the data\n del source, target\n\n return result", "def enumerate_(start = 0):\n\n @filters\n def _dagpype_internal_fn_act(target):\n count = start \n try:\n while True:\n e = (yield)\n target.send((numpy.arange(count, count + len(e)), e))\n count += len(e)\n except GeneratorExit: \n target.close() \n\n return _dagpype_internal_fn_act", "def coforeach(function, iterator):\n return _CoFunCaller(function=function).coiterate(iterator)", "def iterate_inputs(function, type_to_vars):\n if isinstance(function.input_type, tuple):\n input_types = list(function.input_type)\n else:\n input_types = [function.input_type]\n\n argslists = []\n for input_type in input_types:\n argslists.append(type_to_vars[input_type])\n for args in itertools.product(*argslists):\n yield args", "def emit_unpack_instruction(self, *, loop_indices=None):", "def test_make_cpp_parfor():\n\n class STDVectorThreads(LocalObject):\n\n dtype = type('std::vector<std::thread>', (c_void_p,), {})\n\n def __init__(self):\n 
super().__init__('threads')\n\n class STDThread(LocalObject):\n\n dtype = type('std::thread&', (c_void_p,), {})\n\n class FunctionType(LocalObject):\n\n dtype = type('FuncType&&', (c_void_p,), {})\n\n # Basic symbols\n nthreads = Symbol(name='nthreads', is_const=True)\n threshold = Symbol(name='threshold', is_const=True)\n last = Symbol(name='last', is_const=True)\n first = Symbol(name='first', is_const=True)\n portion = Symbol(name='portion', is_const=True)\n\n # Composite symbols\n threads = STDVectorThreads()\n\n # Iteration helper symbols\n begin = Symbol(name='begin')\n l = Symbol(name='l')\n end = Symbol(name='end')\n\n # Functions\n stdmax = sympy.Function('std::max')\n\n # Construct the parallel-for body\n func = FunctionType('func')\n i = Dimension(name='i')\n threadobj = Call('std::thread', Lambda(\n Iteration(Call(func.name, i), i, (begin, end-1, 1)),\n ['=', Byref(func.name)],\n ))\n threadpush = Call(FieldFromComposite('push_back', threads), threadobj)\n it = Dimension(name='it')\n iteration = Iteration([\n DummyExpr(begin, it, init=True),\n DummyExpr(l, it + portion, init=True),\n DummyExpr(end, InlineIf(l > last, last, l), init=True),\n threadpush\n ], it, (first, last, portion))\n thread = STDThread('x')\n waitcall = Call('std::for_each', [\n Call(FieldFromComposite('begin', threads)),\n Call(FieldFromComposite('end', threads)),\n Lambda(Call(FieldFromComposite('join', thread.name)), [], [thread])\n ])\n body = [\n DummyExpr(threshold, 1, init=True),\n DummyExpr(portion, stdmax(threshold, (last - first) / nthreads), init=True),\n Call(FieldFromComposite('reserve', threads), nthreads),\n iteration,\n waitcall\n ]\n\n parfor = ElementalFunction('parallel_for', body, 'void',\n [first, last, func, nthreads])\n\n assert str(parfor) == \"\"\"\\\nstatic inline \\\nvoid parallel_for(const int first, const int last, FuncType&& func, const int nthreads)\n{\n const int threshold = 1;\n const int portion = std::max(threshold, (-first + last)/nthreads);\n threads.reserve(nthreads);\n for (int it = first; it <= last; it += portion)\n {\n int begin = it;\n int l = it + portion;\n int end = (l > last) ? last : l;\n threads.push_back(std::thread([=, &func]()\n {\n for (int i = begin; i <= end - 1; i += 1)\n {\n func(i);\n }\n }));\n }\n std::for_each(threads.begin(),threads.end(),[](std::thread& x)\n {\n x.join();\n });\n}\"\"\"" ]
[ "0.6300082", "0.5951897", "0.5818866", "0.57553744", "0.55254656", "0.55113477", "0.5369612", "0.5264706", "0.51863104", "0.5155934", "0.5096092", "0.50791895", "0.5068876", "0.5014519", "0.49778667", "0.49353746", "0.48620737", "0.48602274", "0.48535544", "0.48398575", "0.48398575", "0.48390695", "0.48140576", "0.47989768", "0.47923055", "0.47620964", "0.4760266", "0.47585553", "0.47537646", "0.4742317" ]
0.6590012
0
A very limited set of attributes is supported, so these are fully evaluated here. Other places where attribute-type expressions may occur will also evaluate them fully rather than recursively call this function. The only supported attributes are: pyflamegpu.attribute, a supported attribute such as pyflamegpu.ALIVE, which is translated into a namespace member; and math.constant, where any supported math constant is translated to its C definition.
def _Attribute(self, t):
    # Only a limited set of globals supported
    func_dict = None
    # pyflamegpu singleton
    if isinstance(t.value, ast.Name):
        if t.value.id == "pyflamegpu":
            if t.attr in self.fgpu_attrs:
                # proceed
                self.write("flamegpu::")
                self.write(t.attr)
            else:
                self.RaiseError(t, f"Attribute '{t.attr}' does not exist in pyflamegpu object")
        # math functions (try them in raw function call format) or constants
        elif t.value.id == "math":
            if t.attr in self.mathconsts:
                self.write(self.mathconsts[t.attr])
            else:
                self.RaiseError(t, f"Unsupported math constant '{t.attr}'")
        # numpy types
        elif t.value.id == "numpy" or t.value.id == "np":
            # not sure how a numpy attribute would be used without function call or type hint but translate anyway
            if t.attr in self.numpytypes:
                self.write(self.numpytypes[t.attr])
            else:
                self.RaiseError(t, f"Unsupported numpy type {t.attr}")
        else:
            self.RaiseError(t, f"Global '{t.value.id}' identifiers not supported")
    else:
        self.RaiseError(t, "Unsupported attribute")
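A minimal, runnable sketch of the lookup this visitor performs, kept separate from the class above; the table contents and the helper name translate_attribute are illustrative assumptions rather than the code generator's real internals.

import ast

# Illustrative tables only; the real visitor keeps much larger sets (assumption).
FGPU_ATTRS = {"ALIVE", "DEAD"}
MATH_CONSTS = {"pi": "M_PI", "e": "M_E", "inf": "INFINITY"}

def translate_attribute(expr):
    # Parse a single "module.attr" expression and map it to a C++ token.
    node = ast.parse(expr, mode="eval").body
    if not (isinstance(node, ast.Attribute) and isinstance(node.value, ast.Name)):
        raise ValueError("expected a simple attribute expression")
    if node.value.id == "pyflamegpu" and node.attr in FGPU_ATTRS:
        return "flamegpu::" + node.attr          # becomes a namespace member
    if node.value.id == "math" and node.attr in MATH_CONSTS:
        return MATH_CONSTS[node.attr]            # C definition of the constant
    raise ValueError("unsupported attribute: " + expr)

print(translate_attribute("pyflamegpu.ALIVE"))   # flamegpu::ALIVE
print(translate_attribute("math.pi"))            # M_PI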
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])", "def check_common_attrs(self, ast):\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n ntypemap = ast.typemap\n is_ptr = declarator.is_indirect()\n\n # api\n api = attrs[\"api\"]\n if api is None:\n pass\n elif api not in [\"capi\", \"buf\", \"cfi\"]:\n raise RuntimeError(\n \"'api' attribute must 'capi', 'buf', or 'cfi'\"\n )\n else:\n meta[\"api\"] = api\n\n # dimension\n dimension = attrs[\"dimension\"]\n rank = attrs[\"rank\"]\n if rank:\n if rank is True:\n raise RuntimeError(\n \"'rank' attribute must have an integer value\"\n )\n try:\n attrs[\"rank\"] = int(attrs[\"rank\"])\n except ValueError:\n raise RuntimeError(\n \"'rank' attribute must have an integer value, not '{}'\"\n .format(attrs[\"rank\"])\n )\n if attrs[\"rank\"] > 7:\n raise RuntimeError(\n \"'rank' attribute must be 0-7, not '{}'\"\n .format(attrs[\"rank\"])\n )\n if not is_ptr:\n raise RuntimeError(\n \"rank attribute can only be \"\n \"used on pointer and references\"\n )\n if dimension:\n if dimension is True:\n raise RuntimeError(\n \"dimension attribute must have a value.\"\n )\n if attrs[\"value\"]:\n raise RuntimeError(\n \"argument may not have 'value' and 'dimension' attribute.\"\n )\n if rank:\n raise RuntimeError(\n \"argument may not have 'rank' and 'dimension' attribute.\"\n )\n if not is_ptr:\n raise RuntimeError(\n \"dimension attribute can only be \"\n \"used on pointer and references\"\n )\n elif ntypemap:\n if ntypemap.base == \"vector\":\n # default to 1-d assumed shape\n attrs[\"rank\"] = 1\n elif ntypemap.name == 'char' and is_ptr == 2:\n # 'char **' -> CHARACTER(*) s(:)\n attrs[\"rank\"] = 1\n\n owner = attrs[\"owner\"]\n if owner is not None:\n if owner not in [\"caller\", \"library\"]:\n raise RuntimeError(\n \"Illegal value '{}' for owner attribute. \"\n \"Must be 'caller' or 'library'.\".format(owner)\n )\n\n free_pattern = attrs[\"free_pattern\"]\n if free_pattern is not None:\n if free_pattern not in self.newlibrary.patterns:\n raise RuntimeError(\n \"Illegal value '{}' for free_pattern attribute. 
\"\n \"Must be defined in patterns section.\".format(free_pattern)\n )", "def UseAttribute(self) -> bool:", "def test_attrs():\n assert hasattr(constants.Planck_constant, \"value\")\n assert hasattr(constants.Planck_constant, \"units\")\n assert hasattr(constants.Planck_constant, \"name\")\n assert hasattr(constants.Planck_constant, \"error\")", "def addAttr(*args, attributeType: Union[AnyStr, bool]=\"\", binaryTag: Union[AnyStr, bool]=\"\",\n cachedInternally: bool=True, category: Union[AnyStr, List[AnyStr], bool]=\"\",\n dataType: Union[AnyStr, List[AnyStr], bool]=\"\", defaultValue: Union[float,\n bool]=0.0, disconnectBehaviour: Union[int, bool]=0, enumName: Union[AnyStr,\n bool]=\"\", exists: bool=True, fromPlugin: bool=True, hasMaxValue: bool=True,\n hasMinValue: bool=True, hasSoftMaxValue: bool=True, hasSoftMinValue: bool=True,\n hidden: bool=True, indexMatters: bool=True, internalSet: bool=True, keyable:\n bool=True, longName: Union[AnyStr, bool]=\"\", maxValue: Union[float, bool]=0.0,\n minValue: Union[float, bool]=0.0, multi: bool=True, niceName: Union[AnyStr,\n bool]=\"\", numberOfChildren: Union[int, bool]=0, parent: Union[AnyStr, bool]=\"\",\n proxy: Union[AnyStr, bool]=\"\", readable: bool=True, shortName: Union[AnyStr,\n bool]=\"\", softMaxValue: Union[float, bool]=0.0, softMinValue: Union[float,\n bool]=0.0, storable: bool=True, usedAsColor: bool=True, usedAsFilename: bool=True,\n usedAsProxy: bool=True, writable: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass", "def attributeInfo(*args, allAttributes: bool=True, bool: bool=True, enumerated: bool=True,\n hidden: bool=True, inherited: bool=True, internal: bool=True, leaf: bool=True,\n logicalAnd: bool=True, multi: bool=True, short: bool=True, userInterface:\n bool=True, writable: bool=True, type: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def _parse_attributes(self, attributes, node):\n for attr in attributes:\n if attr.value.ByteSize() > self.MAX_NODE_ATTRIBUTE_VALUE_BYTES:\n message = f\"The attribute value of node({node.name}) \" \\\n f\"is over {self.MAX_NODE_ATTRIBUTE_VALUE_BYTES} Bytes, will ignore.\"\n logger.warning(message)\n continue\n if attr.name in ('input_is_dynamic_shape', 'output_is_dynamic_shape') and not \\\n node.is_dynamic_shape_node and attr.value.bool_val:\n node.is_dynamic_shape_node = True\n node.add_attr({attr.name: str(attr.value)})", "def attributeQuery(*args, affectsAppearance: bool=True, affectsWorldspace: bool=True,\n attributeType: bool=True, cachedInternally: bool=True, categories: bool=True,\n channelBox: bool=True, connectable: bool=True, enum: bool=True, exists:\n bool=True, hidden: bool=True, indeterminant: bool=True, indexMatters:\n bool=True, internal: bool=True, internalGet: bool=True, internalSet:\n bool=True, keyable: bool=True, listChildren: bool=True, listDefault:\n bool=True, listEnum: bool=True, listParent: bool=True, listSiblings:\n bool=True, longName: bool=True, maxExists: bool=True, maximum: bool=True,\n message: bool=True, minExists: bool=True, minimum: bool=True, multi:\n bool=True, niceName: bool=True, node: name=None, numberOfChildren: bool=True,\n range: bool=True, rangeExists: bool=True, readable: bool=True, renderSource:\n bool=True, shortName: bool=True, softMax: bool=True, softMaxExists:\n bool=True, softMin: bool=True, softMinExists: bool=True, softRange:\n bool=True, softRangeExists: bool=True, storable: bool=True, type: AnyStr=\"\",\n typeExact: AnyStr=\"\", usedAsColor: bool=True, usedAsFilename: bool=True,\n 
usesMultiBuilder: bool=True, worldspace: bool=True, writable: bool=True,\n **kwargs)->List[float]:\n pass", "def visit_AttributeDeclaration(self, node):\n extend_ops = self.extend_ops\n\n # Load the method that's going to be called and the\n # name of the attribute being declared.\n extend_ops([\n (LOAD_FAST, self.curr_name()),\n (LOAD_ATTR, 'add_attribute'),\n (LOAD_CONST, node.name),\n ])\n\n # Generate the ops to the load the type (if one was given),\n # and the call the add_attribute method\n node_type = node.type\n if node_type is not None:\n type_code = compile(node_type.py_ast, self.filename, mode='eval')\n extend_ops([\n (LOAD_FAST, 'eval_'),\n (LOAD_CONST, type_code),\n (LOAD_FAST, 'toolkit'),\n (LOAD_FAST, 'f_globals'),\n (CALL_FUNCTION, 0x0003),\n (LOAD_CONST, node.is_event),\n (CALL_FUNCTION, 0x0003),\n (POP_TOP, None),\n ])\n else:\n extend_ops([\n (LOAD_CONST, 'is_event'),\n (LOAD_CONST, node.is_event),\n (CALL_FUNCTION, 0x0101),\n (POP_TOP, None),\n ])\n\n # Visit the default attribute binding if one exists.\n default = node.default\n if default is not None:\n self.visit(node.default)", "def visit_AttributeDeclaration(self, node):\n attr_type = node.type or 'object'\n self.code_ops.extend([\n (SetLineno, node.lineno),\n (DUP_TOP, None), # cls._add_user_attribute(name, type, is_event)\n (LOAD_CONST, node.name),\n (LOAD_NAME, attr_type),\n (LOAD_CONST, node.is_event),\n (CALL_FUNCTION, 0x0003),\n (POP_TOP, None),\n ])", "def attributes(c):\n global cfg # pylint: disable=global-variable-not-assigned\n if int(c['xp01']) >= cfg['card']['xp_limit']:\n return 'evolve'\n else:\n return 'level'", "def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))", "def is_attribute(tag, kmip_version=None):\n kmip_1_0_attribute_tags = [\n Tags.UNIQUE_IDENTIFIER,\n Tags.NAME,\n Tags.OBJECT_TYPE,\n Tags.CRYPTOGRAPHIC_ALGORITHM,\n Tags.CRYPTOGRAPHIC_LENGTH,\n Tags.CRYPTOGRAPHIC_PARAMETERS,\n Tags.CRYPTOGRAPHIC_DOMAIN_PARAMETERS,\n Tags.CERTIFICATE_TYPE,\n Tags.CERTIFICATE_IDENTIFIER,\n Tags.CERTIFICATE_SUBJECT,\n Tags.CERTIFICATE_ISSUER,\n Tags.DIGEST,\n Tags.OPERATION_POLICY_NAME,\n Tags.CRYPTOGRAPHIC_USAGE_MASK,\n Tags.LEASE_TIME,\n Tags.USAGE_LIMITS,\n Tags.STATE,\n Tags.INITIAL_DATE,\n Tags.ACTIVATION_DATE,\n Tags.PROCESS_START_DATE,\n Tags.PROTECT_STOP_DATE,\n Tags.DEACTIVATION_DATE,\n Tags.DESTROY_DATE,\n Tags.COMPROMISE_OCCURRENCE_DATE,\n Tags.COMPROMISE_DATE,\n Tags.REVOCATION_REASON,\n Tags.ARCHIVE_DATE,\n Tags.OBJECT_GROUP,\n Tags.LINK,\n Tags.APPLICATION_SPECIFIC_INFORMATION,\n Tags.CONTACT_INFORMATION,\n Tags.LAST_CHANGE_DATE,\n Tags.CUSTOM_ATTRIBUTE\n ]\n kmip_1_1_attribute_tags = copy.deepcopy(kmip_1_0_attribute_tags) + [\n Tags.CERTIFICATE_LENGTH,\n Tags.X_509_CERTIFICATE_IDENTIFIER,\n Tags.X_509_CERTIFICATE_SUBJECT,\n Tags.X_509_CERTIFICATE_ISSUER,\n Tags.DIGITAL_SIGNATURE_ALGORITHM,\n Tags.FRESH\n ]\n kmip_1_2_attribute_tags = copy.deepcopy(kmip_1_1_attribute_tags) + [\n Tags.ALTERNATIVE_NAME,\n Tags.KEY_VALUE_PRESENT,\n Tags.KEY_VALUE_LOCATION,\n Tags.ORIGINAL_CREATION_DATE\n ]\n kmip_1_3_attribute_tags = copy.deepcopy(kmip_1_2_attribute_tags) + [\n Tags.RANDOM_NUMBER_GENERATOR\n ]\n kmip_1_4_attribute_tags = copy.deepcopy(kmip_1_3_attribute_tags) + [\n Tags.PKCS12_FRIENDLY_NAME,\n Tags.DESCRIPTION,\n Tags.COMMENT,\n Tags.SENSITIVE,\n Tags.ALWAYS_SENSITIVE,\n 
Tags.EXTRACTABLE,\n Tags.NEVER_EXTRACTABLE\n ]\n kmip_2_0_attribute_tags = copy.deepcopy(kmip_1_4_attribute_tags) + [\n Tags.CERTIFICATE_SUBJECT_CN,\n Tags.CERTIFICATE_SUBJECT_O,\n Tags.CERTIFICATE_SUBJECT_OU,\n Tags.CERTIFICATE_SUBJECT_EMAIL,\n Tags.CERTIFICATE_SUBJECT_C,\n Tags.CERTIFICATE_SUBJECT_ST,\n Tags.CERTIFICATE_SUBJECT_L,\n Tags.CERTIFICATE_SUBJECT_UID,\n Tags.CERTIFICATE_SUBJECT_SERIAL_NUMBER,\n Tags.CERTIFICATE_SUBJECT_TITLE,\n Tags.CERTIFICATE_SUBJECT_DC,\n Tags.CERTIFICATE_SUBJECT_DN_QUALIFIER,\n Tags.CERTIFICATE_ISSUER_CN,\n Tags.CERTIFICATE_ISSUER_O,\n Tags.CERTIFICATE_ISSUER_OU,\n Tags.CERTIFICATE_ISSUER_EMAIL,\n Tags.CERTIFICATE_ISSUER_C,\n Tags.CERTIFICATE_ISSUER_ST,\n Tags.CERTIFICATE_ISSUER_L,\n Tags.CERTIFICATE_ISSUER_UID,\n Tags.CERTIFICATE_ISSUER_SERIAL_NUMBER,\n Tags.CERTIFICATE_ISSUER_TITLE,\n Tags.CERTIFICATE_ISSUER_DC,\n Tags.CERTIFICATE_ISSUER_DN_QUALIFIER,\n Tags.KEY_FORMAT_TYPE,\n Tags.NIST_KEY_TYPE,\n Tags.OPAQUE_DATA_TYPE,\n Tags.PROTECTION_LEVEL,\n Tags.PROTECTION_PERIOD,\n Tags.PROTECTION_STORAGE_MASK,\n Tags.QUANTUM_SAFE,\n Tags.SHORT_UNIQUE_IDENTIFIER,\n Tags.ATTRIBUTE\n ]\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_IDENTIFIER)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_SUBJECT)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_ISSUER)\n kmip_2_0_attribute_tags.remove(Tags.OPERATION_POLICY_NAME)\n kmip_2_0_attribute_tags.remove(Tags.CUSTOM_ATTRIBUTE)\n\n if kmip_version == KMIPVersion.KMIP_1_0:\n return tag in kmip_1_0_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_1:\n return tag in kmip_1_1_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_2:\n return tag in kmip_1_2_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_3:\n return tag in kmip_1_3_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_4:\n return tag in kmip_1_4_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_2_0:\n return tag in kmip_2_0_attribute_tags\n else:\n all_attribute_tags = set(\n kmip_1_0_attribute_tags +\n kmip_1_1_attribute_tags +\n kmip_1_2_attribute_tags +\n kmip_1_3_attribute_tags +\n kmip_1_4_attribute_tags +\n kmip_2_0_attribute_tags\n )\n return tag in all_attribute_tags", "def test_attributes(self):\n ujml_code = '<?xml version=\"1.0\"?><ujml version=\"{}\">'.format(uj_version) + '''\n <a_stoff a_str=\"qwerty\"\n a_int=\"9001\"\n a_bool=\"True\"\n a_float=\"1.2\"\n a_list=\"1,2,3,4\"\n a_eval=\"2+2\"\n a_exec=\"global x; x=3+3*b\">\n\n </a_stoff>\n </ujml>\n '''\n a_stoff = from_string(ujml_code)[0]\n\n self.assertEqual(\"qwerty\", a_stoff.a_str)\n self.assertEqual(9001, a_stoff.a_int)\n self.assertTrue(a_stoff.a_bool)\n self.assertEqual(1.2, a_stoff.a_float)\n self.assertEqual([1, 2, 3, 4], a_stoff.a_list)\n self.assertEqual(4, a_stoff.a_eval)\n a_stoff.a_exec(b=4)\n self.assertEqual(15, a_stoff.root.interpreter['x'])", "def getAttrs(element, exclude=(), required=()):\n conversionTable = {'lowerBound':PQU.PQU, 'upperBound':PQU.PQU, 'value':PQU.PQU, 'energy':PQU.PQU,\n 'neutronWidth':PQU.PQU, 'captureWidth':PQU.PQU, 'fissionWidthA':PQU.PQU, 'fissionWidthB':PQU.PQU, 'competitiveWidth':PQU.PQU,\n 'levelSpacing':PQU.PQU, 'Q':PQU.PQU, 'radius':PQU.PQU, 'effectiveRadius':PQU.PQU,\n 'reconstructCrossSection':getBool, 'multipleRegions': getBool, 'LdependentScatteringRadii': getBool,\n 'calculateChannelRadius':getBool, 'computeAngularDistribution':getBool, 'forSelfShieldingOnly': getBool,\n 'calculateShift':getBool,'calculatePenetrability':getBool,\n 'LvaluesNeededForConvergence':int, 'ENDF_MT':int, 'index':int, 'L':int,\n 
'neutronDOF':floatOrint, 'gammaDOF':floatOrint, 'competitiveDOF':floatOrint, 'fissionDOF':floatOrint,\n 'spin':xParticle.spin, 'parity':xParticle.parity,\n 'scatteringRadius':(lambda foo: scatteringRadius(PQU.PQU(foo)) if foo!='energyDependent' else foo),\n }\n attrs = dict( element.items() )\n for key in attrs.keys():\n if key in exclude: attrs.pop(key)\n elif key in conversionTable: attrs[key] = conversionTable[key]( attrs[key] )\n for val in required:\n if val not in attrs: attrs[val] = False\n return attrs", "def is_attribute(self):\r\n return conf.lib.clang_isAttribute(self)", "def attributeType(self) -> unicode:\n ...", "def attributes(self):\n raise NotImplementedError", "def check_fcn_attrs(self, node):\n options = node.options\n\n ast = node.ast\n declarator = ast.declarator\n node._has_found_default = False\n\n for attr in declarator.attrs:\n if attr[0] == \"_\": # internal attribute\n continue\n if attr not in [\n \"api\", # arguments to pass to C wrapper.\n \"allocatable\", # return a Fortran ALLOCATABLE\n \"cdesc\",\n \"deref\", # How to dereference pointer\n \"dimension\",\n \"free_pattern\",\n \"len\",\n \"name\",\n \"owner\",\n \"pure\",\n \"rank\",\n ]:\n raise RuntimeError(\n \"Illegal attribute '{}' for function '{}' define at line {}\".format(\n attr, node.ast.name, node.linenumber\n )\n )\n\n meta = declarator.metaattrs\n if ast.typemap is None:\n print(\"XXXXXX typemap is None\")\n if ast.typemap.sgroup == \"shadow\":\n if options.C_shadow_result:\n meta[\"api\"] = \"capptr\"\n else:\n meta[\"api\"] = \"capsule\"\n if declarator.is_ctor():\n meta[\"intent\"] = \"ctor\"\n elif declarator.is_dtor():\n meta[\"intent\"] = \"dtor\"\n else:\n meta[\"intent\"] = declarator.get_subprogram()\n self.check_deref_attr_func(node)\n self.check_common_attrs(node.ast)\n\n for arg in declarator.params:\n if arg.declarator is None:\n raise RuntimeError(\"Argument must have name in {} at line {}\".format(\n node.decl, node.linenumber))\n self.check_arg_attrs(node, arg)\n\n if node.fortran_generic:\n for generic in node.fortran_generic:\n for garg in generic.decls:\n generic._has_found_default = False\n self.check_arg_attrs(generic, garg, node.options)\n check_implied_attrs(node, generic.decls)\n else:\n check_implied_attrs(node, declarator.params)\n\n self.parse_attrs(node, ast)", "def test():\n\n # get the measure trait class\n from p2.traits.Measure import Measure as measure\n\n\n # a client\n class Component:\n \"\"\"\n Simple class with a measure\n \"\"\"\n\n # declare a measure\n attr = measure()\n\n\n # get the attribute; careful not to trigger the descriptor behavior\n attr = Component.__dict__[\"attr\"]\n # verify it's a measure\n assert isinstance(attr, measure)\n # verify it has the right category name\n assert attr.category == \"measure\"\n # and that the trait predicates have the right values\n assert attr.isBehavior == False\n assert attr.isDerivation == False\n assert attr.isMeasure == True\n assert attr.isProperty == False\n assert attr.isFacility == False\n # all done\n return attr", "def _build_attributes(self):\n\n # We might rebuild the program because of snippets but we must\n # keep already bound attributes\n\n dtype = []\n for (name,gtype) in self.all_attributes:\n if name not in self._attributes.keys():\n attribute = Attribute(self, name, gtype)\n else:\n attribute = self._attributes[name]\n\n self._attributes[name] = attribute\n dtype.append(attribute.dtype)", "def _get_attribute_functions(self, attributes):\n subqueries = []\n columns = []\n for attr in 
attributes:\n function = attributes[attr]\n if function == 'sum':\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n cast(self.db_value.value, Float).label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(\n self.db_value,\n self.db_value.id == self.db_tag.fk_value). \\\n join(self.db_key, self.db_key.id == self.db_tag.fk_key). \\\n filter(self.db_key.key == attr). \\\n subquery()\n subqueries.append(sq)\n columns.append(func.sum(sq.c.v))\n elif function == 'count' or function == 'count distinct':\n if attr == 'Activity' or attr == 'Stakeholder':\n columns.append(func.count())\n else:\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n self.db_value.value.label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(self.db_value). \\\n join(self.db_key). \\\n filter(self.db_key.key == attr). \\\n subquery()\n subqueries.append(sq)\n if (function == 'count distinct'):\n columns.append(func.count(distinct(sq.c.v)))\n else:\n columns.append(func.count(sq.c.v))\n return subqueries, columns", "def attribute(self, attribute):\n value = 3\n if self.age == \"child\":\n value -= 1\n if attribute == \"physique\" or attribute == \"phy\":\n if self.age == \"adult\":\n value += 1\n if self.gender == \"male\":\n value += 1\n elif self.gender == \"female\":\n value -= 1\n\n if attribute == \"sensitivity\" or attribute == \"sns\":\n if self.age == \"child\":\n value += 2\n if self.gender == \"male\":\n value -= 1\n elif self.gender == \"female\":\n value += 1\n\n if attribute == \"agility\" or attribute == \"agi\":\n if self.age == \"child\":\n value += 1 # to be equally as high as adult and young\n elif self.age == \"elder\":\n value -= 1\n\n if attribute == \"mind\" or attribute == \"mnd\":\n if self.age == \"elder\":\n value += 1\n\n for feature in self.features:\n if feature.name == \"blood\":\n for key in feature.modifiers:\n if attribute == key:\n value += feature.modifiers[key]\n\n if value < 1:\n value = 1\n return value", "def _get_active_attributes_and_uniforms(self):\n # This match a name of the form \"name[size]\" (= array)\n regex = re.compile(\"\"\"(?P<name>\\w+)\\s*(\\[(?P<size>\\d+)\\])\\s*\"\"\")\n # Get how many active attributes and uniforms there are\n cu = gl.glGetProgramParameter(self._handle, gl.GL_ACTIVE_UNIFORMS)\n ca = gl.glGetProgramParameter(self.handle, gl.GL_ACTIVE_ATTRIBUTES)\n # Get info on each one\n attributes = []\n uniforms = []\n for container, count, func in [(attributes, ca, gl.glGetActiveAttrib),\n (uniforms, cu, gl.glGetActiveUniform)]:\n for i in range(count):\n name, size, gtype = func(self._handle, i)\n m = regex.match(name) # Check if xxx[0] instead of xx\n if m:\n name = m.group('name')\n for i in range(size):\n container.append(('%s[%d]' % (name, i), gtype))\n else:\n container.append((name, gtype))\n #return attributes, uniforms\n return set([v[0] for v in attributes] + [v[0] for v in uniforms])", "def extensible_attributes():\n return 'extensibleattributedef?'", "def _attribute(self, name: _expression.String) -> _expression.Any:\n for c in self.constants:\n if c.name == name.native_value:\n assert isinstance(c.value, _expression.Any)\n return c.value\n\n return super(CompositeType, self)._attribute(name) # Hand over up the inheritance chain, this is important", "def visit_Attribute(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n args = [ node.value, 
ast.Str(node.attr) ]\n return to_call(to_name('getattr'), args)\n return node", "def __init__(self, alpha, beta, gamma, discount_factors, y_scale,\n unrestricted_weights=None, discounting=None, warmglow_type=\"constant\"):\n self.attr = dict()\n self.attr['y_scale'] = y_scale # weight on utility from charity euro\n self.attr['alpha'] = alpha # warm glow parameter\n self.attr['gamma'] = gamma # correlation aversion\n self.attr['beta'] = beta # risk aversion for self and charity euro\n self.attr[\"warmglow_type\"] = warmglow_type\n\n np.testing.assert_equal(warmglow_type in [\"constant\", \"linear\"], True)\n\n if discounting is not None:\n # Implement exponential discounting or hyperbolic discounting\n np.testing.assert_equal(discounting in ['exponential', 'hyperbolic'], True)\n\n if discounting in ['hyperbolic']:\n df_beta = discount_factors[0]\n df_delta = discount_factors[1]\n\n new_dfx = {\n t: (df_beta * df_delta ** t if t > 0.0 else 1) for t in discount_factors.keys()\n }\n elif discounting in ['exponential']:\n df_delta = discount_factors[0]\n new_dfx = {t: (df_delta ** t if t > 0.0 else 1) for t in discount_factors.keys()}\n self.attr['discount_factors'] = new_dfx\n else:\n # Implement nonparametric discounting.\n self.attr['discount_factors'] = discount_factors\n\n # Optional argument: nonparametric weight on y_t in the CES function.\n if unrestricted_weights is None:\n df = self.attr['discount_factors']\n y_weights = {t: y_scale for t, d_t in df.items()}\n self.attr['y_weights'] = y_weights\n else:\n # Nonparametric weight: no g() function applied in this case.\n self.attr['y_weights'] = unrestricted_weights\n\n self._check_attributes_warmglow = partial(check_attributes_warmglow, self)\n self._check_attributes_warmglow()", "def test_define_attributes(self):\n\n class Test(pyperry.Base): pass\n\n self.assertEqual(len(Test.defined_attributes), 0)\n\n Test.define_attributes(['id', 'name', 'name'])\n self.assertEqual(len(Test.defined_attributes), 2)\n\n Test.define_attributes(['foo_id', 'foo_id', 'id'])\n self.assertEqual(len(Test.defined_attributes), 3)", "def check_global_attr_type(ds, attr, attr_type):\n if attr not in ds.ncattrs():\n return 0\n\n global_attr = getattr(ds, attr)\n\n if attr_type == 'int':\n attr_type_class = int\n elif attr_type == 'float':\n attr_type_class = float\n elif attr_type == 'str':\n attr_type_class = str\n else:\n return 1\n\n if len(str(global_attr)) == 0:\n return 2\n\n if np.dtype(type(global_attr)) != np.dtype(attr_type_class):\n return 3\n\n return 4" ]
[ "0.6042245", "0.5977239", "0.5950845", "0.5936985", "0.59264654", "0.5886172", "0.5826526", "0.5790564", "0.5755268", "0.567972", "0.56774443", "0.5620047", "0.553324", "0.55215055", "0.54968786", "0.5450834", "0.5426266", "0.5415868", "0.5412917", "0.5388492", "0.53772295", "0.53504205", "0.5348758", "0.53437006", "0.5335686", "0.53263855", "0.5310507", "0.53039926", "0.5300546", "0.5292896" ]
0.713576
0
Some basic checks are undertaken on calls to ensure that the function being called is either a builtin or a defined device function. A special dispatcher is required for member function (attribute-style) calls.
def _Call(self, t):
    # check calls but let attributes check in their own dispatcher
    funcs = self._device_functions + self.pythonbuiltins + [self._input_message_var]
    # message_input variable is a valid function name as certain message types have arguments on iterator
    if isinstance(t.func, ast.Name):
        if (t.func.id not in funcs):
            self.RaiseWarning(t, "Function call is not a defined FLAME GPU device function or a supported python built in.")
        # dispatch even if warning raised
        self.dispatch(t.func)
    elif isinstance(t.func, ast.Lambda):
        self.dispatch(t.func)  # not supported
    else:
        # special handler for dispatching member function calls
        # This would otherwise be an attribute
        self.dispatchMemberFunction(t.func, t)
    self.write("(")
    self._CallArguments(t)
    self.write(")")
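A compact, standalone illustration of the whitelist check described above; the function lists and the helper classify_call are hypothetical stand-ins, and only the classification step is shown, not the actual dispatch or code emission.

import ast

# Hypothetical whitelists standing in for the translator's real lists.
DEVICE_FUNCTIONS = {"agent_out", "message_out"}
PYTHON_BUILTINS = {"abs", "min", "max", "len"}
INPUT_MESSAGE_VAR = "message_in"

def classify_call(src):
    # Decide how a single call expression would be treated by the check above.
    call = ast.parse(src, mode="eval").body
    if not isinstance(call, ast.Call):
        raise ValueError("expected a call expression")
    if isinstance(call.func, ast.Name):
        known = DEVICE_FUNCTIONS | PYTHON_BUILTINS | {INPUT_MESSAGE_VAR}
        if call.func.id in known:
            return "known function name"
        return "unknown name: warn, but dispatch anyway"
    if isinstance(call.func, ast.Attribute):
        return "member function call: handled by the special dispatcher"
    return "unsupported callee"

print(classify_call("abs(x)"))                    # known function name
print(classify_call("foo(x)"))                    # unknown name: warn, but dispatch anyway
print(classify_call("msg.getVariableInt('x')"))   # member function call: handled by the special dispatcher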
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(fun_name):", "def test_dispatchUnknown(self):\n disp = Dispatcher()\n name = \"missing\"\n args = (1, 2)\n res = disp.dispatch(name, *args)\n self.assertEqual(res, (name,) + args)", "def validate_universal_calls(cls):\n assert True == cls.universal_called\n assert True == cls.nested_called\n\n # Reset for next time.\n cls.base_called = None\n cls.nested_called = None", "def check_call_function(self, a, text):\n logging.debug(\"check call function \" + text)\n if self.cur_char == self.func_call_char:\n self.produce(FUNCTION, text)\n self.functions_calls.append(text)\n elif not self.must_func_call_char:\n if text in self.functions:\n self.produce(FUNCTION, text)\n self.functions_calls.append(text)", "def _validate_builtin(_):\n pass", "def __def_function__():\n pass", "def _general_testing(self, context, kind, *args, **kwargs):\r\n if kind == \"fake_next_op\":\r\n self._register_fake_next_op(context.channel, *args, **kwargs)\r\n self._reply(context, proto_success({}, None), None)\r\n return True\r\n self._reply(context, proto_failure({\"Unsupported testing function '{}'\".format(kind)}), None)\r\n return False", "def is_unsupported(func):\n\n for m in BUILTIN_LIKELY_MODULES:\n for v in m.__dict__.values():\n if not callable(v):\n continue\n if func is v:\n translator_logger.log(\n 2,\n \"Whitelist: {} is part of built-in module and does not have to be transformed.\".format(\n func\n ),\n )\n return True\n\n # NOTE: should be placed before `is_paddle_func`\n # The api(s) should be considered as plain function and convert\n # them into static layer code.\n from paddle.nn import Sequential\n\n PADDLE_NEED_CONVERT_APIS = [Sequential]\n if type(func) in PADDLE_NEED_CONVERT_APIS:\n return False\n\n if is_paddle_func(func):\n translator_logger.log(\n 2,\n \"Whitelist: {} is part of Paddle module and does not have to be transformed.\".format(\n func\n ),\n )\n return True", "def dummy_fn(self):\n\t\tpass", "def usefulFunction():\n# I think the uname platform is a func. 
for findout out the information of the computer\n print(platform.uname())", "def exec_builtin(self, cmd):\r\n func = Builtin.builtins.get(cmd[0])\r\n if func is None:\r\n return False\r\n func(self, cmd)\r\n return True", "def test_no_requirements(self):\n def f():\n pass\n self._run_as_operator(f)", "def test_parameterless_calls(self):\n for attr in dir(api):\n func = getattr(api, attr)\n if callable(func): \n spec = inspect.getargspec(func)\n if not spec.args and not spec.varargs and not spec.keywords and not spec.defaults:\n func()", "def test_dispatchMissingUnknown(self):\n disp = Dispatcher()\n disp.disp_unknown = None\n self.assertRaises(irc.UnhandledCommand, disp.dispatch, \"bar\")", "def _op_easy(self, op, reg_list, param_list=None): # pylint: disable-msg=invalid-name\n\n has_op = hasattr(self.circuit, op)\n\n if has_op:\n if param_list:\n # DEBUG\n # print(\"********** op {} param_list {} reg_list {}\".format(op, param_list, reg_list)) # pylint: disable-msg=line-too-long\n # END-DEBUG\n getattr(self.circuit, op)(*param_list, *reg_list)\n else:\n getattr(self.circuit, op)(*reg_list)\n\n return has_op", "def dummy_fn(self, *args, **kwargs):", "def test_dispatch(self):\n disp = Dispatcher()\n args = (1, 2)\n res = disp.dispatch(\"working\", *args)\n self.assertEqual(res, args)", "def usefulFunction():\n print(platform.uname()) # Yay it told me about my computer - no idea what it means but thats cool", "def can_mi():\n pass", "def check_call(self, cmd, nonzero_e = tc.error_e):\n self.run(cmd, nonzero_e = nonzero_e)", "def _get_target_function():\n return False", "def error_handlings():\n\n global_names = globals()\n for name in global_names:\n if name.startswith(\"provoke_and_handle_\"):\n print(\"\\nAUFRUF von '{}':\".format(name))\n global_names[name]()", "def get_apifunc(arg):\n if isinstance(arg, APIFunc):\n return arg\n\n if callable(arg):\n return APIFunc(arg)\n\n raise ValueError(\"Argument %s is neither apifunc nor callable\" % arg)", "def post_run_func_checked(driver: HammerDriver) -> None:\n if post_run_func is not None:\n post_run_func(driver)", "def dispatch(self) -> None:\n while True:\n body = self.general_queue.pop()\n if \"CMD$\" in body:\n cmd = [part for part in body[body.find(\"$\") + 1:].split(\";\") if part]\n try:\n module, func = cmd[0], cmd[1]\n except IndexError:\n self.send_through_aprs(f\"CMDERR: Unable to parse Commnd {cmd}\")\n continue\n if self.validate_func(module, func):\n try:\n getattr(self.modules[module], func)()\n self.send_through_aprs(f\"CMDSUC: Command {cmd} executed successfully\")\n except Exception as e:\n self.send_through_aprs(f\"CMDERR: Command {cmd} failed with {e}\")", "def is_compatible(self, function, arguments):", "def dummy_func(*args, **kwargs):\r\n pass", "def dispatch_commands(_globals, _name_):\n try:\n argh.dispatch_commands([\n v for k, v in _globals.items()\n if isinstance(v, types.FunctionType)\n and v.__module__ == _name_\n and not k.startswith('_')\n and k != 'main'\n ])\n except KeyboardInterrupt:\n sys.exit(1)", "def isbuiltin(object):\r\n return isinstance(object, types.BuiltinFunctionType)", "def _can_perform_call(self, node, args, keywords):\n return (\n getattr(node, \"starargs\", None) is None\n and getattr(node, \"kwargs\", None) is None\n and all(isinstance(arg, KnownValue) for arg in args)\n and all(isinstance(arg, KnownValue) for _, arg in keywords)\n )" ]
[ "0.5971725", "0.5816666", "0.56597704", "0.565735", "0.56368434", "0.56075937", "0.560516", "0.5587939", "0.55403495", "0.54888415", "0.542656", "0.5398054", "0.53934664", "0.53790116", "0.5372327", "0.53566486", "0.53564227", "0.5351131", "0.53422856", "0.5330329", "0.5329663", "0.5329485", "0.5323656", "0.52949", "0.5272383", "0.52667564", "0.5258041", "0.52533597", "0.5252052", "0.52446914" ]
0.5911068
1
A function to visualize pymatgen Structure objects in a Jupyter notebook using the chemview package.
def quick_view(structure, bonds=True, conventional=False, transform=None, show_box=True, bond_tol=0.2, stick_radius=0.1):
    s = structure.copy()
    if conventional:
        s = SpacegroupAnalyzer(s).get_conventional_standard_structure()
    if transform:
        s.make_supercell(transform)
    atom_types = [i.symbol for i in s.species]
    if bonds:
        bonds = []
        for i in range(s.num_sites - 1):
            sym_i = s[i].specie.symbol
            for j in range(i + 1, s.num_sites):
                sym_j = s[j].specie.symbol
                max_d = CovalentRadius.radius[sym_i] + CovalentRadius.radius[sym_j] + bond_tol
                if s.get_distance(i, j, np.array([0, 0, 0])) < max_d:
                    bonds.append((i, j))
    bonds = bonds if bonds else None
    mv = MolecularViewer(s.cart_coords, topology={'atom_types': atom_types, 'bonds': bonds})
    if bonds:
        mv.ball_and_sticks(stick_radius=stick_radius)
    for i in s.sites:
        el = i.specie.symbol
        coord = i.coords
        r = CovalentRadius.radius[el]
        mv.add_representation('spheres', {'coordinates': coord.astype('float32'),
                                          'colors': [get_atom_color(el)],
                                          'radii': [r * 0.5],
                                          'opacity': 1.0})
    if show_box:
        o = np.array([0, 0, 0])
        a, b, c = s.lattice.matrix[0], s.lattice.matrix[1], s.lattice.matrix[2]
        starts = [o, o, o, a, a, b, b, c, c, a + b, a + c, b + c]
        ends = [a, b, c, a + b, a + c, b + a, b + c, c + a, c + b, a + b + c, a + b + c, a + b + c]
        colors = [0xffffff for i in range(12)]
        mv.add_representation('lines', {'startCoords': np.array(starts),
                                        'endCoords': np.array(ends),
                                        'startColors': colors,
                                        'endColors': colors})
    return mv
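A usage sketch for the function above, assuming pymatgen and chemview are installed and that quick_view's own dependencies (SpacegroupAnalyzer, CovalentRadius, MolecularViewer, get_atom_color, numpy as np) are already imported in the module; run it in a Jupyter notebook cell so the returned widget renders.

from pymatgen.core import Lattice, Structure

# Rock-salt NaCl built from its space group; the lattice constant is approximate.
nacl = Structure.from_spacegroup(
    "Fm-3m", Lattice.cubic(5.692), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]
)

viewer = quick_view(nacl, conventional=True, transform=[2, 2, 2])
viewer  # leaving the widget as the last expression in the cell displays it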
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ipython_repr_no_nglview(self):\n molecule = Molecule().from_smiles(\"CCO\")\n molecule._ipython_display_()", "def jupyter():", "def test_visualize_openeye(self):\n import IPython\n\n mol = Molecule().from_smiles(\"CCO\")\n\n assert isinstance(mol.visualize(backend=\"openeye\"), IPython.core.display.Image)", "def test_visualize_openeye(self):\n import IPython\n\n mol = Molecule().from_smiles(\"CCO\")\n\n assert isinstance(mol.visualize(backend=\"openeye\"), IPython.core.display.Image)", "def test_visualize_rdkit(self):\n import rdkit\n\n mol = Molecule().from_smiles(\"CCO\")\n\n assert isinstance(mol.visualize(backend=\"rdkit\"), rdkit.Chem.rdchem.Mol)", "def show(data, types=(\"inflated\", ), recache=False, cmap='RdBu_r', layout=None,\n autoclose=None, open_browser=None, port=None, pickerfun=None,\n template=\"mixer.html\", overlays_available=None,\n overlays_visible=('rois', 'sulci'), labels_visible=('rois', ),\n overlay_file=None, title='Brain', **kwargs):\n\n # populate default webshow args\n if autoclose is None:\n autoclose = options.config.get('webshow', 'autoclose', fallback='true') == 'true'\n if open_browser is None:\n open_browser = options.config.get('webshow', 'open_browser', fallback='true') == 'true'\n\n data = dataset.normalize(data)\n if not isinstance(data, dataset.Dataset):\n data = dataset.Dataset(data=data)\n\n html = FallbackLoader([os.path.split(os.path.abspath(template))[0], serve.cwd]).load(template)\n db.auxfile = data\n\n #Extract the list of stimuli, for special-casing\n stims = dict()\n for name, view in data:\n if 'stim' in view.attrs and os.path.exists(view.attrs['stim']):\n sname = os.path.split(view.attrs['stim'])[1]\n stims[sname] = view.attrs['stim']\n\n package = Package(data)\n metadata = json.dumps(package.metadata())\n images = package.images\n subjects = list(package.subjects)\n\n ctmargs = dict(method='mg2', level=9, recache=recache,\n external_svg=overlay_file, overlays_available=overlays_available)\n ctms = dict((subj, utils.get_ctmpack(subj, types, **ctmargs))\n for subj in subjects)\n package.reorder(ctms)\n\n subjectjs = json.dumps(dict((subj, \"ctm/%s/\"%subj) for subj in subjects))\n db.auxfile = None\n\n if layout is None:\n layout = [None, (1, 1), (2, 1), (3, 1), (2, 2), (3, 2), (3, 2), (3, 3), (3, 3), (3, 3)][len(subjects)]\n\n linear = lambda x, y, m: (1.-m)*x + m*y\n mixes = dict(\n linear=linear,\n smoothstep=(lambda x, y, m: linear(x, y, 3*m**2 - 2*m**3)),\n smootherstep=(lambda x, y, m: linear(x, y, 6*m**5 - 15*m**4 + 10*m**3))\n )\n\n post_name = Queue()\n\n # Put together all view options\n my_viewopts = dict(options.config.items('webgl_viewopts'))\n my_viewopts['overlays_visible'] = overlays_visible\n my_viewopts['labels_visible'] = labels_visible\n my_viewopts['brightness'] = options.config.get('curvature', 'brightness')\n my_viewopts['smoothness'] = options.config.get('curvature', 'webgl_smooth')\n my_viewopts['contrast'] = options.config.get('curvature', 'contrast')\n\n for sec in options.config.sections():\n if 'paths' in sec or 'labels' in sec:\n my_viewopts[sec] = dict(options.config.items(sec))\n\n if pickerfun is None:\n pickerfun = lambda a, b: None\n\n class CTMHandler(web.RequestHandler):\n def get(self, path):\n subj, path = path.split('/')\n if path == '':\n self.set_header(\"Content-Type\", \"application/json\")\n self.write(open(ctms[subj]).read())\n else:\n fpath = os.path.split(ctms[subj])[0]\n mtype = mimetypes.guess_type(os.path.join(fpath, path))[0]\n if mtype is None:\n mtype = 
\"application/octet-stream\"\n self.set_header(\"Content-Type\", mtype)\n self.write(open(os.path.join(fpath, path), 'rb').read())\n\n class DataHandler(web.RequestHandler):\n def get(self, path):\n path = path.strip(\"/\")\n try:\n dataname, frame = path.split('/')\n except ValueError:\n dataname = path\n frame = 0\n\n if dataname in images:\n dataimg = images[dataname][int(frame)]\n if dataimg[1:6] == \"NUMPY\":\n self.set_header(\"Content-Type\", \"application/octet-stream\")\n else:\n self.set_header(\"Content-Type\", \"image/png\")\n\n if 'Range' in self.request.headers:\n self.set_status(206)\n rangestr = self.request.headers['Range'].split('=')[1]\n start, end = [ int(i) if len(i) > 0 else None for i in rangestr.split('-') ]\n\n clenheader = 'bytes %s-%s/%s' % (start, end or len(dataimg), len(dataimg) )\n self.set_header('Content-Range', clenheader)\n self.set_header('Content-Length', end-start+1)\n self.write(dataimg[start:end+1])\n else:\n self.write(dataimg)\n else:\n self.set_status(404)\n self.write_error(404)\n\n class StimHandler(web.StaticFileHandler):\n def initialize(self):\n pass\n\n def get(self, path):\n if path not in stims:\n self.set_status(404)\n self.write_error(404)\n else:\n self.root, fname = os.path.split(stims[path])\n super(StimHandler, self).get(fname)\n\n class StaticHandler(web.StaticFileHandler):\n def initialize(self):\n self.root = ''\n\n class MixerHandler(web.RequestHandler):\n def get(self):\n self.set_header(\"Content-Type\", \"text/html\")\n generated = html.generate(data=metadata,\n colormaps=colormaps,\n default_cmap=cmap,\n python_interface=True,\n leapmotion=True,\n layout=layout,\n subjects=subjectjs,\n viewopts=json.dumps(my_viewopts),\n title=title,\n **kwargs)\n #overlays_visible=json.dumps(overlays_visible),\n #labels_visible=json.dumps(labels_visible),\n #**viewopts)\n self.write(generated)\n\n def post(self):\n data = self.get_argument(\"svg\", default=None)\n png = self.get_argument(\"png\", default=None)\n with open(post_name.get(), \"wb\") as svgfile:\n if png is not None:\n data = png[22:].strip()\n try:\n data = binascii.a2b_base64(data)\n except:\n print(\"Error writing image!\")\n data = png\n svgfile.write(data)\n\n class JSMixer(serve.JSProxy):\n @property\n def view_props(self):\n \"\"\"An enumerated list of settable properties for views. \n There may be a way to get this from the javascript object, \n but I (ML) don't know how.\n\n There may be additional properties we want to set in views\n and animations; those must be added here.\n\n Old property list that used to be settable before webgl refactor:\n view_props = ['altitude', 'azimuth', 'target', 'mix', 'radius', 'pivot',\n 'visL', 'visR', 'alpha', 'rotationR', 'rotationL', 'projection',\n 'volume_vis', 'frame', 'slices']\n \"\"\"\n camera = getattr(self.ui, \"camera\")\n _camera_props = ['camera.%s' % k for k in camera._controls.attrs.keys()]\n surface = getattr(self.ui, \"surface\")\n _subject = list(surface._folders.attrs.keys())[0]\n _surface = getattr(surface, _subject)\n _surface_props = ['surface.{subject}.%s'%k for k in _surface._controls.attrs.keys()]\n _curvature_props = ['surface.{subject}.curvature.brightness',\n 'surface.{subject}.curvature.contrast',\n 'surface.{subject}.curvature.smoothness']\n return _camera_props + _surface_props + _curvature_props\n\n def _set_view(self, **kwargs):\n \"\"\"Low-level command: sets view parameters in the current viewer\n\n Sets each the state of each keyword argument provided. 
View parameters\n that can be set include all parameters in the data.gui in the html view.\n\n \"\"\"\n # Set unfolding level first, as it interacts with other arguments\n surface = getattr(self.ui, \"surface\")\n subject_list = surface._folders.attrs.keys()\n # Better to only self.view_props once; it interacts with javascript, \n # don't want to do that too often, it leads to glitches.\n vw_props = copy.copy(self.view_props)\n for subject in subject_list:\n if 'surface.{subject}.unfold' in kwargs:\n unfold = kwargs.pop('surface.{subject}.unfold')\n self.ui.set('surface.{subject}.unfold'.format(subject=subject), unfold)\n for k, v in kwargs.items():\n if not k in vw_props:\n print('Unknown parameter %s!'%k)\n continue\n else:\n self.ui.set(k.format(subject=subject) if '{subject}' in k else k, v)\n # Wait for webgl. Wait for it. .... WAAAAAIIIT.\n time.sleep(0.03)\n\n def _capture_view(self, frame_time=None):\n \"\"\"Low-level command: returns a dict of current view parameters\n\n Retrieves the following view parameters from current viewer:\n\n altitude, azimuth, target, mix, radius, visL, visR, alpha,\n rotationR, rotationL, projection, pivot\n\n Parameters\n ----------\n frame_time : scalar\n time (in seconds) to specify for this frame.\n \n Notes\n -----\n If multiple subjects are present, only retrieves view for first subject.\n \"\"\"\n view = {}\n subject = list(self.ui.surface._folders.attrs.keys())[0]\n for p in self.view_props:\n try:\n view[p] = self.ui.get(p.format(subject=subject) if '{subject}' in p else p)[0]\n # Wait for webgl.\n time.sleep(0.03)\n except Exception as err:\n # TO DO: Fix this hack with an error class in serve.py & catch it here\n print(err) #msg = \"Cannot read property 'undefined'\"\n #if err.message[:len(msg)] != msg:\n # raise err\n if frame_time is not None:\n view['time'] = frame_time\n return view\n\n def save_view(self, subject, name, is_overwrite=False):\n \"\"\"Saves current view parameters to pycortex database\n\n Parameters\n ----------\n subject : string\n pycortex subject id\n name : string\n name for view to store\n is_overwrite: bool\n whether to overwrite an extant view (default : False)\n\n Notes\n -----\n Equivalent to call to cortex.db.save_view(subject, vw, name)\n For a list of the view parameters saved, see viewer._capture_view\n \"\"\"\n db.save_view(self, subject, name, is_overwrite)\n\n def get_view(self, subject, name):\n \"\"\"Get saved view from pycortex database.\n\n Retrieves named view from pycortex database and sets current\n viewer parameters to retrieved values.\n\n Parameters\n ----------\n subject : string\n pycortex subject ID\n name : string\n name of saved view to re-load\n\n Notes\n -----\n Equivalent to call to cortex.db.get_view(subject, vw, name)\n For a list of the view parameters set, see viewer._capture_view\n \"\"\"\n view = db.get_view(self, subject, name)\n\n def addData(self, **kwargs):\n Proxy = serve.JSProxy(self.send, \"window.viewers.addData\")\n new_meta, new_ims = _convert_dataset(Dataset(**kwargs), path='/data/', fmt='%s_%d.png')\n metadata.update(new_meta)\n images.update(new_ims)\n return Proxy(metadata)\n\n def getImage(self, filename, size=(1920, 1080)):\n \"\"\"Saves currently displayed view to a .png image file\n\n Parameters\n ----------\n filename : string\n duh.\n size : tuple (x, y)\n size (in pixels) of image to save.\n \"\"\"\n post_name.put(filename)\n Proxy = serve.JSProxy(self.send, \"window.viewer.getImage\")\n return Proxy(size[0], size[1], \"mixer.html\")\n\n def makeMovie(self, 
animation, filename=\"brainmovie%07d.png\", offset=0,\n fps=30, size=(1920, 1080), interpolation=\"linear\"):\n \"\"\"Renders movie frames for animation of mesh movement\n\n Makes an animation (for example, a transition between inflated and\n flattened brain or a rotating brain) of a cortical surface. Takes a\n list of dictionaries (`animation`) as input, and uses the values in\n the dictionaries as keyframes for the animation.\n\n Mesh display parameters that can be animated include 'elevation',\n 'azimuth', 'mix', 'radius', 'target' (more?)\n\n\n Parameters\n ----------\n animation : list of dicts\n Each dict should have keys `idx`, `state`, and `value`.\n `idx` is the time (in seconds) at which you want to set `state` to `value`\n `state` is the parameter to animate (e.g. 'altitude', 'azimuth')\n `value` is the value to set for `state`\n filename : string path name\n Must contain '%d' (or some variant thereof) to account for frame\n number, e.g. '/some/directory/brainmovie%07d.png'\n offset : int\n Frame number for first frame rendered. Useful for concatenating\n animations.\n fps : int\n Frame rate of resultant movie\n size : tuple (x, y)\n Size (in pixels) of resulting movie\n interpolation : {\"linear\", \"smoothstep\", \"smootherstep\"}\n Interpolation method for values between keyframes.\n\n Example\n -------\n # Called after a call of the form: js_handle = cortex.webgl.show(DataViewObject)\n # Start with left hemisphere view\n js_handle._setView(azimuth=[90], altitude=[90.5], mix=[0])\n # Initialize list\n animation = []\n # Append 5 key frames for a simple rotation\n for az, idx in zip([90, 180, 270, 360, 450], [0, .5, 1.0, 1.5, 2.0]):\n animation.append({'state':'azimuth', 'idx':idx, 'value':[az]})\n # Animate! (use default settings)\n js_handle.makeMovie(animation)\n \"\"\"\n # build up two variables: State and Anim.\n # state is a dict of all values being modified at any time\n state = dict()\n # anim is a list of transitions between keyframes\n anim = []\n setfunc = self.ui.set\n for f in sorted(animation, key=lambda x:x['idx']):\n if f['idx'] == 0:\n setfunc(f['state'], f['value'])\n state[f['state']] = dict(idx=f['idx'], val=f['value'])\n else:\n if f['state'] not in state:\n state[f['state']] = dict(idx=0, val=self.getState(f['state'])[0])\n start = dict(idx=state[f['state']]['idx'],\n state=f['state'],\n value=state[f['state']]['val'])\n end = dict(idx=f['idx'], state=f['state'], value=f['value'])\n state[f['state']]['idx'] = f['idx']\n state[f['state']]['val'] = f['value']\n if start['value'] != end['value']:\n anim.append((start, end))\n\n for i, sec in enumerate(np.arange(0, anim[-1][1]['idx']+1./fps, 1./fps)):\n for start, end in anim:\n if start['idx'] < sec <= end['idx']:\n idx = (sec - start['idx']) / float(end['idx'] - start['idx'])\n if start['state'] == 'frame':\n func = mixes['linear']\n else:\n func = mixes[interpolation]\n\n val = func(np.array(start['value']), np.array(end['value']), idx)\n if isinstance(val, np.ndarray):\n setfunc(start['state'], val.ravel().tolist())\n else:\n setfunc(start['state'], val)\n self.getImage(filename%(i+offset), size=size)\n\n def _get_anim_seq(self, keyframes, fps=30, interpolation='linear'):\n \"\"\"Convert a list of keyframes to a list of EVERY frame in an animation.\n\n Utility function called by make_movie; separated out so that individual\n frames of an animation can be re-rendered, or for more control over the\n animation process in general.\n\n \"\"\"\n # Misc. 
setup\n fr = 0\n a = np.array\n func = mixes[interpolation]\n #skip_props = ['surface.{subject}.right', 'surface.{subject}.left', ] #'projection',\n # Get keyframes\n keyframes = sorted(keyframes, key=lambda x:x['time'])\n # Normalize all time to frame rate\n fs = 1./fps\n for k in range(len(keyframes)):\n t = keyframes[k]['time']\n t = np.round(t/fs)*fs\n keyframes[k]['time'] = t\n allframes = []\n for start, end in zip(keyframes[:-1], keyframes[1:]):\n t0 = start['time']\n t1 = end['time']\n tdif = float(t1-t0)\n # Check whether to continue frame sequence to endpoint\n use_endpoint = keyframes[-1]==end\n nvalues = np.round(tdif/fs).astype(int)\n if use_endpoint:\n nvalues += 1\n fr_time = np.linspace(0, 1, nvalues, endpoint=use_endpoint)\n # Interpolate between values\n for t in fr_time:\n frame = {}\n for prop in start.keys():\n if prop=='time':\n continue\n if (start[prop] is None) or (start[prop] == end[prop]) or isinstance(start[prop], (bool, str)):\n frame[prop] = start[prop]\n continue\n val = func(a(start[prop]), a(end[prop]), t)\n if isinstance(val, np.ndarray):\n frame[prop] = val.tolist()\n else:\n frame[prop] = val\n allframes.append(frame)\n return allframes\n\n def make_movie_views(self, animation, filename=\"brainmovie%07d.png\", \n offset=0, fps=30, size=(1920, 1080), alpha=1, frame_sleep=0.05,\n frame_start=0, interpolation=\"linear\"):\n \"\"\"Renders movie frames for animation of mesh movement\n\n Makes an animation (for example, a transition between inflated and\n flattened brain or a rotating brain) of a cortical surface. Takes a\n list of dictionaries (`animation`) as input, and uses the values in\n the dictionaries as keyframes for the animation.\n\n Mesh display parameters that can be animated include 'elevation',\n 'azimuth', 'mix', 'radius', 'target' (more?)\n\n\n Parameters\n ----------\n animation : list of dicts\n This is a list of keyframes for the animation. Each keyframe should be\n a dict in the form captured by the ._capture_view method. NOTE: every\n view must include all view parameters. Additionally, there should be\n one extra key/value pair for \"time\". The value for time should be\n in seconds. The list of keyframes is sorted by time before applying,\n so they need not be in order in the input.\n filename : string path name\n Must contain '%d' (or some variant thereof) to account for frame\n number, e.g. '/some/directory/brainmovie%07d.png'\n offset : int\n Frame number for first frame rendered. Useful for concatenating\n animations.\n fps : int\n Frame rate of resultant movie\n size : tuple (x, y)\n Size (in pixels) of resulting movie\n interpolation : {\"linear\", \"smoothstep\", \"smootherstep\"}\n Interpolation method for values between keyframes.\n\n Notes\n -----\n Make sure that all values that will be modified over the course\n of the animation are initialized (have some starting value) in the first\n frame.\n\n Example\n -------\n # Called after a call of the form: js_handle = cortex.webgl.show(DataViewObject)\n # Start with left hemisphere view\n js_handle._setView(azimuth=[90], altitude=[90.5], mix=[0])\n # Initialize list\n animation = []\n # Append 5 key frames for a simple rotation\n for az, t in zip([90, 180, 270, 360, 450], [0, .5, 1.0, 1.5, 2.0]):\n animation.append({'time':t, 'azimuth':[az]})\n # Animate! 
(use default settings)\n js_handle.make_movie(animation)\n \"\"\"\n allframes = self._get_anim_seq(animation, fps, interpolation)\n for fr, frame in enumerate(allframes[frame_start:], frame_start):\n self._set_view(**frame)\n time.sleep(frame_sleep)\n self.getImage(filename%(fr+offset+1), size=size)\n time.sleep(frame_sleep)\n\n class PickerHandler(web.RequestHandler):\n def get(self):\n pickerfun(int(self.get_argument(\"voxel\")), int(self.get_argument(\"vertex\")))\n\n class WebApp(serve.WebApp):\n disconnect_on_close = autoclose\n def get_client(self):\n self.connect.wait()\n self.connect.clear()\n return JSMixer(self.send, \"window.viewer\")\n\n def get_local_client(self):\n return JSMixer(self.srvsend, \"window.viewer\")\n\n if port is None:\n port = random.randint(1024, 65536)\n\n server = WebApp([(r'/ctm/(.*)', CTMHandler),\n (r'/data/(.*)', DataHandler),\n (r'/stim/(.*)', StimHandler),\n (r'/mixer.html', MixerHandler),\n (r'/picker', PickerHandler),\n (r'/', MixerHandler),\n (r'/static/(.*)', StaticHandler)],\n port)\n\n server.start()\n print(\"Started server on port %d\"%server.port)\n url = \"http://%s%s:%d/mixer.html\"%(serve.hostname, domain_name, server.port)\n if open_browser:\n webbrowser.open(url)\n client = server.get_client()\n client.server = server\n return client\n else:\n try:\n from IPython.display import display, HTML\n display(HTML('Open viewer: <a href=\"{0}\" target=\"_blank\">{0}</a>'.format(url)))\n except:\n pass\n return server", "def structure_jsmol(cif_str):\n from jsmol_bokeh_extension import JSMol\n import bokeh.models as bmd\n\n script_source = bmd.ColumnDataSource()\n\n info = dict(\n height=\"100%\",\n width=\"100%\",\n use=\"HTML5\",\n serverURL=\"https://chemapps.stolaf.edu/jmol/jsmol/php/jsmol.php\",\n j2sPath=\"https://chemapps.stolaf.edu/jmol/jsmol/j2s\",\n #serverURL=\"https://www.materialscloud.org/discover/scripts/external/jsmol/php/jsmol.php\",\n #j2sPath=\"https://www.materialscloud.org/discover/scripts/external/jsmol/j2s\",\n #serverURL=\"detail/static/jsmol/php/jsmol.php\",\n #j2sPath=\"detail/static/jsmol/j2s\",\n script=\"\"\"set antialiasDisplay ON;\n load data \"cifstring\"\n {}\n end \"cifstring\"\n \"\"\".format(cif_str)\n ## Note: Need PHP server for approach below to work\n # script=\"\"\"set antialiasDisplay ON;\n #load cif::{};\n #\"\"\".format(get_cif_url(entry.filename))\n )\n\n applet = JSMol(\n width=600,\n height=600,\n script_source=script_source,\n info=info,\n #js_url=\"detail/static/jsmol/JSmol.min.js\",\n )\n\n return applet", "def _ipython_display_(self):\n spec, render_type = self._get_spec_info()\n\n id = uuid.uuid4()\n publish_display_data(\n {'text/html': self._generate_html(id)},\n metadata={'jupyter-vega3': '#{0}'.format(id)}\n )\n publish_display_data(\n {'application/javascript':\n self._generate_js(id, spec, render_type)},\n metadata={'jupyter-vega3': '#{0}'.format(id)}\n )", "def test_visualize_rdkit(self):\n import IPython\n\n mol = Molecule().from_smiles(\"CCO\")\n\n assert isinstance(mol.visualize(backend=\"rdkit\"), IPython.core.display.SVG)", "def test_visualize_nglview(self):\n try:\n import nglview\n except ModuleNotFoundError:\n pass\n\n # Start with a molecule without conformers\n mol = Molecule().from_smiles(\"CCO\")\n\n with pytest.raises(ValueError):\n mol.visualize(backend=\"nglview\")\n\n # Add conformers\n mol.generate_conformers()\n\n # Ensure an NGLView widget is returned\n assert isinstance(mol.visualize(backend=\"nglview\"), nglview.NGLWidget)", "def test_visualize_nglview(self):\n try:\n 
import nglview\n except ModuleNotFoundError:\n pass\n\n # Start with a molecule without conformers\n mol = Molecule().from_smiles(\"CCO\")\n\n with pytest.raises(ValueError):\n mol.visualize(backend=\"nglview\")\n\n # Add conformers\n mol.generate_conformers()\n\n # Ensure an NGLView widget is returned\n assert isinstance(mol.visualize(backend=\"nglview\"), nglview.NGLWidget)\n\n # Providing other arguments is an error\n with pytest.raises(ValueError):\n mol.visualize(backend=\"nglview\", width=100)\n with pytest.raises(ValueError):\n mol.visualize(backend=\"nglview\", height=100)\n with pytest.raises(ValueError):\n mol.visualize(backend=\"nglview\", show_all_hydrogens=False)", "def overview(data):\n\n printer.table(['Name', 'El', 'Invariom name', 'Model compound'], head=True)\n for atom in data.iter_atoms(True):\n printer.table([atom.name, atom.element, atom.invariom_name, atom.invariom.molecule.name])\n printer.table(done=True)", "def showMolecule(self, colorBy=None, label=False, dcdFN=None):\n # Write PDB file\n # To set Occupancy, change atom.occupancy\n # To set Beta, change atom.temperature_factor\n import os.path\n pdbFN = os.path.join(MMTK.Database.molecule_types.directory,\n 'showMolecule.pdb')\n outF = MMTK.PDB.PDBOutputFile(pdbFN)\n outF.write(self.molecule)\n outF.close()\n # Write VMD script\n script = 'set ligand [mol new ' + pdbFN + ']\\n'\n if colorBy is not None:\n script += 'mol modcolor 0 $ligand ' + colorBy + '\\n'\n script += 'mol modstyle 0 0 CPK 1.000000 0.300000 10.000000 10.000000\\n'\n if label:\n script += \"\"\"\nproc label_atoms { molid seltext } {\n set sel [atomselect $molid $seltext] \n set atomlist [$sel list] \n foreach {atom} $atomlist { \n set atomlabel [format \"%d/%d\" $molid $atom]\n label add Atoms $atomlabel \n } \n $sel delete \n} \nlabel_atoms 0 all\n\"\"\"\n if dcdFN is not None:\n script += 'animate delete all $ligand\\n'\n script += 'mol addfile ' + dcdFN + ' type dcd waitfor all\\n'\n scriptF = open('showMolecule.vmd', 'w')\n scriptF.write(script)\n scriptF.close()\n # Find and run vmd\n import AlGDock\n vmdCommand = AlGDock.findPath(AlGDock.search_paths['vmd'])\n import subprocess\n subprocess.call([vmdCommand, '-e', 'showMolecule.vmd'])\n # Remove files\n os.remove(pdbFN)\n os.remove('showMolecule.vmd')", "def visualize(self):\n # TODO\n #pyLDAvis.enable_notebook()\n #vis = pyLDAvis.gensim.prepare(self.lda_model, self.stemmed_corpus)\n return", "def show(self, notebook=notebook_display):\n print(\"\\nCluster Ensemble:\")\n if notebook is True:\n display(self._df)\n elif notebook is False:\n print(self._df)\n self.massrich_parameters()", "def notebook():\n pass", "def notebook():\n pass", "def visualize(model: Model, structural_part=True, measurement_part=False,\n view=True, filename=None, title=''):\n g = gv.Digraph(format='jpg', graph_attr={'label': title})\n if structural_part:\n g.node_attr.update(color='red', shape='box')\n for i, j in model.parameters['Beta']:\n lval, rval = model.beta_names[0][i], model.beta_names[0][j]\n g.edge(rval, lval)\n if measurement_part:\n g.node_attr.update(color='black', shape='circle')\n for i, j in model.parameters['Lambda']:\n lval, rval = model.lambda_names[0][i], model.lambda_names[0][j]\n g.edge(lval, rval)\n g.render(filename, view=view)", "def inspect(self, axis_units='px', frontview=True):\n ax = super().inspect(axis_units=axis_units, frontview=frontview)\n scale = self._get_plot_scale_factor(axis_units)\n\n # Label modules and tiles\n for ch, module in enumerate(self.modules):\n s = 
'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)\n cx, cy, _ = module[0].centre() * scale\n ax.text(cx, cy, s, fontweight='bold',\n verticalalignment='center',\n horizontalalignment='center')\n\n for t in [1]:\n cx, cy, _ = module[t].centre() * scale\n ax.text(cx, cy, 'T{}'.format(t + 1),\n verticalalignment='center',\n horizontalalignment='center')\n\n ax.set_title('DSSC detector geometry ({})'.format(self.filename))\n return ax", "def __repr__(self):\n\n\t\t# Preparing variables\n\t\tl_s_content = [\t\t# List containing the content to print\n\t\t\t\"> The structure object :\"\n\t\t]\n\n\t\t# PDB fields\n\t\tl_s_content.append(\"s_name : {}\".format(self.s_name))\n\n\t\t# Structural fields\n\t\tl_s_content.append(\"i_atom_count : {}\".format(self.i_atom_count))\n\t\tl_s_content.append(\"a_atoms : {}\".format(len(self.a_atoms)))\n\n\t\t# Grid fields\n\t\tl_s_content.append(\"b_loaded : {}\".format(self.b_loaded))\n\t\tl_s_content.append(\"a_grid : {}\".format(self.a_grid.size))\n\n\t\treturn \"\\n\".join(l_s_content)\t\t# Returns the content to show", "def show(self) -> None:", "def show(self):", "def jntToDisplay():\n DISPLAY=\"DISPLAY\"\n # check obj exist\n if pm.objExists(DISPLAY) != 1:\n pm.error(\"no object call DISPLAY\")\n jnt=pm.ls(\"*_ikJnt*\",\"*_fkJnt*\",\"*_ctrlJnt*\",type ='joint')\n for obj in jnt:\n\n pm.delete(obj + \".overrideDisplayType\",icn=1)\n pm.setAttr(obj + \".overrideEnabled\",1)\n pm.setAttr(obj + \".overrideDisplayType\",0)\n pm.connectAttr(DISPLAY + \".ctrlJntDisplay\",obj + \".overrideDisplayType\",f=1)\n pm.setAttr(DISPLAY + \".ctrlJntDisplay\",0) # set to normal\n\n jnt=pm.ls(\"*_skinJnt*\",\"*:*_skinJnt*\",type ='joint')\n for obj in jnt:\n pm.delete(obj + \".overrideDisplayType\",icn=1)\n pm.setAttr(obj + \".overrideEnabled\",1)\n pm.setAttr(obj + \".overrideDisplayType\",0)\n pm.connectAttr(DISPLAY + \".skeletonDisplay\",obj + \".overrideDisplayType\",f=1)\n pm.setAttr(DISPLAY + \".skeletonDisplay\",0) # set to normal\n\n\n pm.setAttr(DISPLAY + \".geoDisplay\",0) # set to normal\n pm.setAttr((\"GEO.overrideEnabled\"),1)\n pm.setAttr((\"GEO.overrideDisplayType\"),0)\n pm.delete((\"GEO.overrideDisplayType\"),icn=1)\n pm.connectAttr((DISPLAY + \".geoDisplay\"),(\"GEO.overrideDisplayType\"),f=1)", "def _show_info(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n dataframe.printSchema()", "def displayDesigned(self, selection=\"designed\", mol=None):\n\n\t\tif mol == None:\n\t\t\tmol = self.design\n\n\t\tcmd.remove(\"name AX*\")\n\t\tcmd.remove(\"name CEN*\")\n\t\tcmd.remove(\"name CONT\")\n\n\t\tcmd.hide(\"lines\", selection)\n\t\tcmd.show(\"cartoon\", selection)\n\n\t\tcmd.select(\"ligand\", selection + \" & HETATM & !name V*\")\n\t\tcmd.select(\"virtual\", selection + \" & HETATM & name V*\")\n\t\tcmd.select(\"protein\", selection + \" & !HETATM\")\n\t\tcmd.select(\"shell1\", \"protein & (byres ligand around 5.0)\")\n\t\tcmd.select(\"nearby\", \"protein & (byres ligand around 9.0)\")\n\t\tcmd.disable(\"nearby\")\n\t\t\n\t\tcmd.color(\"gray\", selection + \" & name CA\")\n\t\tcmd.color(\"magenta\", \"virtual\")\n\t\tcmd.color(\"tv_green\", \"element C & protein & !name CA\")\n\t\tdisplayLigand(\"ligand\")\n\n\t\t\n\t\tif mol.numCatalytic() > 0:\n\t\t\tcmd.color(\"brightorange\", \"catalytic & element C\")\n\t\t\tcmd.show(\"stick\", \"catalytic\")\n\t\t\tcmd.set(\"stick_radius\", 0.2, \"catalytic\")\n\t\telse:\n\t\t\tprint \"no catalytic 
residues\"\n\n\t\tdisplaySticks(\"shell1\")\n\t\tdisplayLines(\"nearby\")\n\t\tcmd.do(\"show_ligand_holes(0.8)\")\n\t\t#cmd.do(\"ligandMesh\")\n\t\tcmd.zoom(\"nearby\")", "def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3", "def plot(\r\n self,\r\n file_name: str = \"index\",\r\n path: str = \"./\",\r\n template: str = \"default\",\r\n notebook_height: int = 500,\r\n ):\r\n self.notebook_height = notebook_height\r\n\r\n script_path = os.path.dirname(os.path.abspath(__file__))\r\n if template in [\"default\", \"reaction_smiles\", \"smiles\", \"url_image\"]:\r\n template = \"template_\" + template + \".j2\"\r\n else:\r\n script_path = os.path.dirname(template)\r\n\r\n html_path = os.path.join(path, file_name + \".html\")\r\n js_path = os.path.join(path, file_name + \".js\")\r\n jenv = jinja2.Environment(loader=jinja2.FileSystemLoader(script_path))\r\n\r\n has_legend = False\r\n\r\n for _, value in self.scatters.items():\r\n if value[\"has_legend\"]:\r\n has_legend = True\r\n break\r\n\r\n if not self.show_legend:\r\n has_legend = False\r\n\r\n # Drop colormaps before passing them to the document, as they are\r\n # not JSON serializable.\r\n trees_copy = copy.deepcopy(self.trees)\r\n scatters_copy = copy.deepcopy(self.scatters)\r\n\r\n for key, _ in trees_copy.items():\r\n del trees_copy[key][\"colormap\"]\r\n\r\n for key, _ in scatters_copy.items():\r\n del scatters_copy[key][\"colormap\"]\r\n\r\n model = {\r\n \"title\": self.title,\r\n \"file_name\": file_name + \".js\",\r\n \"clear_color\": self.clear_color,\r\n \"view\": self.view,\r\n \"coords\": str(self.coords).lower(),\r\n \"coords_color\": self.coords_color,\r\n \"coords_box\": str(self.coords_box).lower(),\r\n \"coords_ticks\": str(self.coords_ticks).lower(),\r\n \"coords_grid\": str(self.coords_grid).lower(),\r\n \"coords_tick_count\": self.coords_tick_count,\r\n \"coords_tick_length\": self.coords_tick_length,\r\n \"coords_offset\": self.coords_offset,\r\n \"x_title\": self.x_title,\r\n \"y_title\": self.y_title,\r\n \"tree_helpers\": list(trees_copy.values()),\r\n \"point_helpers\": list(scatters_copy.values()),\r\n \"has_legend\": str(has_legend).lower(),\r\n \"legend_title\": self.legend_title,\r\n \"legend_orientation\": 
self.legend_orientation,\r\n \"alpha_blending\": str(self.alpha_blending).lower(),\r\n \"anti_aliasing\": str(self.anti_aliasing).lower(),\r\n \"style\": self.style,\r\n \"impress\": self.impress,\r\n \"in_notebook\": Faerun.in_notebook(),\r\n \"thumbnail_width\": self.thumbnail_width,\r\n \"thumbnail_fixed\": str(self.thumbnail_fixed).lower(),\r\n }\r\n\r\n if Faerun.in_notebook():\r\n model[\"data\"] = self.create_data()\r\n else:\r\n with open(js_path, \"w\") as f:\r\n f.write(self.create_data())\r\n\r\n output_text = jenv.get_template(template).render(model)\r\n\r\n with open(html_path, \"w\") as result_file:\r\n result_file.write(output_text)\r\n\r\n if Faerun.in_notebook():\r\n display(IFrame(html_path, width=\"100%\", height=self.notebook_height))\r\n display(FileLink(html_path))", "def show_map(pdb,show_sticks_all=False, show_sticks_metalbinding=True, show_probes=True, show_pdb_metals=True):\n view=py3Dmol.view(width=1000, height=800)\n\n view.addModel(open(pdb+'.pdb', 'r').read(),'pdb')\n if show_probes:\n view.addModel(open(pdb+'_PredictedSites.xyz', 'r').read(),'xyz')\n probes = open(pdb+'_PredictedSites.xyz', 'r').readlines()\n if(int(probes[0])!=0):\n probabilities = [p.replace('#','').split()[-1] for p in probes[2:]] # read p from comment in xyz file\n colors = {}\n # use different colors for the probabilities\n for i,x in enumerate(probabilities):\n colors[i] = '#%02x%02x%02x' % (0, 128, int(float(x)/float(probabilities[0])*255))\n else: #no predicted site\n colors = [] \n view.addLabel(\"No probe predicted\", {'position': {'x':0, 'y':0, 'z':0}, 'backgroundColor': '#0080FF', 'fontColor': 'white'});\n \n view.zoomTo()\n view.setBackgroundColor('white')\n view.setStyle({},{'cartoon': {'color':'gray'}})\n if show_sticks_all:\n view.setStyle({}, {'stick':{},'cartoon': {'color':'gray'}})\n if show_pdb_metals:\n view.getModel(0).setStyle({'resn':\"ZN\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"CA\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"CU\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"HG\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MG\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"FE\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MN\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"NI\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MB\"},{'sphere': {'opacity':.75}})\n \n if show_probes:\n view.getModel(1).setStyle({},{'sphere': {'colorscheme':{'prop':'index', 'map':colors}}})\n \n # add hoverable labels for the residues and the predicted metals\n # two callbacks are needed, one for the residues and one for the metals\n # the metal one displays the probability\n view.getModel(0).setHoverable({},True,'''function(atom,viewer,event,container) {\n if(!atom.label) {\n atom.label = viewer.addLabel(atom.resn+atom.resi+\":\"+atom.atom,{position: atom, backgroundColor: 'mintcream', fontColor:'black'});\n }}''',\n '''function(atom,viewer) { \n if(atom.label) {\n viewer.removeLabel(atom.label);\n delete atom.label;\n }\n }''')\n view.getModel(1).setHoverable({},True,'''function(atom,viewer,event,container) {\n if(!atom.label) {\n atom.label = viewer.addLabel(atom.atom+\" [\"+atom.serial+\"]\",{position: atom, backgroundColor: 'mintcream', fontColor:'black'});\n }}''',\n '''function(atom,viewer) { \n if(atom.label) {\n viewer.removeLabel(atom.label);\n delete atom.label;\n }\n }''')\n if 
show_sticks_metalbinding:\n view.setStyle({'resn':\"HIS\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"ASP\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"GLU\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"CYS\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n\n return view.show()", "def display(self, style):\n self.stl = Style(style, mc_version='1.15.2')\n vae = self.stl.models.vae\n\n vae_data = Tile.vectorize_all(self.stl.info['mc_version'])\n encodings = vae.encoder.predict(vae_data)[0]\n\n tiles = [\n Tile('minecraft:quartz_stairs[half=bottom]', version='1.15.2'),\n Tile('minecraft:birch_stairs[half=bottom]', version='1.15.2'),\n Tile('minecraft:brick_stairs[half=bottom]', version='1.15.2'),\n Tile('minecraft:bricks', version='1.15.2'),\n Tile('minecraft:nether_bricks', version='1.15.2'),\n Tile('minecraft:white_carpet', version='1.15.2'),\n Tile('minecraft:snow[layers=1]', version='1.15.2')\n ]\n\n vae_data = vectorize(tiles, pad_to=vae.input_dim)\n encodings_subset = vae.encoder.predict(vae_data)[0]\n\n import matplotlib.pyplot as plt\n\n fig=plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(encodings[:,0], encodings[:,1], c=[[.9,.9,.9]], marker='x')\n ax.scatter(encodings_subset[:,0], encodings_subset[:,1], color='r', marker='x')\n for idx, t in enumerate(tiles):\n ax.annotate(t.name, (encodings_subset[idx,0], encodings_subset[idx,1]))\n ax.set_title('Minecraft tile-ok 2D látenstere')\n plt.show()", "def widgets(overwrite=True):\n install_nbextension(os.path.join(PKGPATH, 'static'),\n destination='molviz',\n overwrite=overwrite)" ]
[ "0.64746124", "0.6093616", "0.6001484", "0.6001484", "0.5946787", "0.58996487", "0.58758247", "0.5842013", "0.5826114", "0.5824589", "0.5681209", "0.56638604", "0.56560427", "0.5653275", "0.5592476", "0.5589071", "0.5589071", "0.557641", "0.5573647", "0.5561114", "0.55604887", "0.5557063", "0.55471545", "0.55434483", "0.5535069", "0.552192", "0.55034006", "0.54748124", "0.54724044", "0.5463462" ]
0.6274498
1
Remove the sprite from all lists and cancel the update event.
def remove_from_sprite_lists(self): super().remove_from_sprite_lists() # It is very important to call this to prevent potential # issues such as crashes or excess memory use from failed # garbage collection. pyglet.clock.unschedule(self.update)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n # delete sprite if fired\n if not self.player.state == 'USE_A':\n self.game.all_sprites.remove(self)", "def remove_sprites(self, *sprites):\r\n with self.lock:\r\n self.sprites_to_unload.update(sprites)", "def _remove_texture(self):\n # Retrieve the item that was selected\n key = self._listbox.get(ACTIVE)\n # Post a delete notice to the manager\n self._remove(key)", "def __del__(self):\n\n # Delete sprite (if it has been defined)\n try:\n self.canvas.delete(self.sprite)\n except AttributeError:\n pass\n except tk.TclError:\n pass", "def remove_songs(self):\n self.stop()\n self.listbox.delete(0, \"end\")\n pygame.mixer.music.stop()", "def removeTextureToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfor sha in shas:\n\t\t\tif sha.a.texture_Occ.exists:\n\t\t\t\tsha.a.texture_Occ.delete()", "def kill(self):\n # stuff\n pygame.sprite.Sprite.kill(self)", "def kill(self):\n # stuff\n pygame.sprite.Sprite.kill(self)", "def remove_song(self):\n self.stop()\n self.listbox.delete(\"anchor\")\n pygame.mixer.music.stop()", "def removeFromPlayerList(self):\n\t\tfor x in self.playerRemoveList:\n\t\t\tself.removePlayer(x)", "def remove(self):\n\n self.last_move = \"\"\n self.collision_boxes = []\n self.removed=True", "def _remove_wall_pic(self):\n # Retrieve the item that was selected\n key = self._listbox.get(ACTIVE)\n # Post a delete notice to the manager\n self._remove(key)", "def handle_collisions():\n for sprite in sprite_group:\n for other in pygame.sprite.spritecollide(sprite, sprite_group, False):\n if sprite is not other and DO_KILL:\n sprite.kill()\n other.kill()", "def update(self):\n if pygame.time.get_ticks() - self.start_time > const.LEVEL_WAITING:\n self.player.update()\n self.platform_list.update()\n self.platform_grass_list.update()\n self.platform_stone_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite.update()\n self.enemy_bubble_list.update()\n self.fruit_list.update()\n for bullet in self.bullet_list:\n if bullet.rect.x > const.SCREEN_WIDTH + 10 or bullet.rect.x < -10:\n self.bullet_list.remove(bullet)\n self.active_sprite.remove(bullet)\n\n\n for guy in self.enemy_list:\n enemy_hit_list = pygame.sprite.spritecollide(guy, self.bullet_list, False, pygame.sprite.collide_circle)\n for hit in enemy_hit_list:\n bub_enemy= enemy.Enemy_bubble(guy)\n self.enemy_list.remove(guy)\n\n self.bullet_list.remove(hit)\n self.active_sprite.add(bub_enemy)\n self.active_sprite.remove(hit)\n self.active_sprite.remove(guy)\n\n self.enemy_bubble_list.add(bub_enemy)\n\n\n if len(self.enemy_list) == 0 and len(self.enemy_bubble_list) == 0 and self.close_time == 0:\n self.close_time=pygame.time.get_ticks()\n\n if self.close_time > 0 and pygame.time.get_ticks()-self.close_time > 2000:\n self.close = True", "def empty(self):\n if self.sprite: self.sprite._focus_exit()\n pygame.sprite.GroupSingle.empty(self)", "def remove(self):\r\n game_ref.remove(self)", "def removePlayer(self, index):\n\n self.eloList.pop(index)\n self.idList.pop(index)", "def clearTargetShips(self):\n self.targets = []\n self.currentTarget = None", "def remove_objects(self, objects):\n for sprite_group in self.sprite_level_blocks:\n sprite_group.remove(objects)", "def cancel(self):\n self.blackened = self.blackened_history[-1]\n self.blackened_history.pop()\n if self.victory:\n self.victory = False\n self.blackened_history_size -= 1", "def remove_ball(ball_list, canvas):\r\n if len(ball_list) > 1:\r\n ball_list[len(ball_list) - 1].delete_ball()\r\n ball_list.pop()", "def 
remove(name, send_events=True, moving=False):", "def remove(self, game_obj):\r\n self.game_objects_for_removal.append(game_obj)", "def clearList(self):\r\n self.players.clear()", "def remove_from_hand(self):\n pass", "def clear(self):\n\n Console.info(\"Cleaning sprite files...\")\n Console.indent()\n \n for dirPath, dirNames, fileNames in os.walk(self.base):\n for fileName in fileNames:\n if fileName.startswith(\"jasysprite\"):\n filePath = os.path.join(dirPath, fileName)\n Console.debug(\"Removing file: %s\", filePath)\n os.remove(filePath)\n \n Console.outdent()", "def run_logic(self):\n if not self.game_over:\n # Move all the sprites\n self.all_sprites_list.update()", "def remove():", "def Remove(self, event):\n pass", "def discard(self, obj):\n self._drawables.discard(obj)\n self._updateables.discard(obj)\n self._collidables.discard(obj)\n self._projectiles.discard(obj)\n self._textboxes.discard(obj)\n self.__len__.cache_clear()" ]
[ "0.766939", "0.72574145", "0.6607035", "0.65096223", "0.64890796", "0.64082247", "0.63706535", "0.63706535", "0.6319077", "0.6264712", "0.62010026", "0.61186045", "0.60208344", "0.60023475", "0.59555334", "0.59235364", "0.5921738", "0.5916878", "0.5907688", "0.5894691", "0.5890487", "0.58901125", "0.5863885", "0.5848887", "0.57934004", "0.5777917", "0.5763857", "0.57308936", "0.5725365", "0.5717873" ]
0.82141757
0
Creates input data for tests using preprocessed standard star and its calibration files. The raw files will be downloaded and saved inside the path stored in the `$DRAGONS_TEST/raw_inputs` directory. Processed files will be stored inside a new folder called "dragons_test_inputs". The subdirectory structure should reflect the one returned by the `path_to_inputs` fixture.
def create_inputs_recipe(): module_name, _ = os.path.splitext(os.path.basename(__file__)) path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name) os.makedirs(path, exist_ok=True) os.chdir(path) os.makedirs("inputs/", exist_ok=True) print('Current working directory:\n {:s}'.format(os.getcwd())) for filename, _ in input_pars: print('Downloading files...') basename = filename.split("_")[0] + ".fits" sci_path = download_from_archive(basename) sci_ad = astrodata.open(sci_path) data_label = sci_ad.data_label() print('Reducing pre-processed data:') logutils.config(file_name='log_{}.txt'.format(data_label)) p = GNIRSLongslit([sci_ad]) p.prepare(bad_wcs="fix") p.addDQ() p.addVAR(read_noise=True) p.ADUToElectrons() p.addVAR(poisson_noise=True) # p.flatCorrect() p.makeIRAFCompatible() os.chdir("inputs/") processed_ad = p.writeOutputs().pop() os.chdir("../") print('Wrote pre-processed file to:\n' ' {:s}'.format(processed_ad.filename))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_test_inputs(input_dir):\n # Prepare input parameters\n parameters = Dict(dict={})\n # example structure: bcc Fe\n structure = StructureData(cell=[[1.42002584, 1.42002584, 1.42002584],\n [1.42002584, -1.42002584, -1.42002584],\n [-1.42002584, 1.42002584, -1.42002584]])\n structure.append_atom(position=[0, 0, 0], symbols='Fe')\n # create jij couplings input from csv export\n jijs_expanded = np.load(os.path.join(input_dir, 'Jij_expanded.npy'))\n jij_data = ArrayData()\n jij_data.set_array('Jij_expanded', jijs_expanded)\n\n # set up calculation\n inputs = {\n 'parameters': parameters,\n 'jij_data': jij_data,\n 'structure': structure,\n 'metadata': {\n 'description': 'Test job submission with the aiida_spirit plugin',\n },\n }\n\n return inputs", "def install_inputs():\n dest = os.path.join(safe_dir, \"input\")\n sys.stdout.write(\"Moving directory %r to %r...\\n\" % (\"input\", dest))\n try:\n shutil.move(\"input\", dest)\n except (OSError, shutil.Error), exc:\n sys.sdterr.write(\"Failed to move %r to %r\\n\" % (\"input\", dest))\n sys.sdterr.write(\" %s\\n\" % exc)\n return 1\n undo_actions.append(restore_inputs)\n\n source = os.path.join(ref_test_data.test_data_dir, \"input\")\n sys.stdout.write(\"Copying directory %r to %r...\\n\" % (source, \"input\"))\n try:\n shutil.copytree(source, \"input\")\n except (OSError, shutil.Error), exc:\n sys.sdterr.write(\"Failed to move %r to %r\\n\" % (source, \"input\"))\n sys.sdterr.write(\" %s\\n\" % exc)\n return 1\n undo_actions.append(remove_test_input)\n\n return 0", "def setup():\n for dir_path in [train_dir, output_dir]:\n Path(dir_path).mkdir(exist_ok=True)\n\n # create the training and test data files that we will use\n create_jsonlines_feature_files(train_dir)", "def main():\n\n args = _parse_arguments()\n path = _get_dragons_input_test_path()\n create_test_folder_if_does_not_exist(path)\n download_non_existing_test_files(path, args.list_of_files)", "def test_input_files(self):\n files = list_files_folder(data_dir + \"build-custom/files/\", ext=\"fna.gz\")\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_files\"\n params[\"input\"] = files\n params[\"input_extension\"] = \"\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")\n\n # All files are invalid\n files = [f+\".xxx\" for f in files]\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_files_invalid\"\n params[\"input\"] = files\n params[\"input_extension\"] = \"\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom ran but it should fail\")", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n 
self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def main(input_filepath: str = \"./data\",\n output_filepath: str = \"./data\") -> None:\n logger = logging.getLogger(__name__)\n logger.info(\"making final data set from raw data\")\n\n raw_data_dir = path.abspath(input_filepath)\n if path.isdir(raw_data_dir):\n\n processed_data_dir = path.abspath(output_filepath)\n\n logger.info(\"start\")\n filenames = [\"train.txt\", \"valid.txt\", \"test.txt\"]\n create_index(filenames, raw_data_dir, processed_data_dir)\n prepare_datasets(filenames, raw_data_dir, processed_data_dir)\n\n else:\n logger.info(\"File or directory does not exist\")\n\n logger.info(\"finished\")", "def _synth_input(self, path, files):\n features = np.empty((0, 15))\n for i in range(len(files)):\n train_set = np.load(f'{path}coords/{files[i]}.npy')\n train_set = train_set.reshape((train_set.shape[0], -1))\n features = np.concatenate((features, train_set), axis=0)\n self.input_ = F.normalize(torch.tensor(np.array(features), dtype=torch.float32))", "def preprocess(input_dir, output_dir, crs, resolution, country, overwrite):\n # Set data directories if not provided and create them if necessary\n if not input_dir:\n input_dir = os.path.join(os.curdir, \"Data\", \"Input\")\n if not output_dir:\n output_dir = os.path.join(os.curdir, \"Data\", \"Intermediary\")\n input_dir, output_dir = Path(input_dir), Path(output_dir)\n for p in (input_dir, output_dir):\n p.mkdir(parents=True, exist_ok=True)\n\n # Create raster grid from CLI options\n geom = country_geometry(country)\n dst_crs = CRS.from_string(crs)\n transform, shape, bounds = create_grid(geom, dst_crs, resolution)\n args = {\n \"dst_crs\": dst_crs,\n \"dst_bounds\": bounds,\n \"dst_res\": resolution,\n \"overwrite\": overwrite,\n \"geom\": geom,\n }\n\n raw = Raw(input_dir)\n preprocess_land_cover(\n src_files=raw.land_cover,\n dst_raster=output_dir.joinpath(\"land_cover.tif\").as_posix(),\n **args,\n )\n preprocess_elevation(src_files=raw.elevation, dst_dir=output_dir, **args)\n preprocess_osm(\n src_file=raw.openstreetmap[0],\n dst_dir=output_dir,\n dst_crs=dst_crs,\n dst_shape=shape,\n dst_transform=transform,\n geom=geom,\n overwrite=overwrite,\n )\n preprocess_surface_water(\n src_files=raw.surface_water,\n dst_raster=output_dir.joinpath(\"surface_water.tif\").as_posix(),\n **args,\n )\n\n log.info(\"Writing area of interest to disk.\")\n with open(output_dir.joinpath(\"area_of_interest.geojson\"), \"w\") as f:\n json.dump(geom.__geo_interface__, f)", "def construct_data(paths=DEFAULT_PATHS, use_saved=True):\n if not verify_paths(paths):\n raise FileNotFoundError('Some of the required data files could not be '\n 'found. 
Before running the project, run '\n '`setup.sh` to create/download them.')\n\n # Paths to save or load the constructed datasets from\n saved_train = os.path.join(paths['dir_output'], 'train.pk')\n saved_test = os.path.join(paths['dir_output'], 'test.pk')\n\n # Load the data if possible\n if (os.path.exists(saved_train) and os.path.exists(saved_test)\n and use_saved):\n print('Found existing saved dataset; loading it...')\n with open(saved_train, mode='rb') as train_file:\n train = pickle.load(train_file)\n with open(saved_test, mode='rb') as test_file:\n test = pickle.load(test_file)\n return train, test\n\n print('Constructing dataset...')\n\n # Read in the .csv files and create DataFrames for train, test observations\n depths = pd.read_csv(paths['df_depths'], index_col='id')\n train = pd.read_csv(paths['df_train'], index_col='id', usecols=[0])\n train = train.join(depths)\n test = depths[~depths.index.isin(train.index)].copy()\n\n # (Training images)\n print('Reading training images...')\n path = paths['dir_train_images'] + '{}.png'\n train['image'] = [read_image(path.format(img))\n for img in tqdm(train.index)]\n\n # (Training masks)\n print('Reading training masks...')\n path = paths['dir_train_masks'] + '{}.png'\n train['mask'] = [read_image(path.format(img)) for img in tqdm(train.index)]\n\n # (Testing images)\n print('Reading test images...')\n path = paths['dir_test_images'] + '{}.png'\n test['image'] = [read_image(path.format(img)) for img in tqdm(test.index)]\n\n # Calculate the coverage for the training images\n # Then, bin the images into discrete classes corresponding to coverage\n train['coverage'] = train['mask'].map(np.sum) / pow(101, 2)\n train['cov_class'] = train['coverage'].map(\n lambda cov: np.int(np.ceil(cov * 10)))\n\n # Write to file\n print('Saving the constructed dataset...')\n try:\n with open(saved_train, mode='wb') as train_file:\n pickle.dump(train, train_file)\n with open(saved_test, mode='wb') as test_file:\n pickle.dump(test, test_file)\n except OSError:\n print('Could not save the data due to an occasional Python bug on '\n 'some systems. 
:( If this is happening on macOS, try running on '\n 'Linux instead.')\n\n return train, test", "def generate_input_files(elevation_folder_path, template_input_file_path):\n import pathlib\n json_dict = get_inputs_from_file(template_input_file_path)\n\n path_to_match = pathlib.Path(elevation_folder_path)\n\n for heightfile in path_to_match.glob(\"*.npy\"):\n dot_index = str(heightfile).rfind('.')\n filename_base = str(heightfile)[:dot_index]\n opt_output_filename = filename_base + \".out\"\n opt_input_filename = filename_base + \".json\"\n\n localdict = json_dict.copy()\n\n localdict[\"output_file\"] = opt_output_filename\n localdict[\"elevation_file\"] = str(heightfile)\n\n dump_json_dict(out_dict=localdict, filename=opt_input_filename)", "def prepare_training_data(\n self, dir_snippy: Path, dir_ont: Path, caller: str = 'clair',\n break_complex: bool = True, snippy_ext: str = \".ref.vcf\"\n ):\n\n self.training_dir.mkdir(parents=True, exist_ok=True)\n\n comparisons = self.get_coverage_comparisons(dir_snippy=dir_snippy, dir_ont=dir_ont, snippy_ext=snippy_ext)\n\n ont_with_truth, snippies, _ = self.get_data_from_comparisons(\n comparisons=comparisons, caller=caller, break_complex=break_complex, outdir=self.training_dir\n )\n\n features, _ = self.parse_features(ont_calls=ont_with_truth)\n\n # Combined features for training\n self.features_combined = pd.concat(features) # combined feature frames\n self.features_combined = self.features_combined.reset_index(drop=True)\n self.features_combined.to_csv(self.training_dir / 'training_features.tsv', sep='\\t', index=False)", "def write_inputs(self, extraFstDict={}):\n\n if (self.run_dir == self.fst_dir):\n raise ValueError, \"run_dir == fst_dir, you cannot run directly in the template directory\"\n\n self.run_name, ext = os.path.splitext(self.fst_file)\n\n if (not os.path.isdir(self.run_dir)):\n os.mkdir(self.run_dir)\n\n self.fst_dir = os.path.abspath(self.fst_dir)\n\n if (self.exec_count <= 1): # Is 0 when invoked by main()\n # Is 1 when invoked by Assembly ???\n self.read_inputs()\n\n for key in extraFstDict:\n self.fstDict[key] = extraFstDict[key]\n\n curdir = os.getcwd()\n os.chdir (self.run_dir) ###note, change to run_dir\n\n self.writeFST(self.fst_file,self.fstDict) \n self.writeAD()\n self.writeBlade()\n self.writeWnd()\n self.writeNoise()\n self.writePtfm(self.fstDict)\n self.copyTwr()\n self.copyAdams()\n\n os.chdir(curdir) ## restore dir", "def create_input_files(self, datasets_dict):\n ifname = self.keywords['inputfile']\n dirstem = os.path.dirname(ifname)\n basename = os.path.basename(ifname).split('.')[0]\n createdfiles=list()\n if dirstem == \"\":\n dirstem = os.getcwd()\n dkeys = datasets_dict.keys()\n dkeys.sort()\n dct=1\n for didx in dkeys:\n newfile = MASTFile()\n newfile.data = list(datasets_dict[didx])\n newname=\"%s/loop_%s_%s.inp\" % (dirstem, basename, str(dct).zfill(2))\n newfile.to_file(newname)\n #createdfiles.append(os.path.basename(newname))\n createdfiles.append(newname)\n dct=dct+1\n return createdfiles", "def main(unused_argv):\n make_dir(FLAGS.raw_dir)\n\n # Get paths of download/extracted training and evaluation files.\n print(\"Downloading data from source\")\n train_files = get_raw_files(FLAGS.raw_dir, constants.TRAIN_DATA_SOURCES)\n eval_files = get_raw_files(FLAGS.raw_dir, constants.EVAL_DATA_SOURCES)", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, 
self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def setUp(self):\n self.test_root = tempfile.mkdtemp(dir=tmpdir)\n self.test_input = os.path.join(self.test_root, 'input')\n self.test_output = os.path.join(self.test_root, 'output')\n self.test_output_tree = os.path.join(self.test_output, 'tree')\n self.test_output_meta = os.path.join(self.test_output_tree, 'meta.js')\n self.test_output_toc = os.path.join(self.test_output_tree, 'toc.js')\n\n os.makedirs(self.test_input, exist_ok=True)\n os.makedirs(self.test_output, exist_ok=True)", "def createInput(dirPath,gSettings):\n \n with open(os.path.join('../in','input.txt')) as f:\n inpFile = f.readlines()\n \n\n # Model settings\n model = gSettings[\"Model\"]\n inpFile[13] = \"insgrav: {:1d}\\n\".format(int(model[\"NS gravity\"][\"Flag\"]))\n inpFile[14] = \"isun: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Sun\"]))\n inpFile[15] = \"imoon: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Moon\"]))\n\n if model[\"Drag\"][\"Flag\"] == False:\n inpFile[16] = \"idrag: 0\\n\"\n else:\n dm = model[\"Drag\"][\"Model\"].lower()\n if dm == \"wertz\":\n idrag = 1\n elif dm == \"us76\":\n idrag = 2\n elif dm == \"j77\":\n idrag = 3\n elif dm == \"msis00\":\n idrag = 4\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Model\"] + '\" invalid.')\n inpFile[16] = \"idrag: {:1d}\\n\".format(idrag)\n if model[\"Drag\"][\"Solar flux\"].lower() == \"constant\":\n inpFile[17] = \"iF107: 0\\n\"\n elif model[\"Drag\"][\"Solar flux\"].lower() == \"variable\":\n 
inpFile[17] = \"iF107: 1\\n\"\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Solar flux\"] + '\" invalid.')\n\n if model[\"SRP\"][\"Flag\"] == False:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n else:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n if model[\"SRP\"][\"Eclipses\"]:\n inpFile[18] = \"iSRP: 2\\n\"\n \n if model[\"Lunisolar\"][\"Ephemerides\"] == \"DE431\":\n inpFile[19] = \"iephem: 1\\n\"\n elif model[\"Lunisolar\"][\"Ephemerides\"] == \"Meeus\":\n inpFile[19] = \"iephem: 2\\n\"\n else:\n raise ValueError('Value \"' + model[\"Lunisolar\"][\"Ephemerides\"] + '\" invalid.')\n \n inpFile[20] = \"gdeg: {:3d}\\n\".format(model[\"NS gravity\"][\"Degree\"])\n if model[\"NS gravity\"][\"Order\"] <= model[\"NS gravity\"][\"Degree\"]:\n inpFile[21] = \"gord: {:3d}\\n\".format(model[\"NS gravity\"][\"Order\"])\n else:\n raise ValueError(\"Order {0:d} of the gravity field is greater than degree {1:d}\".format(model[\"NS gravity\"][\"Order\"],model[\"NS gravity\"][\"Degree\"]))\n \n\n\n # Integration settings\n integ = gSettings[\"Integration\"]\n inpFile[29] = \"tol: {:22.15E}\\n\".format(integ[\"Tolerance\"])\n inpFile[30] = \"tspan: {:22.15E}\\n\".format(integ[\"Duration\"] * 365.25)\n inpFile[31] = \"tstep: {:22.15E}\\n\".format(integ[\"Step\"])\n inpFile[39] = \"eqs: {:2d}\\n\".format(integ[\"Equations\"])\n\n\n\n # Output settings\n inpFile[44] = \"verb: 0\\n\"\n inpFile[45] = \"out: \" + os.path.abspath(os.path.join(dirPath, ' '))\n\n\n with open(os.path.join(dirPath,'input.txt'),'w') as f:\n f.writelines(inpFile)", "def test_input_target_file(self):\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_target_file\"\n params[\"input_target\"] = \"file\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files = list_files_folder(params[\"input\"], ext=\"fna.gz\")\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")", "def test_main(data, tmp_path):\n\n main(data, tmp_path)\n\n FILES = (\n \"gd32f888x(0-1)xx-pinctrl.h\",\n \"gd32f888x(2-3)xx-pinctrl.h\",\n \"gd32f888y(0-1)xx-pinctrl.h\",\n \"gd32f999x(0-1)xx-pinctrl.h\",\n \"gd32f999x(2-3)xx-pinctrl.h\",\n \"gd32f999y(0-1)xx-pinctrl.h\",\n )\n\n for file in FILES:\n ref_file = data / file\n gen_file = tmp_path / file\n\n assert gen_file.exists()\n\n with open(ref_file) as ref, open(gen_file) as gen:\n assert ref.read() == gen.read()", "def prepare_data(src, dst):\n\n data_prefix = 'miniCelebA_'\n for split in ['train', 'val', 'test']:\n print('processing %s split' % split)\n if (not os.path.exists(os.path.join(dst, 'x_' + split + '.npy')) or not\n os.path.exists(os.path.join(dst, 'y_' + split + '.npy'))):\n labels = glob(os.path.join(src, split, '*'))\n no_sample = 0\n for lb in labels:\n no_sample += len(os.listdir(lb))\n\n x = np.zeros((no_sample, 224, 224, 3))\n y = np.zeros((no_sample, 20))\n count = 0\n for lb in labels:\n files = glob(os.path.join(lb, '*.png'))\n for f in files:\n print('processing file: 
%s, with label %s' % (f, lb.split('/')[-1]))\n y[count] = to_categorical(int(lb.split('/')[-1]), 20)\n img = misc.imresize(misc.imread(f), (224, 224), 'bicubic')\n if img.ndim == 2:\n img = np.expand_dims(img, -1)\n img = np.concatenate((img, img, img), axis=-1)\n x[count] = img\n\n count += 1\n\n assert count == no_sample, \"number of sample (%d) is different than number of read image (%d)\" % (\n no_sample, count)\n\n x = get_deep_feature(x)\n np.save(os.path.join(dst, data_prefix + 'x_' + split + '.npy'), x)\n np.save(os.path.join(dst, data_prefix + 'y_' + split + '.npy'), y)", "def test_DerivativesDataSink_build_path(\n tmp_path,\n out_path_base,\n source,\n input_files,\n entities,\n expectation,\n dismiss_entities,\n):\n ds_inputs = []\n for input_file in input_files:\n fname = tmp_path / input_file\n if fname.name.rstrip(\".gz\").endswith(\".nii\"):\n hdr = nb.Nifti1Header()\n hdr.set_qform(np.eye(4), code=2)\n hdr.set_sform(np.eye(4), code=2)\n units = (\"mm\", \"sec\") if \"bold\" in input_file else (\"mm\",)\n size = (10, 10, 10, 10) if \"bold\" in input_file else (10, 10, 10)\n hdr.set_xyzt_units(*units)\n nb.Nifti1Image(np.zeros(size), np.eye(4), hdr).to_filename(fname)\n else:\n (tmp_path / input_file).write_text(\"\")\n\n ds_inputs.append(str(fname))\n\n dds = bintfs.DerivativesDataSink(\n in_file=ds_inputs,\n base_directory=str(tmp_path),\n source_file=source,\n out_path_base=out_path_base,\n dismiss_entities=dismiss_entities,\n **entities,\n )\n\n if type(expectation) == type(Exception):\n with pytest.raises(expectation):\n dds.run()\n return\n\n output = dds.run().outputs.out_file\n if isinstance(expectation, str):\n expectation = [expectation]\n output = [output]\n\n if dismiss_entities:\n if \"run\" in dismiss_entities:\n expectation = [e.replace(\"_run-1\", \"\") for e in expectation]\n\n if \"session\" in dismiss_entities:\n expectation = [\n e.replace(\"_ses-preop\", \"\").replace(\"ses-preop/\", \"\")\n for e in expectation\n ]\n\n base = out_path_base or \"niworkflows\"\n for out, exp in zip(output, expectation):\n assert Path(out).relative_to(tmp_path) == Path(base) / exp\n\n os.chdir(str(tmp_path)) # Exercise without setting base_directory\n dds = bintfs.DerivativesDataSink(\n in_file=ds_inputs,\n dismiss_entities=dismiss_entities,\n source_file=source,\n out_path_base=out_path_base,\n **entities,\n )\n\n output = dds.run().outputs.out_file\n if isinstance(output, str):\n output = [output]\n\n for out, exp in zip(output, expectation):\n assert Path(out).relative_to(tmp_path) == Path(base) / exp", "def load_fullres_inputs(task, subdir='training'):\n tagged_paths = {\n 'gt': glob.glob(join(task.root, subdir, '*_GTL.tif')),\n 'im': glob.glob(join(task.root, subdir, '*_RGB.tif')),\n\n 'gti': glob.glob(join(task.root, subdir, '*_GTI.tif')),\n\n # digital terrain model\n 'dtm': glob.glob(join(task.root, subdir, '*_DTM.tif')),\n # digital surface model\n 'dsm': glob.glob(join(task.root, subdir, '*_DSM.tif')),\n }\n\n def extract_primary_key_info(paths, tag):\n if not paths:\n return pd.DataFrame()\n infos = [parse.parse('{site_id}_Tile_{N}_{type}.tif', p).named\n for p in map(basename, paths)]\n df = pd.DataFrame(infos)\n df = df.rename(columns={'type': tag + 'type'})\n df[tag] = paths\n df = df.set_index(['site_id', 'N'], drop=False).sort_index()\n return df\n\n train = pd.DataFrame()\n for tag, paths in tagged_paths.items():\n _df = extract_primary_key_info(paths, tag)\n if len(_df):\n for pk in ['N', 'site_id']:\n if pk not in train.columns:\n train[pk] = _df[pk]\n 
train[tag] = _df[tag]\n\n null_idxs = list(set(np.where(pd.isnull(train))[0]))\n if null_idxs:\n raise ValueError(('MISSING DATA FOR {}'.format(\n [train.index[i] for i in null_idxs])))\n\n for tag, paths in tagged_paths.items():\n pass\n\n metadata = train[['site_id', 'N']].reset_index(drop=True)\n dump_im_names = ['{site_id}_Tile_{N}.tif'.format(**d)\n for d in metadata.to_dict(orient='records')]\n\n # train_gts = list(train['gt'].values)\n # train_rgb = list(train['im'].values)\n # train_dtm = list(train['dtm'].values)\n # train_dsm = list(train['dsm'].values)\n\n # train_gts = sorted(train_gts)\n # train_rgb = fnameutil.align_paths(train_gts, train_rgb)\n # train_dtm = fnameutil.align_paths(train_gts, train_dtm)\n # train_dsm = fnameutil.align_paths(train_gts, train_dsm)\n # dump_im_names = ['{site_id}_Tile_{N}.tif'.format(**d) for d in infos]\n\n kw = train.drop(['N', 'site_id'], axis=1).to_dict(orient='list')\n fullres = inputs.Inputs.from_paths(**kw)\n\n # aux_paths = {'dtm': train_dtm, 'dsm': train_dsm}\n # fullres = {'im': train_rgb, 'gt': train_gts, 'aux': aux}\n\n fullres.dump_im_names = dump_im_names\n fullres.metadata = metadata\n\n # fullres.aux_paths = {}\n fullres.tag = 'fullres'\n return fullres", "def setup():\n print('...')\n # Make sure dirs exist\n for directory in [DATA_DIR, DATA_INPUT_DIR, DATA_OUTPUT_DIR]:\n os.makedirs(directory, exist_ok=True)", "def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def source_data_files(self, data_dir, tmp_dir, dataset_split):\n raise NotImplementedError()", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(\n input_dir: Path = typer.Argument(..., exists=True),\n output_dir: Path = typer.Argument(...),\n beth_train_tar_name: str = \"i2b2_Beth_Train_Release.tar.gz\",\n partners_train_tar_name: str = \"i2b2_Partners_Train_Release.tar.gz\",\n test_zip_name: str = \"Task_1C.zip\",\n merge_docs: bool = True,\n):\n # Unpack compressed data files\n msg.info(\"Extracting raw data.\")\n beth_train_tar_path = input_dir / beth_train_tar_name\n partners_train_tar_path = input_dir / partners_train_tar_name\n test_zip_path = input_dir / test_zip_name\n\n for path in [beth_train_tar_path, partners_train_tar_path]:\n if path.name.endswith(\"tar.gz\"):\n msg.text(f\"Extracting {path}\")\n tar = tarfile.open(path, \"r:gz\")\n tar.extractall(path.parent)\n tar.close()\n\n shutil.unpack_archive(test_zip_path, input_dir / test_zip_name.replace(\".zip\", \"\"))\n\n # preprocess data\n msg.info(\"Converting to spaCy Doc objects.\")\n beth_train_docs = docs_from_many_clinical_records(\n input_dir / \"Beth_Train\", merge_docs=merge_docs\n )\n partners_train_docs = docs_from_many_clinical_records(\n input_dir / \"Partners_Train\", merge_docs=merge_docs\n )\n train_docs = beth_train_docs + partners_train_docs\n\n beth_test_docs = docs_from_many_clinical_records(\n input_dir / \"Task_1C/i2b2_Test/i2b2_Beth_Test\", merge_docs=merge_docs\n )\n partners_test_docs = docs_from_many_clinical_records(\n input_dir / \"Task_1C/i2b2_Test/i2b2_Partners_Test\", merge_docs=merge_docs\n )\n test_docs = beth_test_docs + partners_test_docs\n\n random.shuffle(train_docs)\n split_idx = int(len(train_docs) * 0.8)\n train_docs, dev_docs = 
train_docs[:split_idx], train_docs[split_idx:]\n\n msg.good(f\"Num Train Docs: {len(train_docs)}\")\n msg.good(f\"Num Dev Docs: {len(dev_docs)}\")\n msg.good(f\"Num Test Docs: {len(test_docs)}\")\n\n with msg.loading(f\"Saving docs to: {output_dir}...\"):\n DocBin(docs=train_docs).to_disk(output_dir / \"train.spacy\")\n DocBin(docs=dev_docs).to_disk(output_dir / \"dev.spacy\")\n DocBin(docs=test_docs).to_disk(output_dir / \"test.spacy\")\n msg.good(\"Done.\")", "def setUp(self):\n # make directory test\n self.temp_dir_string = '/tmp/test_for_seqprep/'\n create_dir(self.temp_dir_string)\n\n # make directory with spaces test\n self.temp_dir_string_space = '/tmp/test for seqprep/'\n create_dir(self.temp_dir_string_space)\n \n # create temp file path strings\n self.test_fn1 = os.path.join(self.temp_dir_string,'reads1.fastq')\n self.test_fn1_space = os.path.join(self.temp_dir_string_space, \n 'reads1.fastq')\n self.test_fn2 = os.path.join(self.temp_dir_string,'reads2.fastq')\n self.test_fn2_space = os.path.join(self.temp_dir_string_space,\n 'reads2.fastq')" ]
[ "0.6538091", "0.6146276", "0.607586", "0.5977594", "0.5945953", "0.59351915", "0.5910153", "0.5886085", "0.5845465", "0.5794927", "0.576355", "0.574656", "0.57456464", "0.57011575", "0.5694419", "0.569211", "0.5688554", "0.5685619", "0.56332463", "0.56278634", "0.5609137", "0.5597945", "0.5586367", "0.5574616", "0.557134", "0.556148", "0.5554964", "0.5554964", "0.55534005", "0.5534059" ]
0.7445018
0
This function checks that the ordering of the samples matches between the expression file and the metadata file. This ordering is used for calculating DEGs.
def compare_and_reorder_samples(expression_file, metadata_file): # Check ordering of sample ids is consistent between gene expression data and metadata metadata = pd.read_csv(metadata_file, sep="\t", header=0, index_col=0) metadata_sample_ids = metadata.index expression_data = pd.read_csv(expression_file, sep="\t", header=0, index_col=0) expression_sample_ids = expression_data.index if metadata_sample_ids.equals(expression_sample_ids): print("sample ids are ordered correctly") else: # Convert gene expression ordering to be the same as # metadata sample ordering print("sample ids don't match, going to re-order gene expression samples") expression_data = expression_data.reindex(metadata_sample_ids) expression_data.to_csv(expression_file, sep="\t")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_file_sorting(observable_config_path: list[Path]) -> None:\n _names = list(map(lambda f: f.name, observable_config_path))\n _names_sorted = list(\n sorted(_names, key=lambda f: re.findall(r\"(\\d+).bin\", f)[0])\n )\n _is_match = [f0 == f1 for f0, f1 in zip(_names, _names_sorted)]\n if sum(_is_match) != len(_is_match):\n logger.warning(\"Possible unsorted input files detected. Continuing.\")", "def validate_sample_and_seq_exp(program_id, donor_id, analysis_objs):\n sample_fields = {\n 'submitterSampleId': 'samples.submitterSampleId',\n 'sampleType': 'samples.sampleType',\n 'matchedNormalSubmitterSampleId': 'samples.matchedNormalSubmitterSampleId',\n 'specimenId': 'samples.specimen.specimenId',\n 'specimenType': 'samples.specimen.specimenType',\n 'tumourNormalDesignation': 'samples.specimen.tumourNormalDesignation',\n 'specimenTissueSource': 'samples.specimen.specimenTissueSource',\n 'donorId': 'samples.donor.donorId',\n 'gender': 'samples.donor.gender',\n 'studyId': 'studyId'\n }\n\n # gather sample information from all analysis objects\n sample_info = dict()\n for a in analysis_objs:\n analysisId = a['analysisId']\n sampleId = a['samples'][0]['sampleId']\n if sampleId not in sample_info:\n sample_info[sampleId] = {\n 'sampleId': sampleId,\n 'analysisId': [],\n 'sequencing_experiment': {}\n }\n\n sample_info[sampleId]['analysisId'].append(analysisId)\n\n for field in sample_fields:\n if field not in sample_info[sampleId]:\n sample_info[sampleId][field] = dict()\n\n source_path = sample_fields[field].split('.')\n if len(source_path) == 1:\n source_value_str = str(a[source_path[0]])\n elif source_path[0] == 'samples':\n if len(source_path) == 2:\n source_value_str = str(a['samples'][0][source_path[1]])\n elif len(source_path) == 3:\n source_value_str = str(a['samples'][0][source_path[1]][source_path[2]])\n else:\n assert False # not supposed to reach here\n else:\n assert False # not supposed to reach here\n\n if source_value_str not in sample_info[sampleId][field]:\n sample_info[sampleId][field][source_value_str] = []\n\n sample_info[sampleId][field][source_value_str].append(analysisId)\n\n # add sequencing_experiment\n if a.get('analysisType', {}).get('name') in ('sequencing_experiment', 'rna_sequencing_experiment'):\n strategy = a['experiment']['experimental_strategy']\n matchedNormalSubmitterSampleId = a['samples'][0]['matchedNormalSubmitterSampleId']\n\n if strategy not in sample_info[sampleId]['sequencing_experiment']:\n sample_info[sampleId]['sequencing_experiment'][strategy] = {\n 'sequencing_experiment_analysis_id': [analysisId],\n 'matchedNormalSubmitterSampleId': [matchedNormalSubmitterSampleId]\n }\n else:\n sample_info[sampleId]['sequencing_experiment'][strategy]['sequencing_experiment_analysis_id'].append(analysisId)\n sample_info[sampleId]['sequencing_experiment'][strategy]['matchedNormalSubmitterSampleId'].append(matchedNormalSubmitterSampleId)\n\n # print(json.dumps({'donor_id': donor_id}))\n # print(json.dumps(sample_info))\n\n samples, issues = value_discrepancy_check(sample_info, sample_fields)\n # print(json.dumps(samples))\n # print(json.dumps(issues))\n\n submitterSampleId2SampleId = mapping_sumbitter_sample_id_to_sample_id(samples)\n # print(json.dumps(submitterSampleId2SampleId))\n\n # figure out tumour-normal pairs\n tumour_normal_pairs, tumour_not_paired, normal_not_paired, normal_paired, issues = resolve_tumour_normal_pairs(samples, submitterSampleId2SampleId)\n # print(json.dumps(tumour_normal_pairs))\n # print(json.dumps(normal_paired))\n # 
print(json.dumps(tumour_not_paired))\n # print(json.dumps(normal_not_paired))\n # print(json.dumps(issues))", "def test_compare_genomes_1(self):\n import_genome.compare_genomes(self.genome_pair, self.eval_flags)\n count = count_status(self.genome_pair, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.genome_pair.evaluations), 12)\n with self.subTest():\n self.assertEqual(count, 0)", "def process_samples_for_limma(\n expression_filename,\n grp_metadata_filename,\n out_expression_filename=None,\n process_metadata_filename=None,\n):\n\n # Read data\n expression = pd.read_csv(expression_filename, sep=\"\\t\", index_col=0, header=0)\n if process_metadata_filename is not None:\n process_metadata = pd.read_csv(\n process_metadata_filename, sep=\"\\t\", index_col=0, header=0\n )\n grp_metadata = pd.read_csv(grp_metadata_filename, sep=\"\\t\", header=0, index_col=0)\n\n if process_metadata_filename is not None:\n # Get samples ids to remove\n samples_to_remove = list(\n process_metadata[process_metadata[\"processing\"] == \"drop\"].index\n )\n\n # Remove samples\n expression = expression.drop(samples_to_remove)\n\n # Check ordering of sample ids is consistent between gene expression data and metadata\n metadata_sample_ids = grp_metadata.index\n expression_sample_ids = expression.index\n\n if metadata_sample_ids.equals(expression_sample_ids):\n print(\"sample ids are ordered correctly\")\n else:\n # Convert gene expression ordering to be the same as\n # metadata sample ordering\n print(\"sample ids don't match, going to re-order gene expression samples\")\n expression = expression.reindex(metadata_sample_ids)\n\n assert expression.index.equals(metadata_sample_ids)\n\n # Save\n if out_expression_filename is not None:\n expression.to_csv(out_expression_filename, sep=\"\\t\")\n else:\n expression.to_csv(expression_filename, sep=\"\\t\")", "def test_compare_genomes_3(self):\n self.eval_flags = {\"check_replace\": False}\n import_genome.compare_genomes(self.genome_pair, self.eval_flags)\n count = count_status(self.genome_pair, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.genome_pair.evaluations), 9)\n with self.subTest():\n self.assertEqual(count, 0)", "def test_ordering(self):\r\n def verify_order(source_usage_key, parent_usage_key, source_position=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n parent = self.get_item_from_modulestore(parent_usage_key)\r\n children = parent.children\r\n if source_position is None:\r\n self.assertFalse(source_usage_key in children, 'source item not expected in children array')\r\n self.assertEqual(\r\n children[len(children) - 1],\r\n usage_key,\r\n \"duplicated item not at end\"\r\n )\r\n else:\r\n self.assertEqual(\r\n children[source_position],\r\n source_usage_key,\r\n \"source item at wrong position\"\r\n )\r\n self.assertEqual(\r\n children[source_position + 1],\r\n usage_key,\r\n \"duplicated item not ordered after source item\"\r\n )\r\n\r\n verify_order(self.problem_usage_key, self.seq_usage_key, 0)\r\n # 2 because duplicate of problem should be located before.\r\n verify_order(self.html_usage_key, self.seq_usage_key, 2)\r\n verify_order(self.seq_usage_key, self.chapter_usage_key, 0)\r\n\r\n # Test duplicating something into a location that is not the parent of the original item.\r\n # Duplicated item should appear at the end.\r\n verify_order(self.html_usage_key, self.usage_key)", "def test_valid_genes_file(self):\n\n # Create a valid genes file\n valid_genes_file = 
os.path.join(os.path.dirname(\n os.path.abspath(__file__)), \"data\", \"valid_genes_file.bed\")\n\n ref_name = \"ref1\"\n\n genes = {\"gene1\": {\"start\": 0, \"end\": 100},\n \"gene 2\": {\"start\": 101, \"end\": 200}, # Spaces are allowed in the gene name\n \"gene3\": {\"start\": 201, \"end\": 300}}\n\n with open(valid_genes_file, \"w+\") as f:\n for gene in genes:\n f.write(\"%s\\t%s\\t%s\\t%s\\n\" % (ref_name, genes[gene][\"start\"],\n genes[gene][\"end\"], gene))\n\n parsed_genes = parse_genes_file(valid_genes_file, ref_name)\n\n for gene in parsed_genes:\n assert gene in genes\n assert parsed_genes[gene][\"start\"] == genes[gene][\"start\"]\n assert parsed_genes[gene][\"end\"] == genes[gene][\"end\"]\n assert parsed_genes[gene][\"frame\"] == genes[gene][\"start\"] % 3\n\n os.remove(valid_genes_file)", "def test_extrinsic_metadata(self):\n\n qs = FBO(\n path=TEST_FILES_ROOT,\n glob='*.rst',\n metadata=FileObject.MetadataInFileHead,\n ).all()\n\n self.assertEqual(\n 3,\n qs.count(),\n )\n # Have to test this both ways so that however it\n # comes out of the filesystem \"by default\" (ie\n # intrinsically, probably inode ordering) we'll get\n # a failure if our explicit ordering isn't applied.\n self.assertEqual(\n 'test1.rst',\n qs.order_by('title')[0].name,\n )\n self.assertEqual(\n 'test3.rst',\n qs.order_by('-title')[0].name,\n )", "def process_samples_for_DESeq(\n expression_filename,\n grp_metadata_filename,\n out_expression_filename=None,\n count_threshold=None,\n process_metadata_filename=None,\n):\n\n # Read data\n expression = pd.read_csv(expression_filename, sep=\"\\t\", index_col=0, header=0)\n if process_metadata_filename is not None:\n process_metadata = pd.read_csv(\n process_metadata_filename, sep=\"\\t\", index_col=0, header=0\n )\n grp_metadata = pd.read_csv(grp_metadata_filename, sep=\"\\t\", header=0, index_col=0)\n\n if process_metadata_filename is not None:\n # Get samples ids to remove\n samples_to_remove = list(\n process_metadata[process_metadata[\"processing\"] == \"drop\"].index\n )\n\n # Remove samples\n expression = expression.drop(samples_to_remove)\n\n # Cast as int\n expression = expression.astype(int)\n\n # Remove genes with 0 counts\n # all_zero_genes = list(expression.columns[(expression == 0).all()])\n # expression = expression.drop(columns=all_zero_genes)\n\n # assert len(list(expression.columns[(expression == 0).all()])) == 0\n\n # Remove genes below a certain threshold (if provided)\n if count_threshold is not None:\n genes_to_keep = expression.loc[:, expression.mean() >= count_threshold].columns\n expression = expression[genes_to_keep]\n\n # Check ordering of sample ids is consistent between gene expression data and metadata\n metadata_sample_ids = grp_metadata.index\n expression_sample_ids = expression.index\n\n if metadata_sample_ids.equals(expression_sample_ids):\n print(\"sample ids are ordered correctly\")\n else:\n # Convert gene expression ordering to be the same as\n # metadata sample ordering\n print(\"sample ids don't match, going to re-order gene expression samples\")\n expression = expression.reindex(metadata_sample_ids)\n\n assert expression.index.equals(metadata_sample_ids)\n\n # Save\n if out_expression_filename != None:\n expression.to_csv(out_expression_filename, sep=\"\\t\")\n else:\n expression.to_csv(expression_filename, sep=\"\\t\")", "def assert_filenames(self):\n print(\"Asserting filenames: \", end=\"\")\n error_files = []\n\n for data_dir in data_settings.BLOCK_DATA_DIRS:\n\n filenames = os.listdir(data_dir)\n\n for 
filename in filenames:\n\n if 'aux.xml' in filename or 'yield':\n\n continue\n\n try:\n\n filename_split = filename.split(\"_\")\n date = filename_split[0]\n _, suffix = filename_split[-1].split(\".\")\n\n assert suffix == 'tif', \"Wrong file suffix\"\n assert len(date) == 8, \"Wrong amount of numbers in date\"\n assert date[0:4] == '2017', \"Year is wrong\"\n assert date[4] == '0', \"No double digit months in dataset\"\n assert date[5] in ['4', '5', '6', '7', '8',\n '9'], \"Month outside dataset range\"\n assert date[6] in ['0', '1', '2',\n '3'], \"Ten-indicator for day is wrong\"\n assert date[7] in ['0', '1', '2', '3', '4', '5',\n '6', '7', '8', '9'], \"Date is not a digit\"\n assert 'ndvi' in filename or 'drone_rgb' in filename or 'drone_ndvi' in filename, \"Proper type is missing\"\n\n if 'sentinel_ndvi' in filename:\n\n assert len(filename) == 26, \"Filename wrong for {} in {}\".format(\n filename, data_dir)\n\n if 'drone_ndvi' in filename:\n\n assert len(filename) == 23, \"Filename wrong for {} in {}\".format(\n filename, data_dir)\n\n if 'drone_rgb' in filename:\n\n assert len(filename) == 22, \"Filename wrong for {} in {}\".format(\n filename, data_dir)\n\n except (AssertionError, ValueError) as ex:\n\n error_files.append(\"{}: {}\".format(\n ex, os.path.join(data_dir, filename)))\n\n if not error_files:\n\n print(\"All generated block datasets named correctly!\")\n\n else:\n\n print(\"There were some problems with the following files\")\n\n for error_file in error_files:\n print(\"\\t{}\".format(error_file))", "def test_parse_fasta_file(self):\r\n\r\n fasta_data = ['>seq1 SAMPLE1', 'AAACGT', '>seq2', 'ACGGT']\r\n\r\n expected_fasta = {'seq1': 'AAACGT', 'seq2': 'ACGGT'}\r\n\r\n expected_order = ['seq1 SAMPLE1', 'seq2']\r\n\r\n actual_fasta, actual_order = parse_fasta_file(fasta_data)\r\n\r\n self.assertEqual(actual_fasta, expected_fasta)\r\n\r\n self.assertEqual(actual_order, expected_order)", "def test_good_metadata_file_registry(self):\n # Setup test\n filename = os.path.join(_SAMPLE_FILES_DIR, \"reg_good_mf.xml\")\n out_name = \"physics_types_ddt\"\n in_source = os.path.join(_SAMPLE_FILES_DIR, out_name + '_se.F90')\n in_meta = os.path.join(_SAMPLE_FILES_DIR, out_name + '_se.meta')\n out_source = os.path.join(_TMP_DIR, out_name + '.F90')\n out_meta = os.path.join(_TMP_DIR, out_name + '.meta')\n remove_files([out_source, out_meta])\n # generate registry\n retcode, files = gen_registry(filename, 'se', {}, _TMP_DIR, 2,\n _SRC_MOD_DIR, _CAM_ROOT,\n loglevel=logging.ERROR,\n error_on_no_validate=True)\n # Check return code\n amsg = \"Test failure for SE dycore, retcode={}\".format(retcode)\n self.assertEqual(retcode, 0, msg=amsg)\n flen = len(files)\n amsg = \"Test failure for SE dycore: Found {} files, expected 2\"\n self.assertEqual(flen, 2, msg=amsg.format(flen))\n amsg = \"{} does not exist\".format(out_meta)\n self.assertTrue(os.path.exists(out_meta), msg=amsg)\n amsg = \"{} does not exist\".format(out_source)\n self.assertTrue(os.path.exists(out_source), msg=amsg)\n # For each output file, make sure it matches input file\n amsg = \"{} does not match {}\".format(in_meta, out_meta)\n self.assertTrue(filecmp.cmp(in_meta, out_meta,\n shallow=False), msg=amsg)\n amsg = \"{} does not match {}\".format(in_source, out_source)\n self.assertTrue(filecmp.cmp(in_source, out_source,\n shallow=False), msg=amsg)\n # Check that the metadata file has the correct number of variables\n mfile = files[1]\n mvars = mfile.variable_list()\n num_vars = len(mvars)\n amsg = \"Expected 14 metadata 
variables, found {}\".format(num_vars)\n self.assertEqual(num_vars, 14, msg=amsg)", "def check_order(self, filename: str, section: str, texts: List[str]):\n alphas = sorted(texts, key=lambda x: x.split(':')[0].lower())\n if texts == alphas:\n return\n for text, alpha in zip(texts, alphas):\n if text != alpha:\n print(f'{filename}: {section}: {text} vs {alpha}')\n break", "def test_split_otu_table_on_sample_metadata(self):\r\n actual = list(split_otu_table_on_sample_metadata(self.otu_table_f1,\r\n self.mapping_f1,\r\n \"Treatment\"))\r\n for id_, e in actual:\r\n try:\r\n parse_biom_table(e)\r\n except:\r\n print e\r\n actual = [(id_, parse_biom_table(e)) for id_, e in actual]\r\n exp = [(id_, parse_biom_table(e)) for id_, e in otu_table_exp1]\r\n\r\n actual.sort()\r\n exp.sort()\r\n\r\n for a, e in zip(actual, exp):\r\n self.assertEqual(a, e, \"OTU tables are not equal:\\n%s\\n%s\" %\r\n (format_biom_table(a[1]), format_biom_table(e[1])))", "def test_gene_essentiality_from_data_qualitative(combined_dataframe):\n comparative_dataframe, exp = essential.prepare_qualitative_comparison(\n combined_dataframe\n )\n assert len(comparative_dataframe[comparative_dataframe[\"true_positives\"] == 1]) == 3", "def do_comparison(found_file, created_file):\n\n fh_f, fh_c, data_f, data_c = get_data(found_file, created_file)\n\n print('Initial found data shape ', data_f.shape)\n print(' and created data shape= ', data_c.shape)\n\n # Compare slice i of created to slice i+1 in found\n if (data_f.shape[0] == 1): # NIRCAM\n data_f = data_f[0, :, :, :]\n if (data_c.shape[0] == 1): # to accept output of mc_4d\n data_c = data_c[0, :, :, :]\n data_c_start = data_c[:-1, :, :]\n data_f_end = data_f[1:, :, :]\n elif (fh_f['SCI'].header['NAXIS'] == 3): # NIRSPEC\n data_c_start = data_c[:-1, :, :]\n data_f_end = data_f[1:, :, :]\n elif (data_f.shape[0] > 1 and fh_f['SCI'].header['NAXIS'] == 4): # MIRI\n # concatenate copies of created data (except for the last frame)\n num_ints = int(fh_f[1].data.shape[0]) # number of integrations\n data_c_start = (np.repeat(data_c[:-1, :, :], num_ints, axis=0))\n data_f_end = data_f[:, 1:, :, :]\n data_c_start = data_c_start.reshape(data_f_end.shape)\n else:\n print(' FATAL ERROR - unsupported instrument')\n\n print('Truncated found data shape ', data_f_end.shape)\n print(' and truncated created data shape= ', data_c_start.shape)\n try:\n assert(data_f_end.shape == data_c_start.shape)\n except AssertionError:\n print(' FATAL ERROR: adjusted found data shape ', data_f.shape, \\\n ' is not the same as adjusted created data shape= ', data_c.shape)\n\n neither = (data_c_start == 0.) & (data_f_end == 0.)\n both = (data_c_start != 0.) & (data_f_end != 0.) # created CR was found\n c_only = (data_c_start != 0.) & (data_f_end == 0.) # created CR not found\n f_only = (data_c_start == 0.) & (data_f_end != 0.) 
# found CR was not created\n\n try:\n assert(neither.sum() + both.sum() + c_only.sum() + f_only.sum() \\\n == data_c_start.size)\n except AssertionError:\n print('FATAL ERROR: sum of components must equal total number of pixels ')\n\n print(' Within the input dataset cubes:')\n print(' Number of created but not found pixels: ', c_only.sum())\n print(' Number of found but not created pixels: ', f_only.sum())\n print(' Number of pixels that are both found and created: ', both.sum())\n print(' Number of pixels that are neither found nor created: ', neither.sum())\n print(' ')\n print(' The fraction of all pixels that were found only: ', \\\n float(f_only.sum()) / float(data_c_start.size))\n print(' The fraction of all pixels that were created only: ', \\\n float(c_only.sum()) / float(data_c_start.size))\n print(' The fraction of pixels in the created file having cosmic rays:', \\\n float(c_only.sum()) / (data_c_start.shape[-2] * data_c_start.shape[-1]))\n print(' ')\n\n write_files(neither, both, c_only, f_only, fh_c, data_c_start)", "def test_matched_pairs():\n template_filelist = listdir(RTEMPLATE_PATH)\n\n R_files = []\n json_files = []\n orphan_files = []\n for file in template_filelist:\n if '.r' in file:\n file = file.replace('.r', '')\n R_files.append(file)\n elif '.json' in file:\n file = file.replace('.json', '')\n json_files.append(file)\n else:\n orphan_files.append(file)\n\n ## make sure there are no non R/json files\n assert not bool(orphan_files) #file in path isn't .json or .R\n\n ## make sure every R file has a json pair\n assert not bool(\n set(R_files) - set(json_files)\n )", "def testOutputs(self):\n # Remember original (correct) example outputs\n old_files = self.read_outputs()\n\n # Set up and run Xanthos\n ini = 'example/pm_abcd_mrtm.ini'\n xth = Xanthos(ini)\n res = xth.execute()\n\n # Check result dimensions\n self.assertEqual(res.Q.shape, (67420, 372))\n\n # Test that new outputs equal old outputs.\n new_files = self.read_outputs()\n for k in new_files.keys():\n pd.testing.assert_frame_equal(new_files[k], old_files[k])", "def test_sample_ids_from_metadata_description(self):\n self.assertRaises(ValueError, sample_ids_from_metadata_description,\n self.tutorial_mapping_f, \"Treatment:Foo\")\n self.tutorial_mapping_f.seek(0)\n self.assertRaises(ValueError, sample_ids_from_metadata_description,\n self.tutorial_mapping_f, \"DOB:!20061218,!20070314,!20071112,\"\n \"!20080116\")", "def test_compare_genomes_2(self):\n self.pmr_gnm.annotation_status = \"final\"\n self.pmr_gnm.name = \"Trixie\"\n import_genome.compare_genomes(self.genome_pair, self.eval_flags)\n count = count_status(self.genome_pair, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.genome_pair.evaluations), 13)\n with self.subTest():\n self.assertEqual(count, 0)", "def test_SampleIds(self):\n exp = [\"PC.354\", \"PC.355\", \"PC.356\", \"PC.481\", \"PC.593\", \"PC.607\",\n \"PC.634\", \"PC.635\", \"PC.636\"]\n obs = self.overview_map.SampleIds\n self.assertEqual(obs, exp)\n\n obs = self.no_metadata.SampleIds\n self.assertEqual(obs, exp)\n\n obs = self.empty_map.SampleIds\n self.assertEqual(obs, [])", "def test_compute_correlation_expected_expected_sample_id(self):\r\n # Using a single-sample expected ts.\r\n exp = ((0.83914639167827365, 0.036729,\r\n 0.13786213786213786, (0.032537093928499863,\r\n 0.98380431996767537)),\r\n [('S1', 'Expected', 0.86602540378443871, 0.33333333333333326,\r\n 0.66666666666666652, 0.6793206793206793, 1, (None, None)),\r\n ('S2', 'Expected', 1.0, 0, 0, 
0.32667332667332666,\r\n 0.6533466533466533, (None, None))])\r\n np.random.seed(self.value_for_seed)\r\n obs = _compute_correlation(self.taxa_summary_obs2,\r\n self.taxa_summary_exp2,\r\n 'expected', 'spearman', 'two-sided',\r\n 1000, 0.96, True, 'Expected')\r\n self.compare_multiple_level_array(obs, exp)\r\n \r\n # Using a two-sample expected ts.\r\n exp = ((0.83914639167827365, 0.036729,\r\n 0.13786213786213786, (0.032537093928499863,\r\n 0.98380431996767537)),\r\n [('S1', 'Expected', 0.86602540378443871, 0.33333333333333326,\r\n 0.66666666666666652, 0.6793206793206793, 1, (None, None)),\r\n ('S2', 'Expected', 1.0, 0, 0, 0.32667332667332666,\r\n 0.6533466533466533, (None, None))])\r\n\r\n np.random.seed(self.value_for_seed)\r\n obs = _compute_correlation(self.taxa_summary_obs2,\r\n self.taxa_summary_exp3,\r\n 'expected', 'spearman', 'two-sided',\r\n 1000, 0.96, True, 'Expected')\r\n self.compare_multiple_level_array(obs, exp)", "def test_compare_taxa_summaries_paired_sample_id_map_partial(self):\r\n # The sample ID map has some mappings that are not complete- i.e. a\r\n # sample from one file has a new sample ID that doesn't match any other\r\n # new sample IDs. In this case, the sample should be ignored.\r\n exp = ('Taxon\\tS1\\tS2\\nArchaea\\t0.4\\t0.4\\nBacteria\\t0.5\\t'\r\n '0.7\\nEukarya\\t0.4\\t0.5\\n', 'Taxon\\tE1\\tE2\\nArchaea\\t0.5'\r\n '\\t0.6\\nBacteria\\t0.7\\t0.8\\nEukarya\\t0.5\\t0.6\\n',\r\n '# Correlation coefficient: pearson.\\n# The parametric p-value(s) '\r\n 'were calculated using a two-sided test of significance using a '\r\n 't-distribution.\\n# The '\r\n 'confidence interval(s) were constructed at a confidence level of '\r\n '95.0% using Fisher\\'s z-transformation (see Sokal and Rohlf 3rd '\r\n 'edition pg. 575). The confidence interval(s) are two-sided.\\n# '\r\n 'Number of samples that matched between the taxa summary files: 1'\r\n '\\nCorrelation coefficient\\tParametric p-value\\tNonparametric '\r\n 'p-value\\tCI (lower)\\tCI (upper)\\n1.0000\\t0.0000\\tN/A\\tN/A\\tN/A\\n',\r\n '# Correlation coefficient: pearson.\\n# The parametric p-value(s) '\r\n 'were calculated using a two-sided test of significance using a '\r\n 't-distribution.\\n# The '\r\n 'confidence interval(s) were constructed at a confidence level of '\r\n '95.0% using Fisher\\'s z-transformation (see Sokal and Rohlf 3rd '\r\n 'edition pg. 575). 
The confidence interval(s) are two-sided.\\n# '\r\n 'Number of samples that matched between the taxa summary files: 1'\r\n '\\nSample ID\\tSample ID\\tCorrelation coefficient\\tParametric '\r\n 'p-value\\tParametric p-value (Bonferroni-corrected)\\t'\r\n 'Nonparametric p-value\\tNonparametric p-value '\r\n '(Bonferroni-corrected)\\tCI (lower)\\tCI (upper)\\nS1\\tE2\\t1.0000\\t'\r\n '0.0000\\t0.0000\\tN/A\\tN/A\\tN/A\\tN/A\\n')\r\n\r\n obs = compare_taxa_summaries(self.taxa_summary_paired1,\r\n self.taxa_summary_paired4, 'paired', 'pearson',\r\n num_permutations=0,\r\n perform_detailed_comparisons=True,\r\n sample_id_map=self.taxa_summary_paired_samp_id_map2)\r\n # We can test exactly because there aren't any stochastic p-values.\r\n self.assertEqual(obs, exp)", "def test_split_otu_table_on_sample_metadata_extra_mapping_entries(self):\r\n actual = list(split_otu_table_on_sample_metadata(self.otu_table_f1,\r\n self.mapping_f2,\r\n \"Treatment\"))\r\n\r\n actual = [(id_, parse_biom_table(e)) for id_, e in actual]\r\n exp = [(id_, parse_biom_table(e)) for id_, e in otu_table_exp1]\r\n\r\n actual.sort()\r\n exp.sort()\r\n\r\n for a, e in zip(actual, exp):\r\n self.assertEqual(a, e, \"OTU tables are not equal:\\n%s\\n%s\" %\r\n (format_biom_table(a[1]), format_biom_table(e[1])))", "def test_genbank_consistency(path):\n gb_file = gb.GenBankFile.read(join(data_dir(\"sequence\"), path))\n ref_annot = gb.get_annotation(gb_file)\n\n gff_file = gff.GFFFile.read(join(data_dir(\"sequence\"), path[:-3] + \".gff3\"))\n test_annot = gff.get_annotation(gff_file)\n \n # Remove qualifiers, since they will be different\n # in GFF3 and GenBank\n ref_annot = seq.Annotation(\n [seq.Feature(feature.key, feature.locs) for feature in ref_annot]\n )\n test_annot = seq.Annotation(\n [seq.Feature(feature.key, feature.locs) for feature in test_annot]\n )\n for feature in test_annot:\n # Only CDS, gene, intron and exon should be equal\n # in GenBank and GFF3\n if feature.key in [\"CDS\", \"gene\", \"intron\", \"exon\"]:\n try:\n assert feature in test_annot\n except AssertionError:\n print(feature.key)\n for loc in feature.locs:\n print(loc)\n raise", "def test_SampleIds(self):\r\n exp = [\"PC.354\", \"PC.355\", \"PC.356\", \"PC.481\", \"PC.593\", \"PC.607\",\r\n \"PC.634\", \"PC.635\", \"PC.636\"]\r\n obs = self.overview_map.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n obs = self.no_metadata.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n obs = self.empty_map.SampleIds\r\n self.assertEqual(obs, [])", "def perform_filecheck():\n\n\t# Open files\n\ttrain = open('train_aae_final', 'r')\n\ttest = open('test_aae_final', 'r')\n\n\n\t# Check number of training and testing samples\n\tprint (\"\")\n\tprint (\"Number of training samples =\", len(train.readlines()))\n\tprint (\"Number of testing samples =\", len(test.readlines()))\n\tprint (\"\")\n\n\ttrain.close()\n\ttest.close()", "def test_compare_taxa_summaries_paired_sample_id_map_mismatched_taxa(self):\r\n exp = ('Taxon\\tS1\\tS2\\nArchaea\\t0.4\\t0.4\\nBacteria\\t0.5\\t0.7\\nEukarya'\r\n '\\t0.4\\t0.5\\nFoobar\\t0.0\\t0.0\\n', 'Taxon\\tE1\\tE2\\nArchaea\\t0.5'\r\n '\\t0.6\\nBacteria\\t0.7\\t0.8\\nEukarya\\t0.5\\t0.6\\nFoobar\\t0.1\\t0.9'\r\n '\\n',\r\n '# Correlation coefficient: pearson.\\n# The parametric p-value(s) '\r\n 'were calculated using a two-sided test of significance using a '\r\n 't-distribution.\\n# The nonparametric p-value(s) were calculated '\r\n 'using a two-sided permutation test with permutations.\\n# The '\r\n 'confidence interval(s) were 
constructed at a confidence level of '\r\n '.% using Fisher\\'s z-transformation (see Sokal and Rohlf rd '\r\n 'edition pg. ). The confidence interval(s) are two-sided.\\n# '\r\n 'Number of samples that matched between the taxa summary files: '\r\n '\\nCorrelation coefficient\\tParametric p-value\\tNonparametric '\r\n 'p-value\\tCI (lower)\\tCI (upper)\\n-.\\t.\\t.\\t-.\\t.\\n', None)\r\n\r\n obs = compare_taxa_summaries(self.taxa_summary_paired1,\r\n self.taxa_summary_paired5, 'paired', 'pearson',\r\n perform_detailed_comparisons=False,\r\n sample_id_map=self.taxa_summary_paired_samp_id_map2)\r\n obs = (obs[0], obs[1], self.remove_nums(obs[2]), obs[3])\r\n self.assertEqual(obs, exp)", "def test_equal(self):\n\n qs = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n qs2 = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n self.assertEqual(\n qs[0],\n qs2[0],\n )", "def test_unequal(self):\n\n qs = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n # There are four of these.\n for a, b in combinations(qs.all(), 2):\n self.assertNotEqual(a, b)" ]
[ "0.57153094", "0.5681696", "0.56747234", "0.56506944", "0.56446517", "0.5587243", "0.5585911", "0.5583389", "0.5577927", "0.5573355", "0.55560887", "0.55500454", "0.5520259", "0.5505986", "0.5488156", "0.546695", "0.5445724", "0.54166615", "0.5415728", "0.54112035", "0.5392526", "0.5392427", "0.5384152", "0.5383674", "0.5374912", "0.53628814", "0.5354401", "0.53539443", "0.5338508", "0.5321517" ]
0.8004621
0
This function reads in Pseudomonas pathway data from `pathway_DB_filename`, reformats it, and writes the result to `out_filename` so that it can be used in GSEA_analysis.R
def format_pseudomonas_pathway_DB(pathway_DB_filename, local_dir, out_filename):
    # Read in pathway data
    pa_pathway_DB = pd.read_csv(
        pathway_DB_filename,
        names=["pathway id", "num genes", "genes"],
        sep="\t",
        header=None,
    )

    # Drop extra column
    pa_pathway_DB.drop(columns=["num genes"], inplace=True)

    # Make genes tab-separated
    pa_pathway_DB["genes"] = pa_pathway_DB["genes"].str.split(";").str.join("\t")

    # Need to temporarily write data to file in order
    # to remove extra '\'
    tmp_filename = os.path.join(local_dir, "pa_pathway_DB_tmp_filename.gmt")
    pa_pathway_DB.to_csv(
        tmp_filename,
        quoting=csv.QUOTE_NONE,
        escapechar="\\",
        index=False,
        header=False,
        sep="\t",
    )

    with open(tmp_filename, "r") as ihf:
        tmp = ihf.read()
    with open(out_filename, "w") as ohf:
        ohf.write(tmp.replace("\\", ""))
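A minimal usage sketch of the function above (not part of the original dataset row): it assumes the function is defined in a module where pandas (as pd), os, and csv are already imported, and the directory and file names shown are hypothetical placeholders, not paths from the source.

import os
import csv

import pandas as pd

# Hypothetical working directory and file names; substitute real locations.
# The input is expected to be tab-separated: pathway id, num genes,
# and a semicolon-delimited gene list per row.
local_dir = "/tmp/pathway_work"
pathway_DB_filename = os.path.join(local_dir, "pseudomonas_pathways.txt")
out_filename = os.path.join(local_dir, "pseudomonas_pathways.gmt")

os.makedirs(local_dir, exist_ok=True)
format_pseudomonas_pathway_DB(pathway_DB_filename, local_dir, out_filename)
# out_filename now holds a tab-delimited .gmt-style file suitable for GSEA_analysis.R.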
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_ripser_output(output_path,max_dim,output_name=None):\n # \\todo add persistence by density (columns pers by threshold and column pers by dens) ## only needed if input weighted network\n output_file_path =os.path.join(output_path,'output_ripser.txt')\n data = open(output_file_path,'rb').readlines()\n value_range = eval(data[1].rstrip().split(' ')[-1])\n holes = dict() ## save holes by dimension (birth, death, persistence)\n for dimH in range(0,max_dim+1):#[0,1,2]:\n print 'dimH ', dimH\n h_start, h_end = ripser_PDs_dim(data,dim=dimH)\n pers = np.array(h_end)-np.array(h_start)\n d = pd.DataFrame()\n d['birth'] = h_start\n d['death'] = h_end\n d['persistence'] = pers\n d['dimH'] = dimH\n holes[dimH] = d \n data_pds = pd.concat(holes.values())\n if(output_name!=None):\n output_file_path = os.path.join(output_path,'%s_PDS.csv'%output_name)\n data_pds.to_csv(output_file_path) ## save pandas file with PDs for dim 0,1,2\n print 'Saved results in %s'%(output_file_path)\n else:\n output_file_path = os.path.join(output_path,'outputs_PDS.csv')\n data_pds.to_csv(output_file_path) ## save pandas file with PDs for dim 0,1,2\n print 'Saved results in %s'%output_file_path\n return()", "def gpml2json(path_in, path_out, pathway_iri, wp_id, pathway_version, wd_sparql):\n\n dir_out = path.dirname(path_out)\n # example base_out: 'WP4542.json'\n base_out = path.basename(path_out)\n [stub_out, ext_out_with_dot] = path.splitext(base_out)\n\n gpml2pvjson_cmd = (\n f\"gpml2pvjson --id {pathway_iri} --pathway-version {pathway_version}\"\n )\n with open(path_in, \"r\") as f_in:\n with open(path_out, \"w\") as f_out:\n gpml2pvjson_ps = subprocess.Popen(\n shlex.split(gpml2pvjson_cmd), stdin=f_in, stdout=f_out, shell=False\n )\n gpml2pvjson_ps.communicate()[0]\n\n organism = None\n with open(path_out, \"r\") as json_f:\n pathway_data = json.load(json_f)\n pathway = pathway_data[\"pathway\"]\n organism = pathway[\"organism\"]\n entities_by_id = pathway_data[\"entitiesById\"]\n entities_with_valid_xrefs = list()\n for entity in entities_by_id.values():\n datasource_invalid = \"xrefDataSource\" in entity and (\n entity[\"xrefDataSource\"] in [\"undefined\"]\n or not entity[\"xrefDataSource\"]\n )\n xref_identifier_invalid = \"xrefIdentifier\" in entity and (\n entity[\"xrefIdentifier\"] in [\"undefined\"]\n or not entity[\"xrefIdentifier\"]\n )\n if datasource_invalid or xref_identifier_invalid:\n entity_id = entity[\"id\"]\n print(\n f\"Invalid xref datasource and/or identifier for {wp_id}, entity {entity_id}\"\n )\n # bridgedbjs fails when an identifier is something like 'undefined'.\n # Should it ignore datasources/identifiers it doesn't recognize\n # and just keep going?\n del entity[\"xrefDataSource\"]\n del entity[\"xrefIdentifier\"]\n else:\n entities_with_valid_xrefs.append(entity)\n with open(path_out, \"w\") as f_out:\n json.dump(pathway_data, f_out)\n\n if not organism:\n print(\"No organism. 
Can't call BridgeDb.\")\n elif len(entities_with_valid_xrefs) == 0:\n # TODO: bridgedbjs fails when no xrefs are present.\n # Update bridgedbjs to do this check:\n print(\"No xrefs to process.\")\n else:\n pre_bridgedb_json_f = f\"{dir_out}/{stub_out}.pre_bridgedb.json\"\n rename(path_out, pre_bridgedb_json_f)\n\n bridgedb_cmd = f\"\"\"bridgedb xrefs -f json \\\n -i '.entitiesById[].type' \"{organism}\" \\\n '.entitiesById[].xrefDataSource' \\\n '.entitiesById[].xrefIdentifier' \\\n ChEBI P683 Ensembl P594 \"Entrez Gene\" P351 HGNC P353 HMDB P2057 Wikidata\n \"\"\"\n with open(pre_bridgedb_json_f, \"r\") as f_in:\n with open(path_out, \"w\") as f_out:\n bridgedb_ps = subprocess.Popen(\n shlex.split(bridgedb_cmd), stdin=f_in, stdout=f_out, shell=False\n )\n bridgedb_ps.communicate()[0]\n\n no_wikidata_xrefs_by_bridgedb_key = dict()\n entity_ids_by_bridgedb_key = dict()\n with open(path_out, \"r\") as json_f:\n pathway_data = json.load(json_f)\n pathway = pathway_data[\"pathway\"]\n entities_by_id = pathway_data[\"entitiesById\"]\n for entity in entities_by_id.values():\n if (\n \"xrefIdentifier\" in entity\n and \"xrefDataSource\" in entity\n and entity[\"xrefDataSource\"] in BRIDGEDB2WD_PROPS\n and len(\n [\n entity_type\n for entity_type in entity[\"type\"]\n if entity_type.startswith(\"Wikidata:\")\n ]\n )\n == 0\n ):\n entity_id = entity[\"id\"]\n datasource = entity[\"xrefDataSource\"]\n xref_identifier = entity[\"xrefIdentifier\"]\n bridgedb_key = NON_ALPHANUMERIC_RE.sub(\n \"\", datasource + xref_identifier\n )\n no_wikidata_xrefs_by_bridgedb_key[bridgedb_key] = [\n datasource,\n xref_identifier,\n ]\n if bridgedb_key not in entity_ids_by_bridgedb_key:\n entity_ids_by_bridgedb_key[bridgedb_key] = [entity_id]\n else:\n entity_ids_by_bridgedb_key[bridgedb_key].append(entity_id)\n\n pathway_id_query = (\n '''\nSELECT ?item WHERE {\n?item wdt:P2410 \"'''\n + wp_id\n + \"\"\"\" .\nSERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n}\"\"\"\n )\n wd_pathway_id_result = wd_sparql.query(pathway_id_query)\n\n if len(wd_pathway_id_result[\"results\"][\"bindings\"]) == 0:\n print(f\"Pathway ID {wp_id} not found in Wikidata. Retrying.\")\n # retry once\n wd_pathway_id_result = wd_sparql.query(pathway_id_query)\n if len(wd_pathway_id_result[\"results\"][\"bindings\"]) == 0:\n # if it still doesn't work, skip it\n print(\n f\"Pathway ID {wp_id} still not found in Wikidata. Skipping conversion.\"\n )\n return False\n\n wikidata_pathway_iri = wd_pathway_id_result[\"results\"][\"bindings\"][0][\n \"item\"\n ][\"value\"]\n wikidata_pathway_identifier = wikidata_pathway_iri.replace(\n \"http://www.wikidata.org/entity/\", \"\"\n )\n\n # adding Wikidata IRI to sameAs property & ensuring no duplication\n if not \"sameAs\" in pathway:\n pathway[\"sameAs\"] = wikidata_pathway_identifier\n else:\n same_as = pathway[\"sameAs\"]\n if type(same_as) == str:\n pathway[\"sameAs\"] = list({wikidata_pathway_identifier, same_as})\n else:\n same_as.append(wikidata_pathway_identifier)\n pathway[\"sameAs\"] = list(set(same_as))\n\n headings = []\n queries = []\n for i, xref in enumerate(no_wikidata_xrefs_by_bridgedb_key.values()):\n [datasource, xref_identifier] = xref\n heading = \"?\" + NON_ALPHANUMERIC_RE.sub(\n \"\", datasource + xref_identifier\n )\n headings.append(heading)\n wd_prop = BRIDGEDB2WD_PROPS[datasource]\n queries.append(f'{heading} wdt:{wd_prop} \"{xref_identifier}\" .')\n\n # Here we chunk the headings and queries into paired batches and\n # make several smaller requests to WD. 
This is needed because some\n # of the GET requests become too large to send as a single request.\n\n batch_size = 10\n for [heading_batch, query_batch] in zip(\n grouper_it(batch_size, headings), grouper_it(batch_size, queries)\n ):\n headings_str = \" \".join(heading_batch)\n queries_str = (\n \"WHERE { \"\n + \" \".join(query_batch)\n + ' SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }}'\n )\n xref_query = f\"SELECT {headings_str} {queries_str}\"\n xref_result = wd_sparql.query(xref_query)\n xref_query = f\"SELECT {headings_str} {queries_str}\"\n xref_result = wd_sparql.query(xref_query)\n\n bridgedb_keys = xref_result[\"head\"][\"vars\"]\n for binding in xref_result[\"results\"][\"bindings\"]:\n for bridgedb_key in bridgedb_keys:\n # TODO: is this check needed?\n if type(binding[bridgedb_key][\"value\"]) == list:\n raise Exception(\"Error: expected list and got string\")\n\n wd_xref_identifier = binding[bridgedb_key][\"value\"].replace(\n \"http://www.wikidata.org/entity/\", \"\"\n )\n for entity_id in entity_ids_by_bridgedb_key[bridgedb_key]:\n entities_by_id[entity_id][\"type\"].append(\n f\"Wikidata:{wd_xref_identifier}\"\n )\n\n pre_wd_json_f = f\"{dir_out}/{stub_out}.pre_wd.json\"\n rename(path_out, pre_wd_json_f)\n with open(path_out, \"w\") as f_out:\n json.dump(pathway_data, f_out)", "def process_pathway_ontology(self) -> None:\n # Load pathway ontology from file\n pw = PathwayOntology(name=\"PW\",\n filename=self.pathway_ontology_file)\n pw.load_from_file()\n\n pw_dict = dict()\n\n for cl in pw.owl_classes:\n synonyms, annotations = pw.get_synonyms(cl)\n pw_dict[cl] = {\n 'name': pw.get_label(cl),\n 'aliases': pw.get_all_labels(cl) + synonyms,\n 'synonyms': annotations,\n 'definition': pw.get_definition(cl),\n 'subClassOf': pw.get_subClassOf(cl),\n 'part_of': pw.get_part_of(cl)\n }\n\n with open(self.pw_json_file, 'w') as outf:\n json.dump(pw_dict, outf, indent=4, sort_keys=True)", "def writePathways( self ):\n\n self.logger.info( 'writePathways: START' )\n\n # Generate inserts for meabolic pathways.\n self.importerPathway.writePathways()\n\n self.logger.info( 'writePathways: DONE' )", "def export_diagram(db_id, pathway, genes, out_dir=None):\n # Re-enrich the genes in order to get the proper diagram\n # highlighting.\n enrich_genes(genes)\n if not out_dir:\n out_dir = os.getcwd()\n separator = re.compile('[^\\w]+')\n capitalized = [word[0].upper() + word[1:]\n for word in separator.split(pathway) if word]\n base_name = ''.join(capitalized)\n file_name = os.path.join(out_dir, \"%s.pdf\" % base_name)\n body = dict(dbId=db_id, pathwayName=pathway, fileName=file_name)\n requests.post(get_fi_url('exportPathwayDiagram'), json=body)\n print(\"Exported pathway '%s' to %s.\" % (pathway, file_name))\n return file_name", "def output(\n self,\n fileformat,\n **keywords\n ):\n \n # add the default parameters, they will be checked against the keywords\n defaults = {\n 'ref':'cogid',\n 'entry':'concept',\n 'missing':0,\n 'filename':'lingpy-{0}'.format(str(date.today())),\n }\n \n # compare with keywords and add missing ones\n for key in defaults:\n if key not in keywords:\n keywords[key] = defaults[key]\n\n if fileformat == 'paps.nex':\n paps = self.get_paps(\n ref=keywords['ref'],\n entry=keywords['entry'],\n missing=keywords['missing']\n )\n pap2nex(\n self.cols,\n paps,\n missing=keywords['missing'],\n filename=keywords['filename']+'.paps'\n )\n\n if fileformat == 'taxa':\n out = ''\n for col in self.cols:\n out += col + '\\n'\n f = open(keywords['filename'] + 
'.taxa','w')\n f.write(out)\n f.close()", "def BuildPathwayModel(request):\n form = BuildPathwayModelForm(request.POST, request.FILES)\n if not form.is_valid():\n logging.error(form.errors)\n return HttpResponseBadRequest('Invalid pathway form.')\n\n try:\n bounds = pathway_result_page.make_bounds(form)\n aq_params = pathway_result_page.make_aq_params(form)\n except Exception as e:\n logging.error(e)\n return HttpResponseBadRequest(e)\n\n try:\n f = request.FILES['pathway_file']\n fname_base, ext = os.path.splitext(f.name)\n output_fname = '%s_pH%.2f_I%.2f.tsv' % (\n fname_base, aq_params.pH, aq_params.ionic_strength)\n logging.info(output_fname)\n\n pp = ParsedPathway.from_csv_file(\n f, bounds=bounds, aq_params=aq_params)\n except PathwayParseError as ppe:\n logging.error(ppe)\n return HttpResponseBadRequest(ppe)\n\n response = HttpResponse(content_type='text/tab-separated-values')\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % \\\n output_fname\n response.write(pp.to_full_sbtab())\n\n return response", "def rnase_p_model_info(filename, db_url, output):\n r2dt.write_rfam(filename, db_url, output)", "def load_pathway(path_idx=1, preprocess=True):\n\n assert path_idx in [1, 2], 'Unavailable index, must be 1 or 2.'\n url = f'https://raw.githubusercontent.com/PengTao-HUST/GDNB/master/data/pathway{path_idx}.txt'\n cache_dir = sys.modules['gdnb'].__path__[0] + '/data/'\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n data_file = os.path.basename(url)\n full_path = cache_dir + data_file\n\n if not os.path.exists(full_path):\n urlretrieve(url, cache_dir + data_file)\n\n if preprocess:\n traj = np.loadtxt(full_path)\n traj = np.apply_along_axis(normalize_by_mean, 0, traj[:, 1:])\n disdat = traj.reshape(10, 50, -1).transpose((0, 2, 1))\n return disdat\n else:\n return full_path", "def mainPSM(myPath, result_file):\n def maxQuant(my_file):\n\n peptideList = list()\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[0].upper().rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptideList.append(peptide)\n\n return peptideList\n\n def proteomeDiscoverer(my_file):\n\n peptideList = list()\n table = str.maketrans('', '', string.ascii_lowercase)\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[4].split(\".\")[1].rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptide = peptide.translate(table)\n peptideList.append(peptide)\n\n return peptideList\n\n def galaxyP(my_file):\n\n peptideList = list()\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[2].upper().rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptideList.append(peptide)\n\n return peptideList\n\n def MPA(my_file):\n\n peptideList = list()\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[2].upper().rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptideList.append(peptide)\n\n return peptideList\n\n # Open a file\n sample_db = os.listdir(myPath)\n # dictionary for a db1-5\n completeResultsDict = dict() # key = se; value = dict(key = dataset, value = peptidelist)\n\n # This would print all the files and directories\n for se in sample_db:\n if se not in completeResultsDict.keys():\n # sub-dictionary for a certain search pipeline\n searchEngineDict = dict() # key = dataset, value = peptidelist)\n completeResultsDict[se] = searchEngineDict\n\n 
for result in os.listdir(myPath + \"/\" + se):\n peptideList = list()\n if se == \"MQ\":\n peptideList = maxQuant(myPath + \"/\" + se + \"/\" + result)\n elif se == \"PD\":\n peptideList = proteomeDiscoverer(myPath + \"/\" + se + \"/\" + result)\n elif se == \"GP\":\n if result.endswith(\".tabular\"):\n peptideList = galaxyP(myPath + \"/\" + se + \"/\" + result)\n elif se == \"MPA\":\n peptideList = MPA(myPath + \"/\" + se + \"/\" + result)\n else:\n print(\"Are you sure?\")\n\n # updating the completeResultsDict\n if peptideList:\n myDict = completeResultsDict.get(se)\n myDict[result.split(\".\", maxsplit=1)[0]] = peptideList\n\n # nested for-loop: {search engine: {dataset : peptidelist}}\n nonRedundantPeptideSet = set()\n count = 0\n for se, result in completeResultsDict.items():\n for dataset, peptides in result.items():\n for peptide in peptides:\n nonRedundantPeptideSet.add(peptide)\n count += 1\n nonRedundantPeptideList = sorted(list(nonRedundantPeptideSet))\n\n peptideMatrix = dict()\n peptideMatrix[\"PeptideSeq\"] = nonRedundantPeptideList\n headerList = list()\n headerList.append(\"se_dataset\")\n for se, result in completeResultsDict.items():\n print(se)\n for dataset, peptides in result.items():\n print(dataset)\n headerList.append(\"{}_{}\".format(se, dataset))\n peptideList = []\n for peptide in nonRedundantPeptideList:\n if peptide in peptides:\n peptideList.append(1)\n else:\n peptideList.append(0)\n peptideMatrix[\"{}_{}\".format(se, dataset)] = peptideList\n\n\n df = pandas.DataFrame(data=peptideMatrix)\n df.to_csv(open(result_file, \"w\", newline=''), index=False)", "def writeProteins( self ):\n\n self.logger.info( 'writeProteins: START' )\n\n proteinsDestination = self.openInsertFile( 'proteinsInsert.psql' )\n accessionsDestination = self.openInsertFile( 'accessionsInsert.psql' )\n\n proteins = {}\n\n totalOfSequences = self.reader.getTotalOfSequences()\n\n self.logger.info( 'writeProteins: total of sequences: ' + str(totalOfSequences) + '.' )\n\n files = self.reader.getPepFiles()\n\n self.logger.info( 'writeProteins: total of sequence files: ' + str(len(files)) + '.' )\n\n # For log purposes only!\n counter = 0\n\n for pepFile in files:\n f = self.reader.openPepFile( pepFile )\n\n positions = self.reader.getPepEntriesPositions()\n\n # Just for the log system.\n fileName = self.afs.getFileName( pepFile ) \n self.logger.info( 'writeProteins: writing file: ' + str(fileName) + '.' )\n self.logger.info( 'writeProteins: file: ' + str(fileName) + ' have : ' + str(len(positions)) + ' entries.' )\n # END of log stuff.\n\n for position in positions:\n\n # Only log how long it's taking to run.\n # By thousands.\n counter += 1\n if ( counter % 100000 ) == 0:\n self.logger.info( 'writeProtein: step: ' + str(counter) + '.')\n # END log step.\n\n\n entry = self.reader.getPepParsedEntry( position )\n\n # Sometimes there's 'pep' files without related organism. It happens in KEGG database.\n # We skip completely sequences without related organism.\n if not entry.organism.code in self.importerOrganism.organismsInserted:\n self.logger.info( 'writeProteins: ORGANISM NOT FOUND: ' + entry.organism.code )\n\n # Skip the 'pep' file completely.\n break\n\n else:\n organismId = self.importerOrganism.organismsInserted[ entry.organism.code ]\n\n self.logger.info( 'writeProteins: writing entry : ' + str(entry.identification) + '.' 
)\n\n #self.writeProteinsFile( proteinsDestination, entry.identification, entry.fullFastaHeader, entry.description, organismId, entry.sequence )\n proteinInserted = self.writeFile( proteinsDestination, 'proteins', [ str(entry.identification), str(entry.fullFastaHeader), str(entry.description), str(organismId), str(entry.sequence) ] )\n self.proteinsInserted[ entry.identification ] = proteinInserted\n\n accessionInserted = self.writeFile( accessionsDestination, 'accessions', [ str(entry.identification) ] )\n self.accessionsInserted[ entry.identification ] = accessionInserted \n #self.writeAccessionsFile( accessionsDestination, entry.identification )\n\n\n self.logger.info( 'writeProteins: DONE' )", "def rnase_p_model_info(filename, output):\n r2dt.write_rnase_p(filename, output)", "def path(filename, path):\n\n # If the line is not empty:\n if len(path) > 0:\n # Open the file for appending\n with open(filename, \"a\") as file:\n # Define format string\n write = \"{:.2f},{:.2f},{:d},{:d},{:d},\\n\"\n\n # Find the first point\n first = path[0]\n # Write the first point with \"no extruding\" option\n file.write(write.format(float(first[1][0]), float(first[1][1]), 0, 0, 0))\n\n # For each line in the path\n for i, line in enumerate(path):\n # If line isn't a repeated point\n if True or (line[1][0] != line[2][0]) and (line[1][1] != line[2][1]):\n\n # If the line is somewhere in the middle of the list write it with \"extruding\" option\n if i < len(path) - 1:\n file.write(write.format(float(line[2][0]), float(line[2][1]), 1, 0, 0))\n\n # If the line is the last of the path, write it with \"extruding\" and \"end of island\" options\n else:\n file.write(write.format(float(line[2][0]), float(line[2][1]), 1, 1, 0))", "def writeProteinRelations( self ):\n\n self.logger.info( 'writeProteinRelations: START' )\n\n self.logger.info( 'writeProteinRelations: keggreader.getAllProteinMaps() : START' )\n\n # Get all protein maps relations.\n # Notice that proteins without any map wont exist in the result below. 
That's important to save memory (no other reason at all).\n proteinMaps = self.reader.getAllProteinMaps()\n\n self.logger.info( 'writeProteinRelations: keggreader.getAllProteinMaps() : DONE' )\n\n\n self.logger.info( 'writeProteinRelations: proteinEcFile is: proteinEcsInsert.psql' )\n\n # Open protein_ecs insert file.\n proteinEcFile = self.openInsertFile( 'proteinEcsInsert.psql' )\n\n\n self.logger.info( 'writeProteinRelations: proteinMapFile is: proteinMapsInsert.psql' )\n\n # Open protein_maps insert file.\n proteinMapFile = self.openInsertFile( 'proteinMapsInsert.psql' )\n\n\n self.logger.info( 'writeProteinRelations: iterating through all the proteins: START' )\n\n # Keep a counter to know how long it's taking.\n counter = 0\n\n # Now we have to write protein_ecs table.\n # That means get the proteins ids and its related ecs ids.\n # Those ids comes from dictionary variables generated by the 'write' methods for each table.\n # So, we run through proteins ids and get ec from KeggReader 'getEcNumberByGene' method and make the correct relation.\n for protein,relationalDatabaseId in self.proteinsInserted.iteritems():\n\n # Only log how long it's taking to run.\n # By thousands.\n counter += 1\n if ( counter % 100000 ) == 0:\n self.logger.info( 'writeProteinRelations: step: ' + str(counter) + '.')\n # END log step.\n\n self.logger.info( 'writeProteinRelations: keggreader.getEcNumbersByGene(): START' )\n\n # We get all EC numbers related to the specific protein.\n ecs = self.reader.getEcNumberByGene( protein ) \n\n self.logger.info( 'writeProteinRelations: keggreader.getEcNumbersByGene(): DONE' )\n\n # If there's EC number (almost of proteins doesn't has a related EC number - which means they're no enzymes).\n if ecs:\n\n self.logger.info( 'writeProteinRelations: FOUND EC Numbers for the protein: ' + str(protein) + '.' )\n self.logger.info( 'writeProteinRelations: ' + str(protein) + ' : Total of EC Numbers FOUND: ' + str(len(ecs)) + '.' )\n\n # Iterate through the ECs found for that specific protein.\n for ec in ecs:\n # Get the relational database EC id for that EC number being iterated \n ecId = self.importerEc.ecsInserted[ str(ec) ] \n proteinId = relationalDatabaseId\n\n # Actual write protein_ecs file.\n #self.writeProteinEcsFile( proteinEcFile, proteinId, ecId )\n self.writeFile( proteinEcFile, 'protein_ecs', [ str(proteinId), str(ecId) ] )\n else:\n self.logger.info( 'writeProteinRelations: NOT FOUND EC Numbers for the protein: ' + str(protein) + '.' )\n\n\n # Maps to specific protein.\n if protein in proteinMaps:\n maps = proteinMaps[ protein ]\n\n if maps:\n self.logger.info( 'writeProteinRelations: FOUND MAP Numbers for the protein: ' + str(protein) + '.' )\n self.logger.info( 'writeProteinRelations: ' + str(protein) + ' : Total of MAP Numbers FOUND: ' + str(len(maps)) + '.' )\n\n for proteinMap in maps:\n\n # Some maps aren't metabolic pathways but simple pathways for other molecular mechanisms.\n # And we're interested only in metabolic maps at this moment.\n if proteinMap in self.importerPathway.pathwayMapsInserted:\n mapId = self.importerPathway.pathwayMapsInserted[ proteinMap ]\n proteinId = relationalDatabaseId\n\n #self.writeProteinMapsFile( proteinMapFile, proteinId, mapId )\n self.writeFile( proteinMapFile, 'protein_maps', [ str(proteinId), str(mapId) ] )\n else:\n self.logger.info( 'writeProteinRelations: NOT FOUND MAP Numbers for the protein: ' + str(protein) + '.' 
)\n\n\n self.logger.info( 'writeProteinRelations: iterating through all the proteins: DONE' )\n self.logger.info( 'writeProteinRelations: DONE' )", "def create_all(graph,first_last_fn):\n trip_id = 1\n line_num = 0\n num_trips = 0\n trip_id2model = {}\n #paths = {}\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n #paths[trip_id] = p\n while p.next_line != len(graph.lines):#file_length:\n graph.trip_id2line_num[trip_id] = line_num\n line_num = p.next_line\n trip_id = normalize_simple(graph.lines[line_num])[0]\n #trip_id = dg.normalize(lines[line_num])[0]\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n # paths[trip_id] = p\n graph.trip_id2line_num[trip_id] = line_num\n graph.num_trips = num_trips\n\n\n with open(first_last_fn,'wb') as output:\n pickle.dump(graph.first_last2trip_ids,output)\n\n with open('pickles/trip_id2model.pickle','wb') as output:\n pickle.dump(trip_id2model,output)\n #return paths", "def _convert_and_save_to_FILE_pdbformat(self, cur, FILE, supress_model_separation=False):\n\n\n rows = cur.fetchall()\n if not supress_model_separation:\n pdb_previous = rows[0]['pdbID']; struc_previous = rows[0]['strucID']\n FILE.write(\"MODEL\\n\")\n for row in rows:\n pdb=row['pdbID']; struc=row['strucID']\n if pdb!=pdb_previous or struc!=struc_previous:\n FILE.write(\"ENDMDL\\n\")\n FILE.write(\"MODEL\\n\")\n line = self._morph_db_row_to_pdb_line(row)\n FILE.write(line+\"\\n\")\n pdb_previous = row['pdbID']; struc_previous = row['strucID']\n FILE.write('ENDMDL')\n else:\n FILE.write(\"MODEL\\n\")\n for row in rows:\n line = self._morph_db_row_to_pdb_line(row)\n FILE.write(line+\"\\n\")\n FILE.write('ENDMDL')\n FILE.close()", "def make_pdb(self, pdb_path, out_path, chain_letters, overwrite=False):\r\n chain_letters = [chain.upper() for chain in chain_letters]\r\n pdb_fn = os.path.split(pdb_path)[1]\r\n \r\n print \"OUT PATH:\",out_path\r\n\r\n # Skip PDB generation if the file already exists\r\n plural = \"s\" if (len(chain_letters) > 1) else \"\" # for printing\r\n if (not overwrite) and (os.path.isfile(out_path)):\r\n print(\"Chain%s %s of '%s' already extracted to '%s'.\" %\r\n (plural, \", \".join(chain_letters), pdb_fn, out_path))\r\n return out_path\r\n print(\"Extracting chain%s %s from %s...\" % (plural, \", \".join(chain_letters), pdb_fn))\r\n\r\n # Get structure, write new file with only given chains\r\n struct = self.parser.get_structure('protein', pdb_path)\r\n self.writer.set_structure(struct)\r\n self.writer.save(out_path, select=SelectChains(chain_letters))\r\n\r\n return out_path", "def generatePhasingScore(options,phase,cycle):\n score,readcount,readseq=readDataForPhasingScoreComputation(options,phase)\n phased_loci_filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".positive_phase_loci\"\n final_phase_loci=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".phasing_score_phase_loci\"\n fhr=open(phased_loci_filename,\"r\")\n out4=open(final_phase_loci,\"w\")\n for line in fhr:\n chromosome,ss,ee=line.strip().split()\n ss=int(ss)\n ee=int(ee)\n #correct=list(range(ss,ee+1,phase))\n phasing_score_filename=options.output_directory_per_run+\"/\"+str(phase)+\"_\"+str(chromosome)+\"_\"+str(ss)+\"_\"+str(ee)+\".phasing_score\"\n abundance_score_filename=options.output_directory_per_run+\"/\"+str(phase)+\"_\"+str(chromosome)+\"_\"+str(ss)+\"_\"+str(ee)+\".abundance\"\n 
out=open(phasing_score_filename,\"w\")\n out2=open(abundance_score_filename,\"w\")\n score_count={}\n for site in range(ss,ee+1):\n start=site-(phase*4)\n end=site+(phase*5)-1\n max_within_site,max_within_count,all_scores=0,0,0\n for cor in range(start,end+1):\n if cor not in score[chromosome]:continue\n all_scores+=score[chromosome][cor]\n for i in readcount[chromosome][cor]:\n if max_within_count<readcount[chromosome][cor][i]:\n max_within_site=cor\n max_within_count=readcount[chromosome][cor][i]\n all_scores-=max_within_count\n P,k=0,0\n s=start\n while s<end:\n if s not in score[chromosome]:\n s+=phase\n continue\n if score[chromosome][s]!=0:\n P+=score[chromosome][s]\n k+=1\n if s == max_within_site:\n P-=max_within_count \n s+=phase\n U=all_scores-P\n \n #if U<0: continue\n if k>=3:\n #print(P,U,k)\n phas_score=math.log((1+(10*(P/(1+U))))**(k-2))\n \"\"\"if phas_score>max and site in correct:\n max=phas_score\"\"\"\n else:\n phas_score=0\n out.write(str(site)+\"\\t\"+str(phas_score)+\"\\n\")\n out4.write(chromosome+\"\\t\"+str(site)+\"\\t\"+str(phas_score)+\"\\n\")\n if chromosome not in score_count:\n score_count[chromosome]={}\n if site not in score_count[chromosome]:\n score_count[chromosome][site]=phas_score\n if site in readcount[chromosome] and '+' in readcount[chromosome][site] and readcount[chromosome][site]['+']!=0:\n out2.write(str(site)+\"\\t\"+str(readcount[chromosome][site]['+'])+\"\\n\")\n if site in readcount[chromosome] and '-' in readcount[chromosome][site] and readcount[chromosome][site]['-']!=0:\n out2.write(str(site)+\"\\t-\"+str(readcount[chromosome][site]['-'])+\"\\n\")\n out.close()\n out2.close()\n \n #out4.write(chromosome+\"\\t\"+str(ss)+\"\\t\"+str(ee)+\"\\t\"+str(phas_score)+\"\\n\")\n out4.close()", "def output_phased_data(phasing, sample_names, snp_names, options):\n things_to_output=[]\n things_to_output.append( (\"la\", \"local_ancestry\", parents_to_string))\n if options.get(\"best_parents\", None): things_to_output.append( (\"bp\", \"best_parents\", parents_to_string) )\n \n # Output phased data\n for suffix, tag, format_func in things_to_output:\n\n if(options.get(\"gzip\", None)):\n file_name = options[\"out\"]+\".\"+suffix+\".txt.gz\"\n out_file = gzip.open(file_name, \"w\")\n else:\n file_name = options[\"out\"]+\".\"+suffix+\".txt\"\n out_file = open(file_name, \"w\")\n \n #out_file.write( \"\\t\".join([\"POS\"]+sample_names) + \"\\n\" )\n for i in range(len(phasing[sample_names[0]][tag])):\n #out_file.write( \"\\t\".join([snp_names[i]]+[format_func(phasing[s][tag][i]) for s in sample_names] ) + \"\\n\")\n out_file.write( \" \".join([format_func(phasing[s][tag][i]) for s in sample_names] ) + \"\\n\")\n\n out_file.close()", "def path2trajectory( self, fname=\"relaxed_path.traj\" ):\n traj = TrajectoryWriter(fname,'w')\n for energy,state in zip(self.init_path[\"energy\"], self.init_path[\"symbols\"]):\n self.nuc_mc.network.reset()\n self.nuc_mc.set_state(state)\n self.nuc_mc.network(None)\n atoms = self.nuc_mc.network.get_atoms_with_largest_cluster( prohibited_symbols=[\"Al\",\"Mg\"] )\n if atoms is None:\n atoms = self.nuc_mc.atoms\n calc = SinglePointCalculator(atoms, energy=energy)\n traj.write(atoms)\n self.log( \"Trajectory written to {}\".format(fname))", "def get_msigdb_pathways(species, remap=None):\n LOGGER.info(\"Fetching MSigDB pathways\")\n\n def _get_requests():\n for file in MSIGDB_FILES:\n url = MSIGDB_URL + file\n\n LOGGER.info(\"Fetching {}\".format(url))\n\n response = requests.get(url, stream=True)\n 
response.raise_for_status()\n\n yield response\n\n def _get_data(line):\n line = line.decode(\"utf-8\")\n name, _, genes = line.split(\"\\t\", 2)\n # name, _, _, spec = name.split(\"%\")\n # assert species == spec\n return name, set(i for i in genes.split(\"\\t\"))\n\n pathways_df = pd.DataFrame(\n data=[\n _get_data(line)\n for response in _get_requests()\n for line in response.iter_lines()\n ],\n columns=[\"name\", \"set\"],\n )\n\n if remap and species not in [\"Homo sapiens\"]:\n to_name = \"{}{}\".format(\n species.split(\" \")[0][0],\n species.split(\" \")[1],\n ).lower()\n\n LOGGER.info(\"Remapping MSigDB to {} ({})\".format(species, to_name))\n\n mapper = EnsemblMapper(\n from_type='entrez',\n to_type='entrez',\n from_organism='hsapiens',\n to_organism=to_name,\n )\n pathways_df[\"set\"] = pathways_df[\"set\"].apply(\n lambda row: set(mapper.map_ids(row))\n )\n\n return pathways_df", "def gtrnadb_model_info(filename, output):\n r2dt.write_gtrnadb(filename, output)", "def write_coord_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n lis = []\n with open(filepath, 'r') as file:\n for line in file:\n if line[:4] == 'ATOM':\n line_split = line.split()\n lis.append(line_split[3:4])\n choice1 = input('Enter name for the output file: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as myfile:\n for i in lis:\n myfile.writelines(i)\n print('Done!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')", "def make_pdb(self, pdb_path, out_path, residue_indices, overwrite=False):\r\n residue_indices = np.array(residue_indices)\r\n pdb_fn = os.path.split(pdb_path)[1]\r\n \r\n print \"OUT PATH:\",out_path\r\n\r\n # Skip PDB generation if the file already exists\r\n plural = \"s\" if (len(residue_indices) > 1) else \"\" # for printing\r\n if (not overwrite) and (os.path.isfile(out_path)):\r\n print(\"Residue%s %s of '%s' already extracted to '%s'.\" %\r\n (plural, \", \".join(residue_indices), pdb_fn, out_path))\r\n return out_path\r\n print(\"Extracting %i residue%s \\n%s from %s...\" % (len(residue_indices), plural, \", \".join(residue_indices.astype(str)), pdb_fn))\r\n\r\n # Get structure, write new file with only given chains\r\n struct = self.parser.get_structure('protein', pdb_path)\r\n self.writer.set_structure(struct)\r\n self.writer.save(out_path, select=SelectResidues(residue_indices))\r\n\r\n return out_path", "def single_epoch(g,rows,cols,midpoint):\n\n num_top = 10 \n #3 for 8x8\n one_to_select = 0 \n top_nodes = g.top_n_nodes(num_top)\n '''\n for k in range(num_top):\n node_num = top_nodes[k]\n trip_list = g.node2trip_ids[node_num]\n print \"Next Midpoint: %d\" % k\n print node_num\n print g.node_to_coords(node_num)\n print \"Num trips: %d\" % len(trip_list)\n for i in range(len(trip_list)):\n trip_id = trip_list[i]\n line_num = 
g.trip_id2line_num[trip_id]\n p = Path(trip_id,g,line_num)\n \"\"\"\n print i\n print trip_id\n p.print_path()\n for i in range(p.graph.num_edges):\n if p.edges[i]:\n sys.stdout.write(\"%d, \" % (i + 1))\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"1s: \")\n for key in p.partials.keys():\n if p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n0s: \")\n for key in p.partials.keys():\n if not p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n\")\n #\"\"\"\n '''\n\n #trip_list = g.node2trip_ids[g.best_node]\n #midpoint = top_nodes[one_to_select]\n trip_list = g.node2trip_ids[midpoint]\n print \"Selected midpoint: %d\" % midpoint \n print g.node_to_coords(midpoint)\n out_file = open(\"datasets/full_data_%d_%d_%d.txt\" % (rows,cols,midpoint),'w')\n partial_file = open(\"datasets/partials_%d_%d_%d.txt\" % (rows,cols,midpoint), 'w')\n for i in range(len(trip_list)):\n trip_id = trip_list[i]\n line_num = g.trip_id2line_num[trip_id]\n p = Path(trip_id,g,line_num=line_num,midpoint=midpoint)\n \"\"\"\n print i\n print trip_id\n p.print_path()\n for i in range(p.graph.num_edges):\n if p.edges[i]:\n sys.stdout.write(\"%d, \" % (i + 1))\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"1s: \")\n for key in p.partials.keys():\n if p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n0s: \")\n for key in p.partials.keys():\n if not p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n\")\n \"\"\"\n out_string = str(p.edges)[1:-1]\n out_file.write(\"%s\\n\" % out_string)\n for i in range(p.graph.num_edges):\n if i in p.partials.keys():\n partial_file.write(\"%d\" % p.partials[i])\n else:\n partial_file.write(\"-1\")\n if i < p.graph.num_edges-1:\n partial_file.write(\",\")\n partial_file.write(\"\\n\")\n\n out_file.close()", "def get_opt_waypts(filename):\n\t# get ground truth for task 2 only!!!\n\there = os.path.dirname(os.path.realpath(__file__))\n\tsubdir = \"/data/experimental/\"\n\tdatapath = here + subdir + filename\n\tfirstline = True\n\twaypts = None\n\twith open(datapath, 'r') as f:\n\t\tmethodData = [None]*8\n\t\ti = 0\n\t\tfor line in f:\n\t\t\t# skip first line in tracked that has totalT\n\t\t\tif firstline:\n\t\t\t\tfirstline = False\n\t\t\t\tcontinue\n\t\t\tvalues = line.split(',')\n\t\t\tfinal_values = [float(v) for v in values[1:len(values)]]\n\t\t\tmethodData[i] = final_values\n\t\t\ti += 1\n\t\tdata = np.array(methodData)\n\t\twaypts = data\n\treturn waypts[1:8].T", "def _update_database_file(self, Temp, path):\n from datetime import datetime\n\n if path:\n filename = path + '/APD_DABA_{:.1f}_.txt'.format(Temp)\n else:\n filename = 'APD_DABA_{:.1f}_.txt'.format(Temp)\n self.printer('\\n ...Writing database file: {}...\\n'.format(filename))\n filepointer = open(filename, 'w')\n\n filepointer.write('# Database file for the APD-Toolkit\\n# Generated: {}\\n'.format(datetime.now()))\n for mname, molecule in self.items():\n if len(mname) > 1:\n filepointer.write('N {}\\n'.format(mname))\n for atom in molecule.atoms:\n filepointer.write('E {}\\n'.format(atom.element))\n\n for invariom_name, orientation in atom.invarioms.items():\n filepointer.write('I {} '.format(invariom_name))\n filepointer.write('{:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f}\\n'.format(\n *(orientation[0].tolist() + orientation[1].tolist())))\n filepointer.write('C {:.3f} {:.3f} {:.3f}\\n'.format(*atom.cart))\n try:\n filepointer.write('A {:.2e} {:.2e} {:.2e} {:.2e} {:.2e} 
{:.2e}\\n'.format(*atom.adp['cart_int']))\n except KeyError:\n filepointer.write('A {:.2e} {:.2e} {:.2e} {:.2e} {:.2e} {:.2e}\\n'.format(0, 0, 0, 0, 0, 0))\n filepointer.close()", "def ProteinRead(pdb_file, Include_dAA = True, IncludeWATER = False):\n # structure from input file or fetched if not present\n if(pdb_file[-4:] == '.pdb' or pdb_file[-3:] == '.gz'):\n ppdb = PandasPdb().read_pdb(pdb_file)\n else:\n ppdb = PandasPdb().fetch_pdb(pdb_file)\n \n # lists for standard and d-AA used to save structure to dataset \n standardAA = ['ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n d_AA = ['DAL','DAR','DSG','DAS','DCY','DGN','DGL','GLY','DHI','DIL','DLE','DLY','MED','DPN','DPR','DSN','DTH','DTR','DTY','DVA']#scan takes into account only standard amino acids\n\n for aa in standardAA: #ATOM entries, excluding water molecules \n if(aa==standardAA[0]):\n ppdb_ATOM = ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == aa] \n else:\n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == aa]], ignore_index=True) \n\n if(Include_dAA):\n for i in range(0,len(d_AA)): \n if(d_AA[i]!='GLY'):\n ppdb_d_AA = pd.concat([ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == d_AA[i]],ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == d_AA[i]]], ignore_index=True)\n pd.options.mode.chained_assignment = None \n ppdb_d_AA['residue_name'].iloc[:] = standardAA[i] #dAA considered as standard one for scan \n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb_d_AA], ignore_index=True) \n\n ppdb_PROTEIN = ppdb_ATOM #protein atoms saved here \n ppdb_WATER = pd.concat([ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == 'HOH'],ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == 'HOH'],ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == 'WAT'],ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == 'WAT']], ignore_index=True) #oxygen atoms of water molecules\n #can be both HETATM (standard pdb file) or ATOM (vmd output)\n if(len(ppdb_WATER)>0 and IncludeWATER):\n pd.options.mode.chained_assignment = None \n ppdb_WATER['residue_name'].iloc[:] = 'HOH'\n ppdb_WATER['chain_id'].iloc[:] = 'water'\n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb_WATER], ignore_index=True)\n\n Chains = []\n for i in range(0,len(ppdb_ATOM)):\n if(ppdb_ATOM['chain_id'].iloc[i] in Chains):\n continue\n else:\n Chains.append(ppdb_ATOM['chain_id'].iloc[i]) \n return ppdb_ATOM, Chains", "def update_trip_path(trip_mpois, paths, graph):\n n_nodes = len(trip_mpois)\n # adjacency matrix\n new_paths = np.zeros(shape=(n_nodes, n_nodes))\n\n # iterate through all the nodes and create a list of nodes with sequential id\n for i, node1 in enumerate(trip_mpois):\n for j, node2 in enumerate(trip_mpois):\n new_paths[i, j] = paths[node1, node2]\n\n # new_paths = new_paths/np.max(new_paths[new_paths < _INF])\n # new_paths[np.isinf(new_paths)] = _INF\n\n # create a dummy edge between end and start node with weight 0\n new_paths[1,0] = -_INF\n # new_paths[0,1] = _INF\n\n shortest_path = None\n if n_nodes > 5:\n shortest_path, dist = tsp.solve(n_nodes, new_paths)\n # shortest_path = range(n_nodes)\n else:\n shortest_path = range(n_nodes)\n\n trip_path = np.array(trip_mpois)[shortest_path]\n\n if ___DEBUG:\n fname = 'dump/' + str(n_nodes) + '.dist'\n np.savetxt(fname, new_paths, fmt='%.6f')\n \n mpoi_pos = np.zeros(shape=(n_nodes,2))\n \n for i, node in enumerate(trip_mpois):\n pos_3d = graph.vs[node]['position']\n assert node == graph.vs[node].index\n mpoi_pos[i,:] = pos_3d[:2]\n\n 
fname = 'dump/' + str(n_nodes) + '.pos'\n np.savetxt(fname, mpoi_pos)\n \n # print trip_mpois, trip_path\n\n return trip_path", "def kegg_pathway_enrichment(degs, negs, dbpaths=dbpaths, show_all=True, pthresh=0.01):\n\n deg_num_ko, deg_keggs = cbir_to_kegg(degs)\n neg_num_ko, neg_keggs = cbir_to_kegg(negs)\n\n print \"%-4d kegg pathways from %d DEGs\" % (len(deg_keggs), len(degs) )\n print \"%-4d kegg pathways from %d nonDEGs\" % (len(neg_keggs), len(negs) )\n\n # create dictionary of kegg pathways {pathwaytype:{pathway:[ko1,ko2,ko3]}}\n pathwaytype_dict = {}\n pathway_dict = {}\n pathway_lookup = {}\n\n print \"extracting pathways...\"\n ko1_h = open(dbpaths['kegg'], 'rb')\n for line in ko1_h:\n if line[0] == 'B': # Kegg path type eg: B <b>Replication and repair</b>\n pathtype_f = re.search('B.*<b>(.*)<', line)\n if pathtype_f is not None:\n pathtype = pathtype_f.group(1)\n else:\n pathtype = 'unknown'\n pathwaytype_dict[pathtype] = {}\n elif line[0] == 'C': # Kegg Pathway eg: 01200 Carbon metabolism [PATH:ko01200]\n pathway_f = re.search(\"C +([0-9]*) *(.*)\\[PATH\", line)\n if pathway_f is not None:\n pathway_id = pathway_f.group(1)\n pathway_name = pathway_f.group(2)\n else:\n pathway_id = 'unknown'\n pathway_name = 'unknown'\n pathway_dict[pathway_id] = {}\n pathway_lookup[pathway_id] = pathway_name\n elif line[0] == 'D': # Kegg term eg: K00844 HK; hexokinase [EC:2.7.1.1]\n koterm_f = re.search(\"(K[0-9]*)\", line)\n if koterm_f is not None:\n koterm = koterm_f.group(1)\n else:\n koterm = 'unknown'\n pathwaytype_dict[pathtype][koterm] = 1\n pathway_dict[pathway_id][koterm] = 1\n\n\n print \"calculating enrichment...\"\n pathwaytype_ps = {}\n pathway_ps = {}\n # count number of degs and negs in each pathway:\n for pathwaytype in pathwaytype_dict:\n pwtsize = len(pathwaytype_dict)\n degs_in_path = sum([1 for ko in pathwaytype_dict[pathwaytype] if ko in deg_keggs])\n negs_in_path = sum([1 for ko in pathwaytype_dict[pathwaytype] if ko in neg_keggs])\n degs_not_in = len(deg_keggs) - degs_in_path\n negs_not_in = len(neg_keggs) - negs_in_path\n\n oddrat, pval = fisher_exact([ [degs_in_path, degs_not_in],\n [negs_in_path, negs_not_in] ],\n alternative='greater')\n pathwaytype_ps[pathwaytype] = pval\n\n if pval < pthresh:\n print \"%s\\n \\\n In Path Not in Path\\n\\\n DEG : %-7d %d\\n\\\n non-DEG: %-7d %d\\n\\\n Odds Ratio:%.3f\\n\\\n P-value:%.4f\\n\" % (pathwaytype,degs_in_path,degs_not_in,negs_in_path,negs_not_in,\n oddrat, pval)\n\n\n for pathway in pathway_dict:\n pwtsize = len(pathway_dict)\n degs_in_path = sum([1 for ko in pathway_dict[pathway] if ko in deg_keggs])\n negs_in_path = sum([1 for ko in pathway_dict[pathway] if ko in neg_keggs])\n degs_not_in = len(deg_keggs) - degs_in_path\n negs_not_in = len(neg_keggs) - negs_in_path\n\n oddrat, pval = fisher_exact([ [degs_in_path, degs_not_in],\n [negs_in_path, negs_not_in] ],\n alternative='greater')\n pathway_ps[pathway + ' ' + pathway_lookup[pathway]] = pval\n\n ## Fisher's Exact Test:\n # In Pathway: Not in Pathway:\n # DEG : degs_in_path degs_not_in\n # non-DEG : negs_in_path negs_not_in\n #\n\n return pathwaytype_ps, pathway_ps" ]
[ "0.5886894", "0.5863955", "0.5786592", "0.5592899", "0.558136", "0.5495973", "0.5433662", "0.5431351", "0.5404583", "0.53899485", "0.53796154", "0.53388137", "0.5241749", "0.52406466", "0.52103275", "0.5181477", "0.5170354", "0.51633763", "0.5160058", "0.513753", "0.5134894", "0.51310784", "0.5117002", "0.5113737", "0.508565", "0.50651896", "0.50619346", "0.50533986", "0.50268906", "0.5023865" ]
0.78855044
0
Compute the hash of a parsed JSON value using the given hash object. This function does not hash the JSON value, it hashes the object tree that is the result of parsing a string in JSON format. Hashables (JSON objects) are hashed entry by entry in order of the lexicographical ordering on the keys. Iterables are hashed in their inherent order. If value or any of its children is an iterable with nondeterministic ordering of its elements, e.g. a set, this method will yield nondeterministic results.
def hash_json( hash_obj, value ): try: items = iter(list(value.items( ))) except AttributeError: # Must check for string before testing iterability since strings are iterable if isinstance( value, str ): _hash_string( hash_obj, value ) else: try: iterator = iter( value ) except TypeError: # We must check for bool first since it is subclass of int (wrongly, IMHO) if isinstance( value, bool ): _hash_bool( hash_obj, value ) elif isinstance( value, (int, float) ): _hash_number( hash_obj, value ) else: raise ValueError( 'Type "%s" is not supported.' % type( value ).__name__ ) else: _hash_iterable( hash_obj, iterator ) else: _hash_hashable( hash_obj, items )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_hash_json(self):\n # pre-sorted str object\n self.assertEqual('5348ed1f4cd2f73e576bb66b866f2800', \\\n comparator.hash_json('{\"a_1\": [{\"a_2\": 2, \"f_2\": 3, \"g_2\": 1}], \"c_3\": 1}'))\n # pre-sorted dict object\n self.assertEqual('5348ed1f4cd2f73e576bb66b866f2800', \\\n comparator.hash_json({\"a_1\": [{\"a_2\": 2, \"f_2\": 3, \"g_2\": 1}], \"c_3\": 1}))\n # unsorted dict object\n self.assertEqual('5348ed1f4cd2f73e576bb66b866f2800', \\\n comparator.hash_json({\"a_1\": [{\"f_2\": 3, \"g_2\": 1, \"a_2\": 2}], \"c_3\": 1}))", "def _hash(self, value, get_val, get_child):\n hasher = getattr(hashlib, self.hash_func)\n children = get_child(value)\n\n # If leaf node\n if len(children) < 1:\n return hasher(get_val(value)).hexdigest()\n\n h = hasher()\n for child in children:\n # Tree is created recursively\n n = Node(child, get_val, get_child,\n self.hash_func)\n self.c.append(n)\n h.update(n.h.encode(\"utf-8\"))\n return h.hexdigest()", "def make_hash(o):\n\n if isinstance(o, (set, tuple, list)):\n\n return hash( tuple([make_hash(e) for e in o]) )\n\n elif not isinstance(o, dict):\n\n return hash(o)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def get_hash(dictionary):\n dhash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n dhash.update(encoded)\n return dhash.hexdigest()", "def _hash(value, trait):\n if isinstance(trait.trait_type, File):\n return hash_file(value, exists=trait.trait_type.exists)\n elif isinstance(trait.trait_type, List):\n if isinstance(value, (list, tuple)):\n return [_hash(v, trait.inner_traits[0]) for v in value]\n else:\n return _hash(value, trait.inner_traits[0])\n else:\n return value", "def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def update_hash(hasher, obj):\r\n hasher.update(str(type(obj)))\r\n if isinstance(obj, (tuple, list)):\r\n for e in obj:\r\n update_hash(hasher, e)\r\n elif isinstance(obj, dict):\r\n for k in sorted(obj):\r\n update_hash(hasher, k)\r\n update_hash(hasher, obj[k])\r\n else:\r\n hasher.update(repr(obj))", "def dict_hash(obj, start=''):\n h = hashlib.sha1(to_bytes(start))\n h.update(to_bytes(obj.__class__.__name__))\n if isinstance(obj, dict):\n for key, value in sorted(obj.items()):\n h.update(to_bytes(key))\n h.update(to_bytes(dict_hash(value)))\n elif isinstance(obj, (list, tuple)):\n for el in obj:\n h.update(to_bytes(dict_hash(el)))\n else:\n # basic types\n if isinstance(obj, bool):\n value = str(int(obj))\n elif isinstance(obj, (six.integer_types, float)):\n value = str(obj)\n elif isinstance(obj, (six.text_type, bytes)):\n value = obj\n elif obj is None:\n value = b''\n else:\n raise ValueError(\"Unsupported value type: %s\" % obj.__class__)\n h.update(to_bytes(value))\n return h.hexdigest()", "def deep_hash(obj):\n pass", "def hash(self, hashed_states=None):\n if hashed_states is None:\n hashed_states = []\n hashed_states.append(self)\n\n result = '1' if self.final else '0'\n result += str(len(self.children))\n for symbol in self.children:\n child = 
self.children[symbol]\n if child in hashed_states:\n result += str(symbol) + child.hash_value\n else:\n result += str(symbol) + self.children[symbol].hash(hashed_states)\n\n self.hash_value = result\n return result", "def hasher(c):\n try:\n return hash(c)\n except TypeError:\n if isinstance(c, Mapping):\n subhash = []\n for k in sorted(c.keys()):\n subhash.append(hash(k))\n subhash.append(hasher(c[k]))\n return hash(tuple(subhash))\n elif isinstance(c, Iterable):\n return hash(tuple(hasher(item) for item in c))\n else:\n raise TypeError('cant figure out ' + repr(c))", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash_dict(_dict):\n return hashlib.sha256(json.dumps(_dict).encode('utf-8')).hexdigest()", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def do_hash(dat: typing.Any) -> str:\n return hashlib.sha1(json.dumps(dat, sort_keys=True).encode('utf-8')).hexdigest()", "def json_hash(obj: Any, encoder: type[json.JSONEncoder] | None = CoercingEncoder) -> str:\n json_str = json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True, cls=encoder)\n return hash_all([json_str])", "def HashValue(self) -> _n_0_t_3[_n_0_t_9]:", "def dict_hash(dictionary: Dict[str, Any]) -> str:\n d_hash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n d_hash.update(encoded)\n return d_hash.hexdigest()", "def hash(block):\n # The dictionary MUST be ordered, or we can have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def tree_hash(hashes):\n while len(hashes) > 1:\n hashes = [hashlib.sha256(\"\".join(h[i:i+1])).digest() for i in range(i,2)]\n return hashes[0]", "def dict_hash(dictionary) -> str:\n dhash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n dhash.update(encoded)\n return dhash.hexdigest()", "def hexdigest(jsonable):\n string = json.dumps(jsonable, sort_keys=True).encode()\n return hashlib.sha1(string).hexdigest()", "def hash(block):\n\n # Dictionary must be ordered, else hashes will be inconsistent\n block_string = 
json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def structural_hash(obj: object) -> bytes:\n hasher = hashlib.blake2b()\n if isinstance(obj, (int, str, float, PurePath)):\n hasher.update(bytes(\"P\" + str(obj), \"utf-8\"))\n elif dataclasses.is_dataclass(obj):\n fields = dataclasses.fields(obj)\n hasher.update(bytes(f\"O{len(fields)}\\x20\", \"utf-8\"))\n for field in sorted(fields, key=lambda x: x.name):\n if not field.metadata.get(\"nohash\"):\n hasher.update(bytes(f\"F{len(field.name)}\\x20{field.name}\", \"utf-8\"))\n hasher.update(structural_hash(getattr(obj, field.name)))\n elif isinstance(obj, (collections.abc.Sequence, collections.abc.Set)):\n hasher.update(bytes(f\"L{len(obj)}\\x20\", \"utf-8\"))\n for member in obj:\n child_hash = structural_hash(member)\n hasher.update(bytes(f\"E{len(child_hash)}\\x20\", \"utf-8\"))\n hasher.update(child_hash)\n elif isinstance(obj, collections.abc.Mapping):\n hasher.update(bytes(f\"M{len(obj)}\\x20\", \"utf-8\"))\n for key, member in obj.items():\n child_hash = structural_hash(member)\n hasher.update(\n bytes(f\"E{len(key)}\\x20{key}\\x20{len(child_hash)}\\x20\", \"utf-8\")\n )\n hasher.update(child_hash)\n elif isinstance(obj, enum.Enum):\n hasher.update(bytes(str(obj), \"utf-8\"))\n elif obj is None:\n hasher.update(b\"N\")\n else:\n raise TypeError(\"Unhashable type\", obj)\n\n return hasher.digest()", "def make_hashable(value):\n if isinstance(value, dict):\n return tuple([\n (key, make_hashable(nested_value))\n for key, nested_value in sorted(value.items())\n ])\n # Try hash to avoid converting a hashable iterable (e.g. string, frozenset)\n # to a tuple.\n try:\n hash(value)\n except TypeError:\n if is_iterable(value):\n return tuple(map(make_hashable, value))\n # Non-hashable, non-iterable.\n raise\n return value" ]
[ "0.65041846", "0.6385206", "0.6255319", "0.62435746", "0.6187472", "0.6180679", "0.6165562", "0.6159233", "0.6080034", "0.5938248", "0.59185636", "0.5907186", "0.5907186", "0.5907186", "0.5907186", "0.5907186", "0.58739746", "0.58451295", "0.58451295", "0.58178645", "0.5810983", "0.5797206", "0.5788906", "0.5783955", "0.57684743", "0.5763555", "0.57598376", "0.575858", "0.57449573", "0.57237166" ]
0.79588073
0
Remove selected items from the tree. Because data is stored separately also need to deal with it, but deleting the matching items from the data list and updating all of the data indexes is a bit of a headache, so just make them empty.
def remove_treeItem(browser, tree): items = tree.selectedItems() for item in items: if item.listIndex: # Only dataset items have a listIndex browser.ui.workingDataTree.dataItems[item.listIndex] = [] sip.delete(item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_selected(self):\n if not self.tree_widget.selectedItems():\n self.configuration_widgets.logger.warning('Nothing has been selected. Please select an item and try again.')\n return\n _selected_items = self.tree_widget.selectedItems()\n root = self.tree_widget.invisibleRootItem()\n [(item.parent() or root).removeChild(item) for item in _selected_items]", "def unselectAll(self):\n\t\tself.tree.UnselectAll()", "def UnselectAll(self):\r\n\r\n rootItem = self.GetRootItem()\r\n\r\n # the tree might not have the root item at all\r\n if rootItem:\r\n self.UnselectAllChildren(rootItem)\r\n\r\n self.Unselect()", "def _remove_all(self):\n self._items.clear()\n self._listbox.delete(0, END)", "def remove_pos(self):\r\n selected_items = self.treeview.selection()\r\n for items in selected_items:\r\n values = self.treeview.item(items, 'values')\r\n if values[0] in self.holdings:\r\n del self.holdings[values[0]]\r\n self.treeview.delete(items)\r\n return None", "def clear_tree(self):\n self.treeview.delete(*self.treeview.get_children())", "def clean(self):\n filtered_items = {}\n for name, ls in self.items.items():\n filtered_ls = []\n for i in ls:\n if i.alive():\n filtered_ls.append(i)\n else:\n self.del_item(i)\n filtered_items[name] = filtered_ls\n self.items = filtered_items", "def deleteSelected(self):\n self.p.dat.flowsheet.deleteEdges(self.selectedEdges)\n self.selectedEdges = []\n self.p.dat.flowsheet.deleteNodes(self.selectedNodes)\n self.selectedNodes = []\n self.p.noneSelectedEmit()\n self.p.createScene()", "def clear(self) -> None:\n self.selected = {}\n self.orderings = []\n self.predicate = None\n self.limit_index = None\n self.offset_index = None\n self.callbacks.clear()", "def effect(self):\n self.load_node_info()\n nodes_to_remove = list()\n\n for id, node in self.svg.selected.items():\n selected_node_bbox = self.bbox(node)\n nodes_to_remove.append(node)\n\n # search the document tree for the selected node\n # when found, every subsequent node will be \"above\" it.\n # (i.e. svg documents draw from the background up, so a background\n # node will appear first, then nodes that are progressively\n # closer to the viewer will appear subsequently in the svg file)\n found_selected_node = False\n for node in self.document.getiterator():\n if not found_selected_node:\n if node == node:\n found_selected_node = True\n continue\n # Hereafter we are iterating over all nodes above the\n # selected node. We need to delete them if they appear to\n # be \"on top of\" the selection (i.e. within the bounding box\n # of the selection)\n try:\n node_bbox = self.bbox(node)\n except KeyError:\n continue\n if contains(selected_node_bbox, node_bbox):\n nodes_to_remove.append(node)\n\n # Now we remove the items we've previously found. 
Search and remove\n # need to be separate bulk steps because tree search is disrupted by\n # tree modification\n for condemned_node in set(nodes_to_remove):\n self.remove(condemned_node)", "def _clear(self):\n self._items = []\n self.key_listbox.delete(0, tk.END)\n self.value_listbox.delete(0, tk.END)", "def removeEmptyParents(self):\n\t\tremoveParents = []\n\t\tfor i in self.removeParents:\n\t\t\t# Remove pointer to item in this class if item is directory of some\n\t\t\t# kind of file type\n\t\t\tif i == self.lsmfiles:\n\t\t\t\tself.lsmfiles = None\n\t\t\telif i == self.leicafiles:\n\t\t\t\tself.leicafiles = None\n\t\t\telif i == self.bxdfiles:\n\t\t\t\tself.bxdfiles = None\n\t\t\telif i == self.oiffiles:\n\t\t\t\tself.oiffiles = None\n\t\t\telif i == self.bioradfiles:\n\t\t\t\tself.bioradfiles = None\n\t\t\telif i == self.interfilefiles:\n\t\t\t\tself.interfilefiles = None\n\t\t\telif i == self.liffiles:\n\t\t\t\tself.liffiles = None\n\t\t\telif i == self.mrcfiles:\n\t\t\t\tself.mrcfiles = None\n\t\t\telif i == self.ometiffiles:\n\t\t\t\tself.ometiffiles = None\n\n\t\t\tparent = self.tree.GetItemParent(i)\n\t\t\tself.tree.Delete(i)\n\t\t\tif parent and parent not in removeParents and self.tree.GetChildrenCount(parent) <= 0 and parent != self.tree.GetRootItem():\n\t\t\t\tremoveParents.append(parent)\n\t\tif removeParents:\n\t\t\tself.removeParents = removeParents\n\t\t\twx.CallAfter(self.removeEmptyParents)\n\t\telse:\n\t\t\tself.removeParents = []", "def DeleteAllItems(self):\r\n\r\n self.DeleteRoot()", "def resetTree(self):\n for fila in self.verDatos.get_children():\n self.verDatos.delete(fila)", "def reset(self) -> None:\r\n self.tree.delete(*self.tree.get_children())", "def leftdelalllistitems(self):\n self._leftlist.delete()", "def clear(self):\n self._last_item = None\n self._connected_items = []\n\n for item in self._items:\n item.deleteLater()\n\n self._items = []\n self._row_index = 1", "def clear(self):\n for pathItem in self.pathItem_list:\n self.scene.removeItem(pathItem)", "def clear(self):\n self.cb_attr.clear()\n self.group_listbox.clear()\n self.data = None\n self.__groups = None\n self.graph.reset()\n self.infoLabel.setText(\"No data on input.\")", "def UnselectAllChildren(self, item):\r\n\r\n if item.IsSelected():\r\n item.SetHilight(False)\r\n self.RefreshLine(item)\r\n \r\n if item.HasChildren():\r\n for child in item.GetChildren():\r\n self.UnselectAllChildren(child)", "def OnClearSelected(self, event):\n\n for i in range(self.m_dataViewListCtrlCloud.GetItemCount()):\n if self.m_dataViewListCtrlCloud.GetToggleValue(i, 0):\n series = self.m_dataViewListCtrlCloud.GetValue(i, 1)\n self.controller.db.deleteSeriesData(series)\n self.m_dataViewListCtrlCloud.DeleteItem(i)\n print('Row removed: ', i)", "def _uncheck_all(self):\n for item in self.list_cb_data.values():\n data_ctrl, _, _, _, _, _, _, _ = item\n self.tree_ctrl.CheckItem(data_ctrl, False)\n self.enable_append()\n self.enable_freeze()\n self.enable_plot()\n self.enable_import()\n self.enable_remove()", "def clean():\n new_tree = None", "def removeObject(self):\n\t\tfor SelectedItem in self.objects_lw.selectedItems():\n\t\t\tself.objects_lw.takeItem(self.objects_lw.row(SelectedItem) )", "def clear(self):\n self._ll_tree.clear()", "def test_remove_all_values1(delete_tree):\n delete_tree.remove(\"ted\")\n delete_tree.remove(\"tea\")\n delete_tree.remove(\"teabag\")\n delete_tree.remove(\"teabags\")\n delete_tree.remove(\"teabagger\")\n delete_tree.remove(\"teabaggers\")\n delete_tree.remove(\"teabagged\")", "def 
test_remove_all_values2(delete_tree):\n delete_tree.remove(\"ted\")\n delete_tree.remove(\"teabagged\")\n delete_tree.remove(\"tea\")\n delete_tree.remove(\"teabag\")\n delete_tree.remove(\"teabagger\")\n delete_tree.remove(\"teabags\")\n delete_tree.remove(\"teabaggers\")", "def clear(self, emit_signal=True):\n if not self._selection:\n return\n\n for widget in self._selection:\n refresh_selected_nodes(widget)\n\n self._selection = []\n\n if emit_signal:\n self.selection_changed()", "def __onRemoveClicked(self):\n\t\tresults = self.deleteSelectedListWidgetItems(self.ui.listWidget, \"Remove Items?\", \"Are you sure that you want to remove the selected items?\")\n\t\t# force the iteration... removal from the list is our only goal.\n\t\tfor item in results:\n\t\t\tpass", "def clear_data(self):\n if DataLoader.data is None:\n return\n\n self.clear_tree()\n # Clears the Header\n self.treeview['columns'] = []\n for i in self.treeview['columns']:\n self.treeview.column(i, anchor=\"w\")\n self.treeview.heading(i, text=i, anchor='w')\n # Clears the Data\n\n DataLoader.data = None\n gc.collect()\n self.summary_label.destroy()\n\n # Replace with default values\n self.treeview['columns'] = list(DataLoader.default.columns)\n for i in self.treeview['columns']:\n self.treeview.column(i, anchor=\"w\")\n self.treeview.heading(i, text=i, anchor='w')\n for index, row in DataLoader.default.iterrows():\n self.treeview.insert(\"\", 0, text=self.default.shape[0] - 1 - index, values=list(row))\n self.treeview.column('#1', width=500)" ]
[ "0.72222024", "0.67945594", "0.6788877", "0.6749571", "0.66043264", "0.6599819", "0.65675104", "0.6546712", "0.6505644", "0.634928", "0.6347942", "0.6327042", "0.632324", "0.63223594", "0.62746567", "0.6272785", "0.6232611", "0.6196286", "0.61239", "0.6118763", "0.6097092", "0.60642225", "0.6055485", "0.6043105", "0.6001546", "0.59916574", "0.59836197", "0.5969646", "0.59419525", "0.59349173" ]
0.72647107
0
Clone h5 item. Useful for Drag & Drop
def clone_item(item): i = h5Item(item.text(0)) i.path = item.path i.listIndex = item.dataIndex i.originalIndex = item.originalIndex i.data = item.data return i
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone(self):", "def clone(self):\n raise NotImplementedError", "def copy(self, h5file=None):\n h5 = qpimage.core.copyh5(self.h5, h5file)\n return FLImage(h5file=h5, h5dtype=self.h5dtype)", "def copy(self):\n new_h5 = FileHDFio(file_name=self.file_name, h5_path=self.h5_path)\n new_h5._filter = self._filter\n return new_h5", "def copy(self):\n new_h5 = ProjectHDFio(\n project=self._project, file_name=self._file_name, h5_path=self._h5_path\n )\n new_h5._filter = self._filter\n return new_h5", "def clone(self):\r\n #return copy(self)\r\n cp = self.__class__(self.type, None, None, self.name)\r\n cp.tag = copy(self.tag)\r\n return cp", "def clone(self, *args):\n return _osgAnimation.Bone_clone(self, *args)", "def clone(self):\r\n cp = self.__class__(self.type, self.data, self.name)\r\n cp.tag = copy(self.tag)\r\n return cp", "def onClone(self):\n pass", "def bclone():\n node = nuke.selectedNodes()\n if len(node)==1:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(node[0].name()+\"\\nClone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(node[0].name()+\"\\nClone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n\n if len(node)==0:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(\"Clone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(\"Clone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n if len(node)!=0 and len(node)!=1:\n nuke.message('Just select one node to clone !')", "def _clone_node(self) -> 'Tag':\n clone = type(self)()\n for attr in self.attributes:\n clone.setAttribute(attr, self.getAttribute(attr))\n for c in self.classList:\n clone.addClass(c)\n clone.style.update(self.style)\n # TODO: should clone event listeners???\n return clone", "def copyItem(self):\n # extract all selected item\n itms = []\n for item in self.scene.selectedItems():\n if isinstance(item, DiagramItem):\n itms.append(item.data)\n\n # pickle data\n mime = QMimeData()\n mime.setData( self.__mime__ , QByteArray(pickle.dumps(itms)) )\n\n # copy to clipboard\n QApplication.clipboard().setMimeData(mime,QClipboard.Clipboard)\n self.pasteAction.setEnabled(True)", "def copy(self):\n # YOUR CODE HERE\n raise NotImplementedError()", "def copy(self):", "def clone(self):\n return self.__class__(self.name, *self)", "def Copy(self, item):\r\n\r\n self._id = item._id\r\n self._name = item._name\r\n self._title = item._title\r\n self._isGroup = item._isGroup\r\n self._breakColumn = item._breakColumn\r\n self._rect = item._rect\r\n self._font = item._font\r\n self._textColour = item._textColour\r\n self._bitmap = item._bitmap\r\n self._description = item._description\r\n self._rowPos = 
item._rowPos\r\n self._colPos = item._colPos\r\n self._window = item._window", "def clone(context, request):\n if request.has_permission('create'):\n return {\n 'name': 'clone',\n 'title': 'Clone',\n 'profile': '/profiles/{ti.name}.json'.format(ti=context.type_info),\n 'href': '{item_uri}#!clone'.format(item_uri=request.resource_path(context)),\n }", "def clone(self):\n new = super().clone()\n new._description = None\n return new", "def clone( m, orig):\r\n if m.ObjType not in (1, 6): return\r\n if not orig: return\r\n \r\n if m.ObjType == 6: # Target is a Folder\r\n if orig.ObjType == 6: cloned = m.CopyFolderDisp( orig) # Orig is Folder too\r\n else: cloned = m.CopyFCODisp( orig) # Orig is FCO\r\n elif m.ObjType == 1:\r\n cloned = m.CopyFCODisp( orig, metaRole( orig)) # Target is Model, Orig is FCO\r\n \r\n if cloned:\r\n \tcloned.Name = \"Cloned\" + orig.Name\r\n return cloned", "def clone(self):\n return None", "def GetClone(self, *args, **kwargs):\n pass", "def copy( self ):\n n = None\n if self.nodeType == 1:\n n = HtmlDomNode( self.nodeName, self.nodeType )\n n.children = self.children\n n.attributes = self.attributes\n elif self.nodeType == 3:\n n = HtmlDomNode()\n n.text = self.text\n return n", "def test_clone_scenario(self):\n pass", "def insert_copied(self, *args):\n copy = self.get_copied_food()\n if copy is not None:\n fsp = self.get_food_search_panel()\n fsp.reset_food_icon()\n if type(copy.food) == Food:\n food = FoodUsage(food=copy.food, amount=decimal.Decimal(100))\n if self.meal.foods is None:\n self.meal.foods = [food]\n else:\n self.meal.foods.append(food)\n\n self.add_food_node(food)\n else: # copy.food is Recipe\n recipe_exec = Recipe(name=copy.food.name, is_template=False, notes=\"\",\n serving_size=decimal.Decimal(1), template=copy.food)\n for ing in copy.food.ingredients:\n recipe_exec.add_food(ing.food, ing.amount)\n self.meal.add_recipe(recipe_exec)\n recipe_node = TreeViewRecipe(recipe=recipe_exec,\n meal_tree_box=self.meal_tree_box,\n parent_node=self)\n fsp.remove_copy()\n self.update_text(True)", "def clone(self, *args):\n return _osgAnimation.Skeleton_clone(self, *args)", "def clone(self) -> Mutator:\n raise NotImplementedError", "def copy (self):\n return self.__class__(self.name, self[:])", "def clone(self):\n return self", "def clone_rand(self):", "def copy( self ):\n New = grid(self.data, self.xllcorner, self.yllcorner, self.cellsize, 'copy-'+self.name, self.nodata)\n return New" ]
[ "0.6020959", "0.58370215", "0.5827161", "0.5780741", "0.57302743", "0.57141834", "0.568242", "0.5659609", "0.5652022", "0.56075025", "0.55323535", "0.5526989", "0.5505679", "0.5489802", "0.54832345", "0.5482787", "0.5463926", "0.53882384", "0.5372098", "0.5372057", "0.533904", "0.5332013", "0.52844596", "0.5269466", "0.5263963", "0.52488846", "0.5233581", "0.522172", "0.520511", "0.514406" ]
0.74541837
0
Helper function to convert SPARQL results into a Pandas data frame.
def get_sparql_dataframe(query, service = "https://query.wikidata.org/sparql"): sparql = SPARQLWrapper(service) sparql.setQuery(query) sparql.setReturnFormat(JSON) result = sparql.query() processed_results = json.load(result.response) cols = processed_results['head']['vars'] out = [] for row in processed_results['results']['bindings']: item = [] for c in cols: item.append(row.get(c, {}).get('value')) out.append(item) return pd.DataFrame(out, columns=cols)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sparql_dataframe(service, query):\n sparql = SPARQLWrapper(service)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n result = sparql.query()\n\n processed_results = json.load(result.response)\n cols = processed_results['head']['vars']\n\n out = []\n for row in processed_results['results']['bindings']:\n item = []\n for c in cols:\n item.append(row.get(c, {}).get('value'))\n out.append(item)\n\n return pd.DataFrame(out, columns=cols)", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def dataFrame(self):\n\n memory_file = StringIO(initial_value=self.sparql_result.decode('utf-8'), newline='\\n')\n reader = DictReader(memory_file)\n\n schema = StructType(\n list(map(lambda f: StructField(f, StringType()), reader.fieldnames))\n )\n\n data = list(map(lambda d: [d[f] for f in reader.fieldnames], list(reader)))\n\n return self.spark.createDataFrame(data, schema)", "def get_sparql_dataframe(self, query: str, text: str = \"\") -> pd.DataFrame:\n\n if self.verbose:\n print(tm.strftime(f\"[%H:%M:%S] Transmission {text} en cours...\"), end='')\n\n self.sparql.setQuery(query)\n\n processed_results: Wrapper.QueryResult = self.sparql.query()\n\n # We will check if the results are incomplete due to server limitations\n if 'x-sparql-maxrows' in processed_results.info():\n max_size: int = int(processed_results.info()['x-sparql-maxrows'])\n warnings.warn(f\"Warning: The server has limited the number of rows to {max_size}: result incomplete.\")\n\n if 'x-sql-state' in processed_results.info():\n warnings.warn(\"Warning: The server has limited the time of queries: partial result for a timed out query\")\n\n processed_results: dict = processed_results.convert()\n\n if self.verbose:\n print(tm.strftime(f\"\\r[%H:%M:%S] Transmission {text} réussi, conversion en Data Frame...\"), end='')\n\n cols: list[str] = processed_results['head']['vars']\n\n out: list[list[str]] = [[row.get(c, {}).get('value') for c in cols] for row in\n processed_results['results']['bindings']]\n\n if self.verbose:\n print(tm.strftime(f\" Effectué\"))\n\n return pd.DataFrame(out, columns=cols)", "def get_dataframe(q):\n cnx = create_engine(postgres_str)\n query = q\n return pd.read_sql_query(query, cnx)", "def query_to_df(query):\n df = pd.DataFrame(query.all())\n df.columns = [x['name'] for x in query.column_descriptions]\n return df", "def get_pandas(self):\n return pd.DataFrame(self.results)", "def get_query_result_to_df(self, query):\r\n try:\r\n return pd.read_sql_query(query, self.conn)\r\n except pd.pandas.io.sql.DatabaseError:\r\n print('Execution failed. 
Database error')", "def create_dataframe(result):\n # List of elements in the search result\n names = []\n snippet = []\n url = []\n \n # Append search results to list\n for j,item in enumerate(result):\n for i,element in enumerate(result[j]['items']):\n names.append(result[j]['items'][i]['title'])\n snippet.append(result[j]['items'][i]['snippet'])\n url.append(result[j]['items'][i]['link'])\n \n # Create a dataframe\n df = pd.DataFrame(list(zip(names, snippet,url)), \n columns =['name', 'snippet','url']) \n \n return df", "def query2df(query):\n df = pd.DataFrame(data = list(itertools.product([0, 1], repeat=len(query.variables))), columns=query.variables)\n df['p'] = query.values.flatten()\n return df", "def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)", "def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())", "def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:\n data = client.read_sparql(query)\n df = None\n if \"results\" in data and \"bindings\" in data[\"results\"]:\n df = pd.DataFrame(data[\"results\"][\"bindings\"])\n df.applymap(lambda x: x[\"value\"])\n else:\n df = pd.DataFrame(data)\n\n return df", "def as_dataframe(self) -> \"pd.DataFrame\":\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df", "def sql(q, database_url):\r\n output, cur_description = Q(q, database_url, out=True, description=True)\r\n # print(cur_description)\r\n cols = [i[0] for i in cur_description]\r\n return pd.DataFrame(output, columns=cols)", "def query_to_df(db, sql):\n conn_string = return_connection(db)\n with pg2.connect(conn_string) as conn:\n return psql.read_sql(sql, conn)", "def query(self, sql):\n df = pd.read_sql(sql, self.conn)\n return df", "def FetchQueryResultToDF(data, col_name: List[str]) -> pd.DataFrame:\r\n result = []\r\n for row in data:\r\n to_be_append = []\r\n for col in row:\r\n to_be_append.append(col)\r\n result.append(to_be_append)\r\n df = pd.DataFrame(result, columns=col_name)\r\n print(df)\r\n return df", "def do_query(self) -> pd.DataFrame:\n if self.resultSize > self.step:\n query: str = self.query + f\" LIMIT {self.step}\"\n return pd.concat(\n [self.get_sparql_dataframe(query + f\" OFFSET {value}\", f\"{value} sur {self.resultSize}\") for value in\n range(0, self.resultSize, self.step)])\n return self.get_sparql_dataframe(self.query)", "def db_to_df(query):\n conn = loader.database._connection\n return sql.read_frame(query, conn)", "def parse_result_series(result):\n if isinstance(result, np.ndarray):\n return result\n\n if result is None or not len(result):\n return None\n\n dates, values = result\n return pd.DataFrame({0:dates.astype(int)/1000,1:values})", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(\"set hive.execution.engine = tez\")\n cursor.execute(\"set tez.queue.name = sephora_internal\")\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' 
in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return df", "def get_frame_from_query(the_query, colnames):\n df = DataFrame.from_records(list(the_query), columns=colnames)\n return df", "def get_df(self) -> pd.DataFrame:\n return pd.DataFrame(self.fetchall(), columns=self.headers())", "def qset_to_df(qset, datatype='object'):\n df = pd.DataFrame(list(qset.values()), dtype=datatype)\n return df", "def to_DataFrame(cls, qs):\n dates = [pd.to_datetime(x[0]) for x in qs.values_list('date')]\n data = qs.values('open', 'close', 'high', 'low', 'volume')\n df = pd.DataFrame.from_records(data, index=dates)\n return df", "def query_into_pandas(self, query, fields=None, parameters=None, names=None):\n target_url = self.build_query(query, fields=fields, parameters=parameters)\n\n col_id = 'columns'\n col_names = None\n if names is None:\n # If the columns of the query are specified (used for 'tab' or 'txt' value of\n # parameters['format'] only), then we use the same for the DataFrame\n if col_id in parameters:\n col_names = parameters[col_id].split(',')\n else:\n col_names = names\n\n db = pd.read_csv(\n target_url,\n delimiter=\"\\t\",\n skiprows=1,\n header=None,\n names=col_names\n )\n return db", "def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df", "def run_query(query):\n db.query(query)\n dbResult = db.store_result()\n dbFetched = dbResult.fetch_row(maxrows = 0, how = 2)\n df = pd.DataFrame.from_records(dbFetched)\n return df" ]
[ "0.7584528", "0.7129623", "0.7129623", "0.711426", "0.7094009", "0.7050637", "0.70471936", "0.6952182", "0.6945529", "0.6940668", "0.6868899", "0.6827423", "0.6801506", "0.6724016", "0.67137694", "0.6685681", "0.6672927", "0.6663773", "0.66266656", "0.6613739", "0.6602476", "0.6570724", "0.6532051", "0.6480355", "0.64583874", "0.6457778", "0.6451386", "0.6431007", "0.6378793", "0.63534445" ]
0.7614764
0
Build a set of resources who are available for a given time. It might make more sense to work based on a given restricted resource set.
def avail(self, time, resource_group): a = set() for r in self.resource_group.resources: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_resources_by_age(self, resources: [], resource_age_minutes: int):\n all_resources = []\n for resource in resources:\n if resource_age_minutes:\n start = self._to_utc_datetime(resource.updated_on)\n end = datetime.utcnow().replace(tzinfo=pytz.UTC)\n interval = (end - start).total_seconds() / 60\n if interval >= resource_age_minutes:\n all_resources.append(resource)\n else:\n all_resources.append(resource)\n return all_resources", "def _get_doctor_available_times(self, date, time_start, time_end, addresses):\n availability = []\n for adress in addresses:\n timesheet = self._compute_current_timesheet(\n date, time_start, time_end, adress)\n if not timesheet:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is not available in {adress.name}\", \"state\": \"not_available\"},\n \"date\": date,\n \"start_time\": str(time_start),\n \"end_time\": str(time_end)\n\n }\n )\n continue\n else:\n from_datetime = datetime.datetime.combine(date, time_start)\n to_datetime = datetime.datetime.combine(date, time_end)\n meetings = self._compute_concurrency(from_datetime.replace(\n hour=0, minute=0), to_datetime.replace(hour=23, minute=59), adress)\n start_time = datetime.time(\n hour=int(timesheet.hour_from), minute=int(modf(timesheet.hour_from)[0] * 60))\n end_time = datetime.time(\n hour=int(timesheet.hour_to), minute=int(modf(timesheet.hour_to)[0] * 60))\n current_time = start_time\n if not meetings:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is available in {adress.name} from {start_time} till {end_time}\", \"state\": \"available\"},\n \"date\": date,\n \"start_time\": str(time_start),\n \"end_time\": str(time_end)\n }\n )\n continue\n for index, meeting in enumerate(meetings):\n tz = timezone(self.env.user.tz)\n start_date_meeting = pytz.utc.localize(\n meeting.start_date).astimezone(tz)\n end_date_meeting = pytz.utc.localize(\n meeting.end_date).astimezone(tz)\n if start_date_meeting.time() > current_time:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is available in {adress.name} from {current_time} till {start_date_meeting.time()}\", \"state\": \"available\"},\n \"date\": date,\n \"start_time\": str(current_time),\n \"end_time\": str(start_date_meeting.time())\n }\n )\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is not available in {adress.name} from {current_time} till {end_date_meeting.time()}\", \"state\": \"not_available\"},\n \"date\": date,\n \"start_time\": str(start_date_meeting.time()),\n \"end_time\": str(end_date_meeting.time())\n }\n )\n current_time = end_date_meeting.time()\n\n if start_date_meeting.time() == current_time:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is not available in {adress.name} from {current_time} till {end_date_meeting.time()}\", \"state\": \"not_available\"},\n \"date\": date,\n \"start_time\": str(start_date_meeting.time()),\n \"end_time\": str(end_date_meeting.time())\n }\n )\n current_time = end_date_meeting.time()\n\n if current_time < end_time:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is available in {current_time} from {end_time} till {end_date_meeting.time()}\", \"state\": \"available\"},\n \"date\": date,\n \"start_time\": str(current_time),\n 
\"end_time\": str(end_time)\n }\n )\n return availability", "def get_resource_available_in_dt_range(candidate_resources, dt_range,\n new_resource_occupations):\n for resource in candidate_resources:\n\n # Only occupations of current resource\n res_new_occupations = [y[1] for y in filter(\n lambda x: x[0] == clean_resource(resource),\n new_resource_occupations)]\n\n # Check availability\n availability = resource.get('availability')\n if (availability and not is_datetime_range_available(dt_range,\n availability)):\n continue\n\n # Check occupations\n occupations = resource.get('occupations', []) + res_new_occupations\n overlappings = [overlaps(dt_range, o) for o in occupations]\n if any(overlappings):\n continue\n\n return resource\n\n return None", "def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)", "def _create_resource_consumption_dict():\n\n returned_resource_dict = {}\n\n # things that are quantities should start at 0.0\n for resource in resource_constants.quantity_resources:\n returned_resource_dict[resource] = 0.0\n\n for resource in resource_constants.item_resources:\n # double check there is no overlap...\n if resource in resource_constants.quantity_resources:\n raise InternalRepyError(\"Resource '\"+resource+\"' cannot be both quantity and item based!\")\n\n returned_resource_dict[resource] = set()\n\n # I need locks to protect races in accesses to some items...\n returned_resource_dict['fungible_locks'] = {}\n for init_resource in resource_constants.fungible_item_resources:\n returned_resource_dict['fungible_locks'][init_resource] = threading.Lock()\n\n returned_resource_dict['renewable_locks'] = {}\n for init_resource in resource_constants.renewable_resources:\n returned_resource_dict['renewable_locks'][init_resource] = threading.Lock()\n\n\n # I also need to track when the last update of a renewable resource occurred\n returned_resource_dict['renewable_update_time'] = {}\n\n # (Aside) JAC: I've thought about this and looked through the commit history.\n # I don't see any reason to initialize the renewable resources with the\n # current time (as was done before).\n for init_resource in resource_constants.renewable_resources:\n returned_resource_dict['renewable_update_time'][init_resource] = 0.0\n\n\n return returned_resource_dict", "def appointments(resources_slots, from_date, to_date, resources=[], status_all=[], resources_all={}):\n\n query = \"\"\"\n SELECT A.STARTTIME, A.ENDTIME, V.APPOINTMENTTYPEID, V.TYPE, \\\n A.RESOURCEID, APPOINTMENTDATE, S.STATUS, S.APPOINTMENTSTATUSID\n FROM PATIENT P\n JOIN PATIENT_APPOINTMENTS AS A ON P.PATIENTID = A.PATIENTID\n JOIN APPOINTMENTTYPE AS V ON a.APPOINTMENTTYPEID = v.APPOINTMENTTYPEID\n LEFT OUTER JOIN APPOINTMENTSTATUS AS S ON A.APPOINTMENTSTATUSID = S.APPOINTMENTSTATUSID\n left join (PATIENTINSURANCE PAI\n join INSURANCE_TYPE IT on IT.INSURANCE_TYPE_ID=PAI.INSURANCE_TYPEID\n join INSURANCE_COMPANY IC on IC.INSURANCE_COMPANY_ID=PAI.INSURANCE_COMPANY_ID)\n on P.PatientID=PAI.PATIENTID and PAI.INSURANCE_TYPEID=1 and PAI.ACTIVE = 1\n WHERE V.APPOINTMENTTYPEID = 
A.APPOINTMENTTYPEID AND P.PATIENTID = A.PATIENTID\n AND A.ACTIVE = 1\n \"\"\"\n\n if from_date and to_date:\n query += \" AND APPOINTMENTDATE >= '%s' AND APPOINTMENTDATE <= '%s' \" % (from_date, to_date)\n\n if resources:\n query += \" AND A.RESOURCEID IN (%s)\" % ','.join([str(r) for r in resources])\n\n query += \" ORDER BY A.STARTTIME\"\n results = []\n if not EMRSQLServer.connection():\n return results\n\n rows = EMRSQLServer.execute_query(query)\n\n output = defaultdict(list)\n for row in rows:\n output[row['RESOURCEID']].append(row)\n for item, value in output.items():\n studies = defaultdict(list)\n for i, v in enumerate(output[item]):\n studies_start_date = v['APPOINTMENTDATE'].strftime('%Y-%m-%d')\n studies[item].append({\n 'name': v['TYPE'],\n 'start_time': v['STARTTIME'],\n 'end_time': v['ENDTIME'],\n 'studies_start_date': studies_start_date,\n 'status': v['STATUS'],\n 'APPOINTMENTSTATUSID': v['APPOINTMENTSTATUSID']\n })\n\n studies_by_date = defaultdict(list)\n studies_seen = defaultdict(list)\n for st in studies[item]:\n studies_by_date[st['studies_start_date']].append({\n 'name': st['name'],\n 'start_time': st['start_time'].strftime('%H:%M:%S'),\n 'end_time': st['end_time'].strftime('%H:%M:%S'),\n 'status': st['status']\n })\n studies_seen[st['APPOINTMENTSTATUSID']].append({\n 'name': st['name'],\n 'start_time': st['start_time'].strftime('%H:%M:%S'),\n 'end_time': st['end_time'].strftime('%H:%M:%S'),\n 'status': st['status']\n })\n\n number_of_confirmed_studies = sum([len(studies_seen[int(i)]) for i in status_all])\n days_taken_for_studies = len(studies_by_date)\n total_slots_for_days = resources_slots[item] * days_taken_for_studies\n utilization = (number_of_confirmed_studies * 100) // total_slots_for_days\n\n if utilization <= 79:\n color_code, text_color = '#d9534f', 'white'\n elif (utilization >= 80) and (utilization <= 89):\n color_code, text_color = '#ffe14b', 'black'\n elif utilization >= 90:\n color_code, text_color = '#3c903d', 'white'\n\n results.append({\n 'ResourceID': item,\n 'ResourceName': resources_all[item],\n 'TotalStudies': len(value),\n 'Studies': studies[item],\n 'studies_by_date': studies_by_date,\n 'utilization': '{0}%'.format(utilization),\n 'scheduled_percentage': '{0}%'.format((len(value) * 100) // total_slots_for_days),\n 'number_of_confirmed_studies': number_of_confirmed_studies,\n 'seen_percentage': '{0}%'.format((number_of_confirmed_studies * 100) // len(value)),\n 'total_slots_in_a_day': total_slots_for_days,\n 'color_code': color_code,\n 'text_color': text_color\n })\n return results", "def availableWorkersDuringPeriod(self, begin, end):\n availableWorkers = []\n for worker in self._workers:\n if worker.availableInPeriod(begin, end):\n availableWorkers.append(worker)\n return availableWorkers", "def available_hours(\n self,\n requested_date: datetime,\n student: \"Student\" = None,\n duration: int = None,\n only_approved: bool = False,\n places: Tuple[Optional[str]] = (None, None),\n ) -> Iterable[Tuple[datetime, datetime]]:\n if not requested_date:\n return []\n\n todays_appointments = self.appointments.filter(\n func.extract(\"day\", Appointment.date) == requested_date.day\n ).filter(func.extract(\"month\", Appointment.date) == requested_date.month)\n work_hours = self.work_hours_for_date(requested_date, student=student)\n taken_appointments = self.taken_appointments_tuples(\n todays_appointments, only_approved\n )\n blacklist_hours = {\"start_hour\": set(), \"end_hour\": set()}\n if student and work_hours:\n approved_taken_appointments = 
self.taken_appointments_tuples(\n todays_appointments, only_approved=True\n )\n hours = LessonRule.init_hours(\n requested_date, student, work_hours, approved_taken_appointments\n )\n for rule_class in rules_registry:\n rule_instance: LessonRule = rule_class(\n requested_date, student, hours, places\n )\n blacklisted = rule_instance.blacklisted()\n for key in blacklist_hours.keys():\n blacklist_hours[key].update(blacklisted[key])\n\n work_hours.sort(key=lambda x: x.from_hour) # sort from early to late\n for slot in work_hours:\n hours = (\n requested_date.replace(hour=slot.from_hour, minute=slot.from_minutes),\n requested_date.replace(hour=slot.to_hour, minute=slot.to_minutes),\n )\n yield from get_slots(\n hours,\n taken_appointments,\n timedelta(minutes=duration or self.lesson_duration),\n force_future=True,\n blacklist=blacklist_hours,\n )", "def get(self, id=None, o=None):\n\n response = []\n current_user = self.get_current_user()\n\n # [?timestamp_start=<XXX>&timestamp_end=<XXX>]\n ts = self.get_argument('timestamp_start',None)\n te = self.get_argument('timestamp_end',None)\n\n # GET /resources\n if not id and not o and not ts and not te:\n cursor = yield r.table('resources') \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n\n # GET /resources?timestamp_start=<XXX>&timestamp_end=<XXX>\n elif not id and not o:\n try:\n nb_leases = yield r.table(\"leases\").count().run(self.dbconnection)\n if nb_leases > 0:\n # Resources NOT in Leases\n cursor = yield r.table('resources') \\\n .filter({'available':'true'}) \\\n .filter( lambda resource:\n r.table(\"leases\").map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).contains(resource['id']).not_() \\\n ).run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n\n if ts and te:\n # List of Resources ids in Leases but not in the given time range\n in_leases = yield r.table(\"leases\").filter(lambda l:\n r.or_(l['start_time'].gt(int(te)),l['end_time'].lt(int(ts)))\n ).map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).map(lambda x:\n r.table('resources').get(x) \\\n ).run(self.dbconnection)\n logger.debug(in_leases)\n response = response + in_leases\n\n if ts and not te:\n # List of Resources ids in Leases but not in the given time range\n in_leases = yield r.table(\"leases\").filter(lambda l:\n l['end_time'].lt(int(ts))\n ).map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).map(lambda x:\n r.table('resources').get(x) \\\n ).run(self.dbconnection)\n response = response + in_leases\n\n if not ts and te:\n # List of Resources ids in Leases but not in the given time range\n in_leases = yield r.table(\"leases\").filter(lambda l:\n l['start_time'].gt(int(te))\n ).map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).map(lambda x:\n r.table('resources').get(x) \\\n ).run(self.dbconnection)\n response = response + in_leases\n else:\n # All available Resources (No Leases in DB)\n cursor = yield r.table('resources') \\\n .filter({'available':'true'}) \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n except Exception as e:\n logger.exception(e)\n\n # GET /resources/<id>\n elif not o and id and self.isUrn(id):\n\n cursor = yield 
r.table('resources') \\\n .filter({'id': id}) \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n # GET /resources/<id>/leases\n elif id and self.isUrn(id) and o == 'leases':\n cursor = yield r.table(o) \\\n .filter(lambda lease: lease[\"resources\"].contains(id)) \\\n .run(self.dbconnection)\n #\n\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n # GET /resources/<id>/slices\n elif id and self.isUrn(id) and o == 'slices':\n cursor = yield r.table(o) \\\n .filter(lambda slice: slice[\"resources\"]==id) \\\n .run(self.dbconnection)\n #\n\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n # GET /resources/<id>/testbeds\n elif id and self.isUrn(id) and o == 'testbeds':\n cursor = yield r.table('resources') .filter({'id': id}) \\\n .pluck('id','testbed','manager') \\\n .merge(lambda res: {\n 'testbeds': r.table('testbeds').get_all(res['testbed'], index='id') \\\n .coerce_to('array')\n }) \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n else:\n self.userError(\"invalid request\")\n\n return\n\n self.finish(json.dumps({\"result\": response}, cls=myJSONEncoder))", "def get_resource_information():\n\n\n # the resources we are allowed to use is easy. We just copy this...\n resource_limit_dict = _resources_allowed_dict.copy()\n\n \n # from the other dict, we only take the resource information. (this omits\n # locks and timing information that isn't needed)\n\n # first, let's do the easy thing, the quantity resources. These are just \n # floats\n resource_use_dict = {}\n for resourcename in resource_constants.quantity_resources:\n resource_use_dict[resourcename] = _resources_consumed_dict[resourcename]\n\n # for the fungible resources (files opened, etc,), we only need a count...\n for resourcename in resource_constants.fungible_item_resources:\n resource_use_dict[resourcename] = len(_resources_consumed_dict[resourcename])\n\n # for the individual item resources (ports, etc,), we copy the set...\n for resourcename in resource_constants.individual_item_resources:\n resource_use_dict[resourcename] = _resources_consumed_dict[resourcename].copy()\n\n # and that's it!\n return (resource_limit_dict, resource_use_dict)", "def _get_resource_tasks(self, existing, desired):\n unmanaged = {\n name: resource for name, resource in list(existing.items())\n if resource.whitelist is True\n }\n managed = {\n name: resource for name, resource in list(existing.items())\n if resource.whitelist is False\n }\n\n desired_set = set(desired)\n existing_set = set(existing)\n unmanaged_set = set(unmanaged)\n managed_set = set(managed)\n # Create any managed resource that doesn't currently exist\n create_list = [\n desired[resource] for resource in\n desired_set - existing_set\n ]\n\n # Update managed resources that diff between desired and actual\n update_list = [\n desired[resource] for resource in desired_set & managed_set\n if desired[resource] != managed[resource]\n ]\n\n # Merge unmanaged resources with desired if needed\n for resource in unmanaged_set:\n update_resource = self._merge_resource(\n resource, desired, unmanaged)\n if update_resource:\n update_list.append(update_resource)\n\n # Delete any managed resource that isn't still desired\n delete_list = [\n managed[resource] for resource in\n managed_set - desired_set\n ]\n\n # These resources, and the resource they reference,\n # should not be 
deleted\n unmanaged_list = [\n unmanaged[resource] for resource in unmanaged_set\n ]\n\n return (create_list, update_list, delete_list, unmanaged_list)", "def resource_restrictions(self, resource_type, params, username, group):\n # create session for ConfigDB\n session = self.config_models.session()\n\n # query restricted resources\n restrictions = self.resource_permission_handler.restrictions(\n resource_type, params, username, group, session\n )\n\n # close session\n session.close()\n\n return {\n 'restrictions': restrictions\n }", "def get_avail_time_slots(self, cid, date):\n booked = self.get_time_slots(cid, date)\n avail_time_slots = []\n for time in self.initial_time_slots:\n if time not in booked:\n avail_time_slots.append(time)\n return avail_time_slots", "def get_resources():\n # Acquire the lock...\n get_resources_lock.acquire()\n\n # ...but always release it\n try:\n # Construct the dictionaries as copies from nanny\n (limits,usage) = nanny.get_resource_information()\n\n\n # Calculate all the usage's\n pid = os.getpid()\n\n # Get CPU and memory, this is thread specific\n if ostype in [\"Linux\", \"Darwin\"]:\n \n # Get CPU first, then memory\n usage[\"cpu\"] = os_api.get_process_cpu_time(pid)\n\n # This uses the cached PID data from the CPU check\n usage[\"memory\"] = os_api.get_process_rss()\n\n # Get the thread specific CPU usage\n usage[\"threadcpu\"] = os_api.get_current_thread_cpu_time() \n\n\n # Windows Specific versions\n elif ostype in [\"Windows\"]:\n \n # Get the CPU time\n usage[\"cpu\"] = windows_api.get_process_cpu_time(pid)\n\n # Get the memory, use the resident set size\n usage[\"memory\"] = windows_api.process_memory_info(pid)['WorkingSetSize'] \n\n # Get thread-level CPU \n usage[\"threadcpu\"] = windows_api.get_current_thread_cpu_time()\n\n # Unknown OS\n else:\n raise EnvironmentError(\"Unsupported Platform!\")\n\n # Use the cached disk used amount\n usage[\"diskused\"] = cached_disk_used\n\n finally:\n # Release the lock\n get_resources_lock.release()\n\n # Copy the stop times\n stoptimes = process_stopped_timeline[:]\n\n # Return the dictionaries and the stoptimes\n return (limits,usage,stoptimes)", "def _wait_till_resources_ready(self, resources, timeout):\n start = time.time()\n elapsed = 0\n while resources and elapsed < timeout:\n resource = resources.popleft()\n if not resource.is_ready():\n resources.append(resource)\n\n time.sleep(5)\n elapsed = time.time() - start\n\n if resources:\n raise ValidationError(\"Failed to verify all VM resources started\")", "def list_resources(self, metadata_query=None,\n start_time=None,\n end_time=None,\n limit=None):\n\n self.list_matched_resources(filter_by_project_id=True)\n self.list_matched_resources(filter_by_user_id=True)\n self.list_matched_resources(filter_by_resource_id=True)\n if metadata_query:\n self.list_matched_resources(metadata_query=metadata_query)\n if start_time:\n self.list_matched_resources(start_time=start_time)\n if end_time:\n self.list_matched_resources(end_time=end_time)\n if start_time and end_time:\n self.list_matched_resources(start_time=start_time,\n end_time=end_time)\n if limit:\n self.list_matched_resources(limit=limit)", "def lock_resources(self, request):\n locked_resources = []\n\n client = request.worker.name\n user_name, _ = client.split(\":\") # splitting <user_name>:<port>\n\n if not auth_models.User.objects.filter(username=user_name).exists():\n raise UnknownUserError(\n \"User %r has no matching object in the DB\" % user_name)\n\n user = 
auth_models.User.objects.get(username=user_name)\n\n groups = list(user.groups.all())\n\n for descriptor_dict in request.message.descriptors:\n\n desc = ResourceDescriptor.decode(descriptor_dict)\n self.logger.debug(\"Locking %r resource\", desc)\n\n # query for resources that are usable and match the user's\n # preference, which are either belong to a group he's in or\n # don't belong to any group.\n query = (Q(is_usable=True, **desc.properties) &\n (Q(group__isnull=True) | Q(group__in=groups)))\n matches = desc.type.objects.filter(query).order_by('-reserved')\n\n if matches.count() == 0:\n raise ResourceDoesNotExistError(\"No existing resource meets \"\n \"the requirements: %r\" % desc)\n\n availables = (resource for resource in matches\n if resource.is_available(client))\n\n try:\n resource = availables.next()\n\n self._lock_resource(resource, client)\n locked_resources.append(resource)\n self.logger.debug(\"Resource %r locked successfully\", desc)\n\n except StopIteration:\n timeout = request.message.timeout\n waiting_time = time.time() - request.creation_time\n if timeout is not None and waiting_time > timeout:\n raise ResourceUnavailableError(\"No available resource \"\n \"meets the requirements: \"\n \"%r\" % desc)\n\n raise _WaitingForResourceException(\"Resource %r is unavailable\"\n \", waiting for it to be \"\n \"released\", desc)\n\n return ResourcesReply(resources=locked_resources)", "def get_available_time_slot():\n try:\n time_slot_set_list = list()\n # Read all time slot from database\n with open(InterviewCalendarApi.DB_FILE, \"r\") as fd:\n for line in fd:\n time_slot_list = list()\n (_,_,_, time_slots) = line.strip().split(\"|\")\n for time_slot in time_slots.split(\",\"):\n (from_time_slot, to_time_slot) = list(map(int, time_slot.split(\"-\")))\n time_slot_list.extend(range(from_time_slot, (to_time_slot + 1)))\n # Get all available time slot for every user\n time_slot_set_list.append(set(time_slot_list))\n \n # Find common time slot between multiple parties\n available_slots = list(set.intersection(*time_slot_set_list))\n\n msg = json.dumps({\"Status\": \"Success\", \"available_slots\": available_slots})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)\n except:\n err_msg = sys.exc_info()\n error = json.dumps({'error': 'Unable to find time slot due to error: %s' %str(err_msg)})\n return make_response(error, 401, InterviewCalendarApi.HEADERS)", "def getResources( dummy = False , retryPeriod = 0.5 ) :\n\n resources = {}\n\n\n # --------------------------------------------------\n # Add GPS\n # --------------------------------------------------\n import Gps\n if Config.IS_MONSTRO :\n resources.update({ \"gps\" : Gps.getGps( dummy = dummy ) })\n else :\n resources.update({ \"gps\" : Gps.getGps( dummy = True ) })\n\n\n if not dummy :\n\n # --------------------------------------------------\n # Add roboMote\n # --------------------------------------------------\n import RoboMote\n\n # find the com ports associated with motes\n done = False\n while not done :\n\n moteComs = findMotes()\n if (not moteComs) and (retryPeriod > 0) :\n time.sleep(retryPeriod)\n continue\n\n roboMote = None\n for i in range(len(moteComs)-1,-1,-1) :\n moteCom = moteComs[i]\n if RoboMote.isRoboMote( moteCom ) :\n del moteComs[i] \n roboMote = RoboMote.RoboMote( moteCom )\n done = True\n break\n\n if not roboMote :\n if ( retryPeriod > 0 ) :\n time.sleep(retryPeriod)\n continue\n else :\n raise RoboMote.RoboMoteException , \"Could not connect to the mote providing RoboMote\"\n\n # Add the 
roboMote to the resource list\n resources.update({ \"roboMote\" : roboMote })\n\n\n return resources", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def list_matched_resources(self, filter_by_user_id=False,\n filter_by_project_id=False,\n filter_by_resource_id=False,\n metadata_query=None,\n start_time=None,\n end_time=None,\n limit=None):\n\n query = self._make_general_query(filter_by_project_id,\n filter_by_user_id,\n filter_by_resource_id,\n metadata_query)\n query += self._make_timestamp_query(start_time, end_time)\n self._list_resources(query, limit)", "def get_resource_occupations_in_dt_range(dt_range, service_recipe, resources):\n new_resource_occupations = []\n\n for resource_needed in service_recipe:\n candidate_resources = filter(\n lambda r: r['type'] == resource_needed['type'],\n resources)\n\n for period in resource_needed['delta_periods']:\n period_dt_range = by_timedelta_range(period, dt_range[0])\n\n new_res_occupations_for_type = filter(\n lambda r: r[0]['type'] == resource_needed['type'],\n new_resource_occupations)\n available_resource = get_resource_available_in_dt_range(\n candidate_resources, period_dt_range,\n new_res_occupations_for_type)\n\n if available_resource is None:\n return None\n\n new_resource_occupations.append(\n (clean_resource(available_resource), period_dt_range))\n\n return new_resource_occupations", "def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r", "def compute_resources(instance: dict):\r\n\r\n # Retrieve usefull infos\r\n Interventions = instance[INTERVENTIONS_STR]\r\n T_max = instance[T_STR]\r\n Resources = instance[RESOURCES_STR]\r\n # Init resource usage dictionnary for each resource and time\r\n resources_usage = {}\r\n for resource_name in Resources.keys():\r\n resources_usage[resource_name] = np.zeros(T_max)\r\n # Compute value for each resource and time step\r\n for intervention_name, intervention in Interventions.items():\r\n # start time should be defined (already checked in scheduled constraint checker)\r\n if not START_STR in intervention:\r\n continue\r\n start_time = intervention[START_STR]\r\n start_time_idx = start_time - 1\r\n # index of list starts at 0\r\n intervention_worload = intervention[RESOURCE_CHARGE_STR]\r\n intervention_delta = int(intervention[DELTA_STR][start_time_idx])\r\n # compute effective worload\r\n for (\r\n resource_name,\r\n intervention_resource_worload,\r\n ) in intervention_worload.items():\r\n for time in range(start_time_idx, start_time_idx + intervention_delta):\r\n # null values are not available\r\n if (\r\n str(time + 1) in intervention_resource_worload\r\n and str(start_time) in intervention_resource_worload[str(time + 1)]\r\n ):\r\n resources_usage[resource_name][\r\n time\r\n ] += intervention_resource_worload[str(time + 1)][str(start_time)]\r\n\r\n return resources_usage", "def add_resources(event):\n anuket_resources.need()", "def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, 
tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n 
task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks", "def get_3779resources(self):\n\n resources = rpki.resource_set.resource_bag.from_POW_rfc3779(self.get_POW().getRFC3779())\n try:\n resources.valid_until = self.getNotAfter() # pylint: disable=E1101\n except AttributeError:\n pass\n return resources", "def getAvailableTimeslots(self, allTimeslots) -> [Timeslot]:\r\n # List with all Timeslots any of the Teachers is not available at.\r\n notAvailableTimeslotsTeachers = flatMap(lambda t: t.not_available_timeslots, self.teachers)\r\n # notAvailableTimeslotsTeachers = [item for sublist in map(lambda t: t.not_available_timeslots, self.teachers) for item in sublist]\r\n # If Lesson can only take place on forenoon, create list with all afternoon timeslots.\r\n if self.course.only_forenoon:\r\n notAvailableTimeslotsForenoon = list(filter(lambda t: t.number not in Timeslot.getForenoonTimeslotNumbers(), allTimeslots))\r\n else:\r\n notAvailableTimeslotsForenoon = []\r\n\r\n timeslots = [x for x in allTimeslots if x not in (notAvailableTimeslotsTeachers + notAvailableTimeslotsForenoon)]\r\n if self.available_timeslots: # If list is not empty. Else no restrictions.\r\n timeslots = [x for x in timeslots if x in self.available_timeslots]\r\n\r\n return timeslots", "def filter_only_remaining(self,now):\n\t\ttimeshift = now.replace(tzinfo=\"Europe/London\")\n\t\treturn Programs([program for program in self.list if program.end > timeshift and program.end < now])", "def _update_available_resources(self, context):\n\n all_nodes = self.driver.get_available_nodes()\n all_rps = self.scheduler_client.reportclient\\\n .get_filtered_resource_providers({})\n node_uuids = [node.uuid for node in all_nodes]\n\n # Clean orphan resource providers in placement\n for rp in all_rps:\n if rp['uuid'] not in node_uuids:\n server_by_node = objects.Server.list(\n context, filters={'node_uuid': rp['uuid']})\n if server_by_node:\n continue\n self.scheduler_client.reportclient.delete_resource_provider(\n rp['uuid'])\n\n for node in all_nodes:\n if self.driver.is_node_consumable(node):\n self.scheduler_client.reportclient \\\n .delete_allocations_for_resource_provider(node.uuid)\n resource_class = sched_utils.ensure_resource_class_name(\n node.resource_class)\n inventory = self.driver.get_node_inventory(node)\n inventory_data = {resource_class: inventory}\n self.scheduler_client.set_inventory_for_provider(\n node.uuid, node.name or node.uuid, inventory_data,\n resource_class)" ]
[ "0.6184426", "0.60298663", "0.5953174", "0.5767035", "0.5638926", "0.5493595", "0.54747224", "0.5458828", "0.54446715", "0.5438135", "0.53841877", "0.53665984", "0.5353068", "0.53493536", "0.53424084", "0.53357965", "0.53341115", "0.5326668", "0.53145623", "0.5271738", "0.526316", "0.52614367", "0.525207", "0.5250566", "0.51932424", "0.5174389", "0.51693815", "0.51542294", "0.5148677", "0.5097271" ]
0.73468834
0
Test to see if the mongodb client logger can persist a log entry to the database
def test_mongo_logging_client_persists_log():
    error_message = "This is a test message."
    logger = LoggingService(console_output=True)
    result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))
    logger.log(LogEntry(LogLevel.WARN, __name__, error_message))
    logger.log(LogEntry(LogLevel.INFO, __name__, error_message))
    logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))
    assert result.message == error_message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_log(self):\n message = \"Message is {0}\".format(random.random())\n resp = gracedb.writeLog(eventId, message)\n self.assertEqual(resp.status, 201)\n new_log_uri = resp.getheader('Location')\n new_log = resp.json()\n self.assertEqual(new_log_uri, new_log['self'])\n check_new_log = gracedb.get(new_log_uri).json()\n self.assertEqual(check_new_log['comment'], message)", "def check_logging(self):\n if datetime.datetime.utcnow().strftime('%Y%m%d') != self.logger_utc_date:\n # reset\n self.shut_down_logger()\n self.logger, self.logger_utc_date = self.set_up_logging(_name='archive', _mode='a')", "def test_fetch_log_valid():\n ident = _id()\n proj.fetch('test', ident)\n log = proj.fetch_log('test', ident)\n assert 'this should go into run.log' in log", "def _log_exists(name):\n return name in logging.Logger.manager.loggerDict", "def test_post_add_log_event(self):\n pass", "def checkPersistence(self, _, __): # pylint: disable=C0103\n return False", "def test_error_logging(self):\n # Verify nothing in the journal\n assert len(Record.objects.recent('heartbeat')) == 0\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': 'foosurvey',\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n 'question_text': 'how was lunch?',\n 'variation_id': '1'\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 400\n errors = json.loads(resp.content)['errors']\n assert len(errors) > 0\n\n # Verify there's one entry now.\n assert len(Record.objects.recent('heartbeat')) == 1", "def test_create_log(self):\n log = self.log\n\n self.assertTrue(isinstance(log, Log))\n self.assertEqual(log.name, \"Test Log\")", "def Persist(self) -> bool:", "def Persist(self) -> bool:", "def _verify_logging(self):\n log_file = self.device.log_file_name\n self.assertTrue(os.path.exists(log_file),\n f\"{self.device.name}'s log file {log_file} does not exist\")\n self.assertTrue(os.path.getsize(log_file),\n f\"{self.device.name}'s log file {log_file} is empty\")", "def test_logging(self):\n self._verify_logging()", "def test_watchdogs_no_relevant(self):\n\n # distillery with no categories\n distillery = Distillery.objects.get_by_natural_key('mongodb.test_database.test_docs')\n distillery.collection.insert = Mock(return_value=self.mock_doc_id)\n\n doc_id = distillery._save_and_send_signal(self.data)\n\n alerts = Alert.objects.all()\n self.assertEqual(alerts.count(), 0)\n self.assertEqual(doc_id, self.mock_doc_id)", "def _check_write_consistency(self):\n self.logger.warning('Not checking write consistency')", "def check(log=False):\n return True", "def record(params, git_info = {}):\n print \"recording...\"\n\n try:\n # connect to MongoDB\n # config = json.load(open(os.environ.get('HOME') + \"/sandbox/config.json\"))\n config = json.load(open(os.environ.get('HOME') + \"/LSEMS/config.json\"))\n try:\n client = MongoClient(config[\"mongodb_url\"])\n except Exception as e:\n raise Exception(\"fail to connect to given MongoDB address: \" + DB_addr)\n\n # check and run the thing\n missing = checkKeys(params, ['data_set', 'src', 'type', 'param'])\n if len(missing) != 0:\n raise Exception(\"missing attribute\"+('s' if len(missing)!=1 else '')+\": \"+str(missing))\n\n params['time'] = asctime()\n params['commit_id'] = git_info['commit_id']\n params['name'] = git_info['name']\n repo_name = git_info['repo_name']\n params['repo_name'] = repo_name\n user = 
verifyUser(client, git_info['name'])\n\n exp = user.find_one({'exp_name': repo_name})\n if not exp:\n print 'adding new experiment '+repo_name+'...'\n user.insert({'exp_name': repo_name, 'exp_records':[]})\n old_records = user.find_one({'exp_name': repo_name})['exp_records']\n user.update({'exp_name': repo_name}, {'$set': {'exp_records': old_records + [params]}})\n\n print params\n #user.insert(params)\n client.close()\n return True,params\n except Exception as e:\n print e\n print \"Aborting...\"\n return False,{}", "def test_log_add_admin(self):\n log_count_init = LoggerHistory.objects.count()\n new_admin = UserFactory.create()\n AdminsFactory.create(project=self.project, user=new_admin)\n\n log = LoggerHistory.objects.last()\n log_count = LoggerHistory.objects.count()\n\n self.assertNotEqual(log.user, {\n 'id': str(self.user.id),\n 'display_name': self.user.display_name})\n self.assertEqual(log.project, {\n 'id': str(self.project.id),\n 'name': self.project.name})\n self.assertEqual(log.usergroup, None)\n self.assertEqual(log.category, None)\n self.assertEqual(log.field, None)\n self.assertEqual(log.location, None)\n self.assertEqual(log.observation, None)\n self.assertEqual(log.comment, None)\n self.assertEqual(log.subset, None)\n self.assertEqual(log.action, {\n 'id': 'created',\n 'class': 'Admins',\n 'user_id': str(new_admin.id),\n 'user_display_name': new_admin.display_name})\n self.assertEqual(log_count, log_count_init + 1)\n self.assertEqual(log.historical, None)", "def test_nonsilent_write_errors(error_logger):\n\n tracker = pawprint.Tracker(db=\"postgresql:///fail\", logger=error_logger)\n\n with pytest.raises(Exception):\n tracker.write()\n with pytest.raises(Exception):\n tracker.write(event=\"going_to_fail\")\n\n with open(\"pawprint.log\", mode=\"r\") as f:\n logs = f.readlines()\n print(logs[3])\n\n assert len(logs) == 6\n assert logs[0].startswith(\"pawprint: pawprint failed to write.\")\n assert \"Table: None. 
Query: INSERT INTO None () VALUES ();\" in logs[0]\n assert \"Query: INSERT INTO None (event) VALUES ('going_to_fail')\" in logs[3]\n\n os.remove(\"pawprint.log\")", "def test_log_update_isprivate(self):\n log_count_init = LoggerHistory.objects.count()\n\n original_isprivate = self.project.isprivate\n self.project.isprivate = False\n self.project.save()\n\n log = LoggerHistory.objects.last()\n log_count = LoggerHistory.objects.count()\n\n self.assertNotEqual(log.user, {\n 'id': str(self.user.id),\n 'display_name': self.user.display_name})\n self.assertEqual(log.project, {\n 'id': str(self.project.id),\n 'name': self.project.name})\n self.assertEqual(log.usergroup, None)\n self.assertEqual(log.category, None)\n self.assertEqual(log.field, None)\n self.assertEqual(log.location, None)\n self.assertEqual(log.observation, None)\n self.assertEqual(log.comment, None)\n self.assertEqual(log.subset, None)\n self.assertEqual(log.action, {\n 'id': 'updated',\n 'class': 'Project',\n 'field': 'isprivate',\n 'value': str(self.project.isprivate)})\n self.assertEqual(log_count, log_count_init + 1)\n history = self.project.history.get(pk=log.historical.get('id'))\n self.assertEqual(history.id, self.project.id)\n self.assertEqual(history.isprivate, original_isprivate)\n\n original_isprivate = self.project.isprivate\n self.project.isprivate = True\n self.project.save()\n\n log = LoggerHistory.objects.last()\n log_count = LoggerHistory.objects.count()\n\n self.assertNotEqual(log.user, {\n 'id': str(self.user.id),\n 'display_name': self.user.display_name})\n self.assertEqual(log.project, {\n 'id': str(self.project.id),\n 'name': self.project.name})\n self.assertEqual(log.usergroup, None)\n self.assertEqual(log.category, None)\n self.assertEqual(log.field, None)\n self.assertEqual(log.location, None)\n self.assertEqual(log.observation, None)\n self.assertEqual(log.comment, None)\n self.assertEqual(log.subset, None)\n self.assertEqual(log.action, {\n 'id': 'updated',\n 'class': 'Project',\n 'field': 'isprivate',\n 'value': str(self.project.isprivate)})\n self.assertEqual(log_count, log_count_init + 2)\n history = self.project.history.get(pk=log.historical.get('id'))\n self.assertEqual(history.id, self.project.id)\n self.assertEqual(history.isprivate, original_isprivate)", "def _save_experiment_to_db_if_possible(\n self, experiment: Experiment, suppress_all_errors: bool = False\n ) -> bool:\n if self.db_settings_set:\n save_experiment(experiment=experiment, db_settings=self.db_settings)\n return True\n return False", "def test_user_does_not_create_log(self):\n user = UserFactory.create()\n self.request.user = user\n user_logged_in.send(\n sender=user.__class__,\n request=self.request,\n user=user,\n )\n\n self.assertFalse(LogEntry.objects.count())", "def should_insert_db(prev_event, current_event):\n return (not prev_event[\"has_clip\"] and not prev_event[\"has_snapshot\"]) and (\n current_event[\"has_clip\"] or current_event[\"has_snapshot\"]\n )", "def check( log = False):\n return True", "def create_sync_entry(ts, coll, idx):\n sync_log = connection.ElasticLogs()\n sync_log.ts = ts\n sync_log.coll = unicode(coll)\n sync_log.idx = unicode(idx)\n sync_log.save()\n return True", "def insert_record(self, record, session):\n try:\n session.add(record)\n session.commit()\n session.close()\n return True\n except:\n\n logging.exception(\"http record cannot be added to db \" \":Time: \" + str(datetime.datetime.now()))\n return False", "def test_logentry_save(self):\n logentry = 
LogEntry.objects.get(content_type__model__iexact=\"article\")\n action_time = logentry.action_time\n logentry.save()\n self.assertEqual(logentry.action_time, action_time)", "def test_silent_write_errors():\n\n tracker = pawprint.Tracker(db=None, table=None)\n\n try:\n tracker.write(event=\"This will fail silently.\")\n except Exception:\n pytest.fail(\"Failed to fail silently.\")", "def MongoLog(self, request_number, process, log_message):\n try:\n print(\"Attempting to connect to MongoDB...\")\n client = MongoClient('localhost', 27017)\n db = client.database\n collection = db.logging_database\n\n status_log = {\"Request_No\": request_number, \"Brewing_Process\": process, \"Log_Message\": log_message,\n \"Time\": datetime.datetime.now()}\n\n try:\n collection.insert_one(status_log)\n except TypeError: # Error Handling for MongoDB versions that do not implement insert_one() method\n collection.insert(status_log)\n\n print(status_log)\n except Exception as e:\n print(\"MongoDB connection Error:\" + str(e))", "def test_user_login_does_not_audit_save(self):\n self.user.save(update_fields=['last_login'])\n self.assertTrue(AuditTrail.objects.count() >= 1)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)", "def logger():\n return TestListenerDB()" ]
[ "0.6045933", "0.586778", "0.5836898", "0.5810724", "0.57567275", "0.56929016", "0.56574714", "0.5656057", "0.5636488", "0.5636488", "0.5576545", "0.55317235", "0.5509709", "0.54728764", "0.5472665", "0.5462667", "0.54399234", "0.53963417", "0.53956026", "0.5391641", "0.53832144", "0.53706545", "0.5351509", "0.535034", "0.5348309", "0.5337135", "0.53240657", "0.5318239", "0.5310859", "0.5304698" ]
0.6278423
0
All horizontal squares from the piece's point of view. Returns a list of relative movements up to the board's bound.
def horizontals(self):
    horizontal_shifts = set(izip_longest(map(
        lambda i: i - self.x, range(self.board.length)), [], fillvalue=0))
    horizontal_shifts.discard((0, 0))
    return horizontal_shifts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spanning_squares(self):\n spanning = []\n for i in range(self.length):\n # Assume ACROSS and DOWN are the only valid directions\n if self.direction == \"ACROSS\":\n spanning.append((self.start_x + i, self.start_y))\n else:\n spanning.append((self.start_x, self.start_y + i))\n return spanning", "def get_moves(self):\n moves = []\n i, j = self._get_coordinates(0) # blank space\n\n if i > 0:\n moves.append(Puzzle(self._swap(i, j, i - 1, j))) # move up\n\n if j < self.PUZZLE_NUM_COLUMNS - 1:\n moves.append(Puzzle(self._swap(i, j, i, j + 1))) # move right\n\n if j > 0:\n moves.append(Puzzle(self._swap(i, j, i, j - 1))) # move left\n\n if i < self.PUZZLE_NUM_ROWS - 1:\n moves.append(Puzzle(self._swap(i, j, i + 1, j))) # move down\n\n return moves", "def moves(self):\n\n moves = list()\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n move = (row, col)\n\n if self.board[row][col] == 9:\n moves.append(move)\n\n if self.board[row][col] == 1 or self.board[row][col] == 2:\n\n move = (row - 1, col)\n\n if self.board[row - 1][col] == 1 or self.board[row - 1][col] == 2:\n\n pass\n\n else:\n\n moves.append(move)\n\n return moves", "def get_moves_for_square(self, square):\n (x,y) = square\n\n # determine the color of the piece.\n color = self[x][y]\n\n # skip empty source squares.\n if color==0:\n return []\n\n # search all possible directions.\n moves = []\n for direction in self.__directions:\n move = self._discover_move(square, direction)\n if move:\n # print(square,move,direction)\n moves.append(move)\n\n # return the generated move list\n return moves", "def possible_moves(self, piece):\n def _index(orig, off):\n \"\"\"Helper function to find the new index.\"\"\"\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)\n\n p_x, p_y = piece\n p_i = _index(piece, (0, 0))\n\n # pass a list of the four corners first for basic possibles\n move_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for i in [-1, 1] for j in [-1, 1]]\n possibles = self.squares[p_i].can_move(piece, move_land)\n\n # next append the new list from jumps\n jump_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for j in [-2, 2] for i in [-2, 2]]\n possibles += self.squares[p_i].can_jump(piece, move_land, jump_land)\n\n # clean out the list of duplicates, although there should be none\n return [m for i, m in enumerate(possibles) if m not in possibles[:i]]", "def available_moves(self):\n\n heaps = range(len(self.heaps))\n return [(h, take) for h in range(len(self.heaps))\n for take in range(1, self.heaps[h] + 1)]", "def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves", "def get_move_list(self):\n return [\n tuple(x) for x in np.argwhere(self.board == HexBoard.EMPTY).tolist()\n ]", "def available_moves(self):\n moves = []\n for x, y in self.available_boards:\n moves.extend([self.to_position(x, y, i, j) for (i, j)\n in self.boards[x][y].empty_squares])\n return moves", "def moves(self):\n\n # define a full range, which we can compare against columns,\n # rows, or blocks. 
they're all the same when stored as sets.\n line = set(range(1, 10))\n moves = []\n\n # iterate every cell on the board\n for row in range(0, 9):\n for col in range(0, 9):\n\n # ignore this cell if it's already filled\n i = self._index(col, row)\n if self.data[i] is not None:\n continue\n\n # fetch the adjacent cells\n row_values = set(self._row(row))\n col_values = set(self._column(col))\n bck_values = set(self._block(col, row))\n\n # subtract the values present in the adjacent cells\n # (since this cell *can't* be of any of those values),\n # to leave the list of possibilities for this cell\n missing = line.difference(row_values, col_values, bck_values)\n\n # if there's only *one* possibility, we've found the\n # solution to this cell\n if len(missing) == 1:\n moves.append((col, row, missing.pop()))\n\n return moves", "def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result", "def get_moves(self):\n return self.piece_behavior.get_moves(self.board, self.position)", "def enumerate_moves(self):\n add_ew = lambda x: [x+'e', x+'w']\n allowed_catches = add_ew(self._directions[0])\n moves = []\n # First add the one/two step forward moves\n new_slot = self._board.get_dir(self._current_space, self._directions[0])\n if new_slot and new_slot.is_free():\n moves.append(ChessMove(self._current_space, new_slot))\n if (self._side == BLACK and new_slot.row == self._board.size - 1) or \\\n (self._side == WHITE and new_slot.row == 0):\n moves[-1].add_promotion()\n if (self._side == BLACK and self._current_space.row == 1) or \\\n (self._side == WHITE and self._current_space.row == self._board.size -2):\n new_slot = self._board.get_dir(new_slot, self._directions[0])\n if new_slot and new_slot.is_free():\n moves.append(ChessMove(self._current_space, new_slot))\n\n # Now add all the captures.\n for direction in allowed_catches:\n new_slot = self._board.get_dir(self._current_space, direction)\n if new_slot and new_slot.has_opponent(self._side):\n moves.append(ChessMove(self._current_space, new_slot, [new_slot]))\n if (self._side == BLACK and new_slot.row == self._board.size - 1) or \\\n (self._side == WHITE and new_slot.row == 0):\n moves[-1].add_promotion()\n return moves", "def find_moves(self):\n\n from itertools import product\n free_position = self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]", "def get_possible_moves(self) -> list:\n p1_count = 0\n p2_count = 0\n ley_line_total = (self.side_length + 1) * 3\n for itype in self.current_ley_lines:\n for line in itype:\n if line[0] == '1':\n p1_count += 1\n if line[0] == '2':\n p2_count += 1\n if p1_count >= ley_line_total / 2 or p2_count >= ley_line_total / 2:\n return []\n moves = []\n for letter in self.current_board:\n if letter.isalpha():\n moves.append(letter)\n return moves", "def generate_possible_moves(self):\r\n\t\t# Moves:\r\n\t\t# 0 - North\r\n\t\t# 1 - East\r\n\t\t# 2 - South\r\n\t\t# 3 - West\r\n\r\n\t\tmoves = []\r\n\r\n\t\tif self.x != 0:\r\n\t\t\tmoves.append(0)\r\n\t\tif self.y != self.n-1:\r\n\t\t\tmoves.append(1)\r\n\t\tif self.x != self.n-1:\r\n\t\t\tmoves.append(2)\r\n\t\tif self.y != 0:\r\n\t\t\tmoves.append(3)\r\n\r\n\t\treturn moves", "def list_squares(self):\n squares_lst = []\n row, col = 0, 0\n 
while row < self.board_size:\n while col < self.board_size:\n square = self.add_square(row, col)\n squares_lst.append(square)\n col += self.c_size\n row += self.r_size\n col = 0\n return squares_lst", "def _get_piece_moves(self, x, y):\n\n piece = self.get_piece(x, y)\n moves = []\n\n if not piece:\n return moves\n\n if piece.name == 'rook' or piece.name == 'queen':\n direcs = ['up', 'down', 'left', 'right']\n moves = [self._get_moves_indirection(x, y, direc) for direc in\n direcs]\n\n elif piece.name == 'bishop' or piece.name == 'queen':\n direcs = ['d1', 'd2', 'd3', 'd4']\n for direc in direcs:\n moves += self._get_moves_indirection(x, y, direc)\n\n elif piece.name == 'king':\n moves = [(x-1, y-1), (x-1, y), (x-1, y+1), (x, y-1),\n (x, y+1), (x+1, y-1), (x+1, y), (x+1, y+1)]\n\n elif piece.name == 'knight':\n moves = [(x-1, y-2), (x-2, y-1), (x-2, y+1), (x-1, y+2),\n (x+1, y+2), (x+2, y+1), (x+1, y-2), (x+2, y-1)]\n\n elif piece.name == 'pawn':\n if piece.color == ChessGame.BLACK:\n moves = [(x-1, y), (x-1, y-1), (x-1, y+1)]\n else:\n moves = [(x+1, y), (x+1, y-1), (x+1, y+1)]\n\n tmp = list(moves)\n for u, v in tmp:\n if v != y and not self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n if v == y and self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n mycolor = piece.color\n valid = set()\n for (u, v) in moves:\n if not self.in_bounds(u, v):\n continue\n\n if not self.get_piece(u, v): # board is blank\n valid.add((u, v))\n\n if self.is_enemy(u, v, mycolor):\n valid.add((u, v))\n\n return valid", "def get_move_pieces(self, player):\n self.mark_moves(player)\n moves = [piece for piece in self.pieces if piece.get_state() == MOVE]\n self.clear_moves()\n return moves", "def _determine_horizontal_and_vertical_moves(self, coordinates):\n\n (row, col) = coordinates\n horizontal = [(r, col) for r in range(8) if r != row]\n vertical = [(row, c) for c in range(8) if c != col]\n return horizontal + vertical", "def moves(self):\n move_list = list()\n for i in range(self.n):\n row = self.queens[i][0]\n col = self.queens[i][1]\n for rd in [-1,0,1]:\n for cd in [-1,0,1]:\n if (rd == 0) and (cd == 0):\n continue\n new_pos = [row+rd, col+cd]\n if (new_pos[0] >= 0) and (new_pos[0] < self.n) and (new_pos[1] >= 0) and (new_pos[1] < self.n):\n if not new_pos in self.queens: \n move_list.append([i, new_pos])\n\n return move_list", "def squares(self):\n ship_length = self.type\n\n squares = [self.star_square]\n for step in range(1, ship_length):\n relative_square = self.get_square_at_relative_position(\n self.star_square, self.orientation, stepped_squares=step)\n squares.append(relative_square)\n return squares", "def get_all_positions(board, white_turn):\n list = []\n for row in range(8):\n for col in range(8):\n # White\n if white_turn and white_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for 
valid_pos in valid_positions_king_white((row, col), board):\n list.append(((row, col), valid_pos))\n # Black\n elif (not white_turn) and black_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_black((row, col), board):\n list.append(((row, col), valid_pos))\n return list", "def _get_possible_moves(board, lightcycle):\n result = []\n for diff in ((0, 1, PlayerActions.MOVE_DOWN), (1, 0, PlayerActions.MOVE_RIGHT), (0, -1, PlayerActions.MOVE_UP), (-1, 0, PlayerActions.MOVE_LEFT)):\n next_x = lightcycle['position'][0] + diff[0]\n next_y = lightcycle['position'][1] + diff[1]\n if 0 <= next_x < len(board) and 0 <= next_y < len(board[0]):\n if board[next_x][next_y] in (EMPTY, POWERUP):\n result += [diff]\n return result", "def _get_valid_moves(self, piece):\n moves = {}\n left = piece.col - 1 # Left position\n right = piece.col + 1 # Right position\n row = piece.row # Current row\n\n if piece.get_player() == Player.white or piece.is_king():\n # Checks the movements from the bottom to the top\n moves.update(self._traverse_left(row - 1, max(row - 3, -1), -1, piece.get_player(), left))\n moves.update(self._traverse_right(row - 1, max(row - 3, -1), -1, piece.get_player(), right))\n\n if piece.get_player() == Player.black or piece.is_king():\n # Checks the movements from the top to the bottom\n moves.update(self._traverse_left(row + 1, min(row + 3, constant.BOARD_DIMENSION),\n 1, piece.get_player(), left))\n moves.update(self._traverse_right(row + 1, min(row + 3, constant.BOARD_DIMENSION),\n 1, piece.get_player(), right))\n\n return moves", "def get_square_moves(self, moves):\r\n return self.board.get_square_moves(moves)", "def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves", "def any_possible_moves_horizontal(self):\n for i in range(self.TILES_PER_ROW):\n for j in range(self.TILES_PER_ROW - 1):\n if self.main_grid_values[i][j] == self.main_grid_values[i][j+1]:\n return True\n\n return False", "def get_all_game_pieces_potential_moves(self):\n\n board = self.get_board()\n\n for row in board:\n\n for column in row:\n\n if column is not None:\n\n print(column.get_label(), ': ' , column.get_potential_moves())", "def moves(self):\n move_list = []\n for direction in Maze.possible_directions:\n move = Maze.dirs_to_moves[direction]\n if (0 <= (self.location[0]+move[0]) < len(self.grid) and\n 0 <= (self.location[1]+move[1]) < len(self.grid[0]) and\n 
self.grid[self.location[0]+move[0]][self.location[1]+move[1]] != 'X'):\n move_list.append(move)\n\n return move_list" ]
[ "0.6494722", "0.64518917", "0.6354893", "0.63426304", "0.63299483", "0.6264752", "0.6235491", "0.6193277", "0.6177784", "0.6174575", "0.61528724", "0.6148699", "0.61453056", "0.6122243", "0.6096462", "0.6087461", "0.60867596", "0.60618335", "0.6006694", "0.59964144", "0.5966052", "0.59410864", "0.5909955", "0.59032065", "0.58670235", "0.58619684", "0.58594424", "0.5836453", "0.5822827", "0.5804912" ]
0.7379711
0
All vertical squares from the piece's point of view. Returns a list of relative movements up to the board's bound.
def verticals(self):
    vertical_shifts = set(izip_longest([], map(
        lambda i: i - self.y, range(self.board.height)), fillvalue=0))
    vertical_shifts.discard((0, 0))
    return vertical_shifts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vertical(self, x, y):\n\n return [row[x] for row in self._board]", "def spanning_squares(self):\n spanning = []\n for i in range(self.length):\n # Assume ACROSS and DOWN are the only valid directions\n if self.direction == \"ACROSS\":\n spanning.append((self.start_x + i, self.start_y))\n else:\n spanning.append((self.start_x, self.start_y + i))\n return spanning", "def get_moves_for_square(self, square):\n (x,y) = square\n\n # determine the color of the piece.\n color = self[x][y]\n\n # skip empty source squares.\n if color==0:\n return []\n\n # search all possible directions.\n moves = []\n for direction in self.__directions:\n move = self._discover_move(square, direction)\n if move:\n # print(square,move,direction)\n moves.append(move)\n\n # return the generated move list\n return moves", "def list_squares(self):\n squares_lst = []\n row, col = 0, 0\n while row < self.board_size:\n while col < self.board_size:\n square = self.add_square(row, col)\n squares_lst.append(square)\n col += self.c_size\n row += self.r_size\n col = 0\n return squares_lst", "def find_moves(self):\n\n from itertools import product\n free_position = self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]", "def get_moves(self):\n moves = []\n i, j = self._get_coordinates(0) # blank space\n\n if i > 0:\n moves.append(Puzzle(self._swap(i, j, i - 1, j))) # move up\n\n if j < self.PUZZLE_NUM_COLUMNS - 1:\n moves.append(Puzzle(self._swap(i, j, i, j + 1))) # move right\n\n if j > 0:\n moves.append(Puzzle(self._swap(i, j, i, j - 1))) # move left\n\n if i < self.PUZZLE_NUM_ROWS - 1:\n moves.append(Puzzle(self._swap(i, j, i + 1, j))) # move down\n\n return moves", "def moves(self):\n\n moves = list()\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n move = (row, col)\n\n if self.board[row][col] == 9:\n moves.append(move)\n\n if self.board[row][col] == 1 or self.board[row][col] == 2:\n\n move = (row - 1, col)\n\n if self.board[row - 1][col] == 1 or self.board[row - 1][col] == 2:\n\n pass\n\n else:\n\n moves.append(move)\n\n return moves", "def moves(self):\n\n # define a full range, which we can compare against columns,\n # rows, or blocks. 
they're all the same when stored as sets.\n line = set(range(1, 10))\n moves = []\n\n # iterate every cell on the board\n for row in range(0, 9):\n for col in range(0, 9):\n\n # ignore this cell if it's already filled\n i = self._index(col, row)\n if self.data[i] is not None:\n continue\n\n # fetch the adjacent cells\n row_values = set(self._row(row))\n col_values = set(self._column(col))\n bck_values = set(self._block(col, row))\n\n # subtract the values present in the adjacent cells\n # (since this cell *can't* be of any of those values),\n # to leave the list of possibilities for this cell\n missing = line.difference(row_values, col_values, bck_values)\n\n # if there's only *one* possibility, we've found the\n # solution to this cell\n if len(missing) == 1:\n moves.append((col, row, missing.pop()))\n\n return moves", "def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves", "def squares(self) -> list:\n return self.__squares", "def get_square_moves(self, moves):\r\n return self.board.get_square_moves(moves)", "def possible_moves(self, piece):\n def _index(orig, off):\n \"\"\"Helper function to find the new index.\"\"\"\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)\n\n p_x, p_y = piece\n p_i = _index(piece, (0, 0))\n\n # pass a list of the four corners first for basic possibles\n move_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for i in [-1, 1] for j in [-1, 1]]\n possibles = self.squares[p_i].can_move(piece, move_land)\n\n # next append the new list from jumps\n jump_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for j in [-2, 2] for i in [-2, 2]]\n possibles += self.squares[p_i].can_jump(piece, move_land, jump_land)\n\n # clean out the list of duplicates, although there should be none\n return [m for i, m in enumerate(possibles) if m not in possibles[:i]]", "def available_moves(self):\n moves = []\n for x, y in self.available_boards:\n moves.extend([self.to_position(x, y, i, j) for (i, j)\n in self.boards[x][y].empty_squares])\n return moves", "def any_possible_moves_vertical(self):\n for i in range(self.TILES_PER_ROW - 1):\n for j in range(self.TILES_PER_ROW):\n if self.main_grid_values[i][j] == self.main_grid_values[i+1][j]:\n return True\n\n return False", "def _determine_horizontal_and_vertical_moves(self, coordinates):\n\n (row, col) = coordinates\n horizontal = [(r, col) for r in range(8) if r != row]\n vertical = [(row, c) for c in range(8) if c != col]\n return horizontal + vertical", "def squares(self):\n ship_length = self.type\n\n squares = [self.star_square]\n for step in range(1, ship_length):\n relative_square = self.get_square_at_relative_position(\n self.star_square, self.orientation, stepped_squares=step)\n squares.append(relative_square)\n return squares", "def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result", "def get_neighbors(self):\n return 
list(map(self.game.square, [self.position - self.game.rules[\"row_len\"], self.position + 1, self.position + self.game.rules[\"row_len\"], self.position - 1]))", "def _get_piece_moves(self, x, y):\n\n piece = self.get_piece(x, y)\n moves = []\n\n if not piece:\n return moves\n\n if piece.name == 'rook' or piece.name == 'queen':\n direcs = ['up', 'down', 'left', 'right']\n moves = [self._get_moves_indirection(x, y, direc) for direc in\n direcs]\n\n elif piece.name == 'bishop' or piece.name == 'queen':\n direcs = ['d1', 'd2', 'd3', 'd4']\n for direc in direcs:\n moves += self._get_moves_indirection(x, y, direc)\n\n elif piece.name == 'king':\n moves = [(x-1, y-1), (x-1, y), (x-1, y+1), (x, y-1),\n (x, y+1), (x+1, y-1), (x+1, y), (x+1, y+1)]\n\n elif piece.name == 'knight':\n moves = [(x-1, y-2), (x-2, y-1), (x-2, y+1), (x-1, y+2),\n (x+1, y+2), (x+2, y+1), (x+1, y-2), (x+2, y-1)]\n\n elif piece.name == 'pawn':\n if piece.color == ChessGame.BLACK:\n moves = [(x-1, y), (x-1, y-1), (x-1, y+1)]\n else:\n moves = [(x+1, y), (x+1, y-1), (x+1, y+1)]\n\n tmp = list(moves)\n for u, v in tmp:\n if v != y and not self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n if v == y and self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n mycolor = piece.color\n valid = set()\n for (u, v) in moves:\n if not self.in_bounds(u, v):\n continue\n\n if not self.get_piece(u, v): # board is blank\n valid.add((u, v))\n\n if self.is_enemy(u, v, mycolor):\n valid.add((u, v))\n\n return valid", "def get_moves(self):", "def get_moves(self):\n return self.piece_behavior.get_moves(self.board, self.position)", "def _get_square(self, start_row, start_col):\n end_row = start_row + self.block_size\n end_col = start_col + self.block_size\n\n result = np.array(self.board)[start_row:end_row,\n start_col:end_col]\n return result.tolist()", "def get_possible_moves(self) -> list:\n p1_count = 0\n p2_count = 0\n ley_line_total = (self.side_length + 1) * 3\n for itype in self.current_ley_lines:\n for line in itype:\n if line[0] == '1':\n p1_count += 1\n if line[0] == '2':\n p2_count += 1\n if p1_count >= ley_line_total / 2 or p2_count >= ley_line_total / 2:\n return []\n moves = []\n for letter in self.current_board:\n if letter.isalpha():\n moves.append(letter)\n return moves", "def enumerate_moves(self):\n add_ew = lambda x: [x+'e', x+'w']\n allowed_catches = add_ew(self._directions[0])\n moves = []\n # First add the one/two step forward moves\n new_slot = self._board.get_dir(self._current_space, self._directions[0])\n if new_slot and new_slot.is_free():\n moves.append(ChessMove(self._current_space, new_slot))\n if (self._side == BLACK and new_slot.row == self._board.size - 1) or \\\n (self._side == WHITE and new_slot.row == 0):\n moves[-1].add_promotion()\n if (self._side == BLACK and self._current_space.row == 1) or \\\n (self._side == WHITE and self._current_space.row == self._board.size -2):\n new_slot = self._board.get_dir(new_slot, self._directions[0])\n if new_slot and new_slot.is_free():\n moves.append(ChessMove(self._current_space, new_slot))\n\n # Now add all the captures.\n for direction in allowed_catches:\n new_slot = self._board.get_dir(self._current_space, direction)\n if new_slot and new_slot.has_opponent(self._side):\n moves.append(ChessMove(self._current_space, new_slot, [new_slot]))\n if (self._side == BLACK and new_slot.row == self._board.size - 1) or \\\n (self._side == WHITE and new_slot.row == 0):\n moves[-1].add_promotion()\n return moves", "def get_move_list(self):\n return [\n tuple(x) for x 
in np.argwhere(self.board == HexBoard.EMPTY).tolist()\n ]", "def moves(self):\n move_list = list()\n for i in range(self.n):\n row = self.queens[i][0]\n col = self.queens[i][1]\n for rd in [-1,0,1]:\n for cd in [-1,0,1]:\n if (rd == 0) and (cd == 0):\n continue\n new_pos = [row+rd, col+cd]\n if (new_pos[0] >= 0) and (new_pos[0] < self.n) and (new_pos[1] >= 0) and (new_pos[1] < self.n):\n if not new_pos in self.queens: \n move_list.append([i, new_pos])\n\n return move_list", "def _get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]", "def generate_possible_moves(self):\r\n\t\t# Moves:\r\n\t\t# 0 - North\r\n\t\t# 1 - East\r\n\t\t# 2 - South\r\n\t\t# 3 - West\r\n\r\n\t\tmoves = []\r\n\r\n\t\tif self.x != 0:\r\n\t\t\tmoves.append(0)\r\n\t\tif self.y != self.n-1:\r\n\t\t\tmoves.append(1)\r\n\t\tif self.x != self.n-1:\r\n\t\t\tmoves.append(2)\r\n\t\tif self.y != 0:\r\n\t\t\tmoves.append(3)\r\n\r\n\t\treturn moves", "def findPlacesToMove():\n movesDestinations = [];\n \n curY = curBlank[0];\n curX = curBlank[1];\n\n if(curY-1 >= 1): #UP\n movesDestinations.append((curY-1, curX));\n if(curY+1 <= n): #DOWN\n movesDestinations.append((curY+1, curX));\n if(curX-1 >= 1): #LEFT\n movesDestinations.append((curY, curX-1));\n if(curX+1 <= n): #RIGHT\n movesDestinations.append((curY, curX+1));\n \n return movesDestinations;", "def get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]" ]
[ "0.6549495", "0.65164524", "0.6475518", "0.6370953", "0.63641816", "0.63362104", "0.6315978", "0.62523115", "0.6206709", "0.6104576", "0.6071588", "0.60655314", "0.6062132", "0.60403407", "0.60398203", "0.6025194", "0.5976983", "0.5959147", "0.59579796", "0.5956464", "0.5949263", "0.59397024", "0.59184444", "0.591393", "0.5899039", "0.5862229", "0.5859187", "0.58585304", "0.5839112", "0.5821519" ]
0.7238823
0
Return list of relative movements allowed.
def movements(self):
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMovableRange(self, unit):\n CostArr_mod = modifyMovCost(CostArr, ability)\n Obstacles = self.getUnpassable(player) # units that are not passable....\n pos_list, path_list = UCS_solve(unit.pos, CostArr_mod, unit.MovPnt)\n return pos_list, path_list", "def all_rel_actions(self, player):\n return [m for m in MOVES if m.norm() == 1 and m.direction() != (0,-1)]", "def moves(self):\n move_list = []\n for direction in Maze.possible_directions:\n move = Maze.dirs_to_moves[direction]\n if (0 <= (self.location[0]+move[0]) < len(self.grid) and\n 0 <= (self.location[1]+move[1]) < len(self.grid[0]) and\n self.grid[self.location[0]+move[0]][self.location[1]+move[1]] != 'X'):\n move_list.append(move)\n\n return move_list", "def get_goat_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_goat():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def valid_moves(self):\n valid = set()\n\n # If the center is filled, so unlimited movement is allowed\n if self._unlimited is True:\n\n # For each value of filled, add that value to the center until the value is out of bounds\n # to acquire each movement point that can result\n for pos in self._filled:\n loc = self._center\n while 0 < loc[0] < 20 and 0 < loc[1] < 20:\n loc = (loc[0] + pos[0], loc[1] + pos[1])\n valid.add(loc)\n\n else:\n # If the movement is limited, only allow movement up to 3 spaces\n loc = self._center\n for pos in self._filled:\n if 0 < loc[0] + pos[0] < 20 and 0 < loc[1] + pos[1] < 20:\n valid.add((loc[0] + pos[0], loc[1] + pos[1]))\n if 0 < loc[0] + 2 * pos[0] < 20 and 0 < loc[1] + 2 * pos[1] < 20:\n valid.add((loc[0] + 2 * pos[0], loc[1] + 2 * pos[1]))\n if 0 < loc[0] + 3 * pos[0] < 20 and 0 < loc[1] + 3 * pos[1] < 20:\n valid.add((loc[0] + 3 * pos[0], loc[1] + 3 * pos[1]))\n\n return valid", "def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves", "def getAllValidMoves(x0, y0):\n deltas = [\n (-2, -1),\n (-2, +1),\n (+2, -1),\n (+2, +1),\n (-1, -2),\n (-1, +2),\n (+1, -2),\n (+1, +2),\n ]\n validPositions = []\n\n for (x, y) in deltas:\n xCandidate = x0 + x\n yCandidate = y0 + y\n if 0 < xCandidate < 8 and 0 < yCandidate < 8:\n validPositions.append([xCandidate, yCandidate])\n\n return validPositions", "def _get_move_actions(self, exclude=None):\n rtn = []\n\n # Check for moving up\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({\n 'func': '_move',\n 'args': (self._pos + _Vec3(0, 1, 0),)\n })\n else:\n rtn.append({\n 'func': '_move_up',\n 'args': (exclude,)\n })\n\n # Check for moving down\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n\n # Check for side moves \n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n\n return rtn", "def getLegalActions(self):\n return ['forward', 'left', 'right', None]", "def get_valid_moves(self) -> list[int]:\n return self._valid_moves", "def get_movelist(self):\n return [move for move in self._get_frame_data()]", "def applyOperators(self):\r\n return [self.moveUp(), self.moveDown(),\r\n self.moveLeft(), self.moveRight()]", "def get_valid_move_actions(pos,obstacles):\n\n valid_actions = [False]*len(global_defs.Actions)\n for idx,action in enumerate(global_defs.Actions[:-1]):\n valid = 
check_valid(pos+global_defs.ACTIONS_TO_MOVES[action])\n if valid:\n valid_actions[idx] = True\n #The last action, i.e. WORK, is set to False, since we don't have any idea about deciding it.\n return np.array(valid_actions)", "def get_legal_moves(self, player):\r\n move_list = []\r\n if self._phase == GamePhase.SETUP:\r\n return self._setup_legal_moves(player)\r\n elif self._phase == GamePhase.MOVE:\r\n return self._move_legal_moves(player)\r\n elif self._phase == GamePhase.BUILD:\r\n return self._build_legal_moves(player)\r\n return move_list", "def getPossibleAction(self):\n possible = []\n if self.bGravity:\n j = 0\n for i in range(self.w):\n if self.world[j][i] == 0:\n possible.append((i,j))\n else:\n for j in range(self.h):\n for i in range(self.w):\n if self.world[j][i] == 0:\n possible.append((i,j))\n return possible", "def get_available_moves(self):\n available = []\n row, col = tuple(self.current_pos)\n if row - 1 >= 0 and self.maze[row - 1][col] != 'x':\n available.append('n')\n if row + 1 < len(self.maze) and self.maze[row + 1][col] != 'x':\n available.append('s')\n if col - 1 >= 0 and self.maze[row][col - 1] != 'x':\n available.append('w')\n if col + 1 < len(self.maze[row]) and self.maze[row][col + 1] != 'x':\n available.append('e')\n return available", "def allowed_move(new_x, new_y):\n x_collide = check_collision(new_x, player.y)\n y_collide = check_collision(player.x, new_y)\n both_collide = check_collision(new_x, new_y)\n if both_collide and x_collide and y_collide:\n # Player is allowed to move to new position\n return [new_x, new_y]\n elif x_collide:\n # Player is allowed to move on x-axis\n return [new_x, player.y]\n elif y_collide:\n # Player is allowed to move on y-axis\n return [player.x, new_y]\n\n return [player.x, player.y] # Player cannot move", "def checkPossibleMoves(self):\n possibleMovesArray = []\n\n for j in range(self.nrOfCars):\n minMaxChange = self.gridSize - self.length[j] + 1\n possibleMoves = []\n\n for i in range(1,minMaxChange):\n if self.checkMove(j, i) == 0:\n possibleMoves.append(i)\n else:\n break\n for i in range(1,minMaxChange):\n if self.checkMove(j, -i) == 0:\n possibleMoves.append(-i)\n else:\n break\n\n possibleMovesArray.append(possibleMoves)\n\n return possibleMovesArray", "def get_available_actions(self): \n actions = [] \n direction = [[1, 0], [0, 1]]\n for dir_ in direction:\n for point in self.points_generator(): \n dir_p = Point(*dir_)\n new_point = point + dir_p\n try:\n _ = self.game.board[new_point] \n actions.append((point, new_point))\n except OutOfBoardError:\n continue\n return actions", "def available_positions(self):\n if len([x for x in self.grid.values() if x[0] != None]) < 13:\n return [x for x in assignable_positions if self.grid[x][1] == \"---\"]\n else:\n return []", "def get_tiger_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_tiger():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def legal_moves(self):\n moves = \"\"\n swappable = self.swappable_positions\n empty_position = self.get_position(0)\n\n for s in swappable:\n pos_diff = empty_position[0] - s[0], empty_position[1] - s[1]\n if pos_diff[0] > 0:\n moves += \"U\"\n elif pos_diff[0] < 0:\n moves += \"D\"\n elif pos_diff[1] > 0:\n moves += \"L\"\n elif pos_diff[1] < 0:\n moves += \"R\"\n\n return moves", "def get_legal_moves(self):\n\n return self._legal_moves", "def find_moves(self):\n\n from itertools import product\n free_position = 
self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]", "def get_move_list(self):\n return [\n tuple(x) for x in np.argwhere(self.board == HexBoard.EMPTY).tolist()\n ]", "def available_moves(self):\n available_moves = []\n for i in range(self.quadrants_count):\n quadrant_positions = self.play_area[i].available_positions()\n for p in quadrant_positions:\n position = p + i * 9\n for j in range(self.quadrants_count):\n move1 = [str(position), str(j + 1), \"l\"]\n move2 = [str(position), str(j + 1), \"r\"]\n available_moves.append(\" \".join(move1))\n available_moves.append(\" \".join(move2))\n return available_moves", "def guarded_places(self):\n guarded = []\n for x in range(8):\n for y in range(8):\n if self.squares[x][y].piece and self.squares[x][y].piece.color != self.turn:\n squares = self.squares[x][y].piece.actions(self, (x, y), True)\n if self.squares[x][y].piece.name != 'pawn': # pawns capture in different areas than they move\n guarded.extend(squares[0])\n guarded.extend(squares[1])\n return guarded", "def get_valid_moves(self):\n if self.king:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1],\n [self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n else:\n if self.player == 1:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1]]\n else:\n valid_moves = [[self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n return valid_moves", "def _get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]", "def move_relative(self, shift):\n self._checkMoveRel(shift)\n\n fs = []\n for axis in shift:\n if axis == \"wavelength\":\n # cannot convert it directly to an absolute move, because\n # several in a row must mean they accumulate. So we queue a\n # special task. That also means the range check is delayed until\n # the actual position is known.\n f = self._executor.submit(self.set_wavelength_relative, shift[axis])\n fs.append(f)\n elif axis == \"slit\":\n f = self._executor.submit(self.set_slit_relative, shift[axis])\n fs.append(f)\n # TODO: handle correctly when more than one future\n return fs[-1]" ]
[ "0.6335595", "0.6219791", "0.61731637", "0.6113557", "0.6098801", "0.6082442", "0.6076151", "0.60303247", "0.59810054", "0.59727407", "0.59463936", "0.59408045", "0.5901733", "0.58617115", "0.58573633", "0.58515835", "0.58431864", "0.58416694", "0.5836999", "0.5812548", "0.5807593", "0.57783127", "0.57755226", "0.5773357", "0.5766193", "0.57612085", "0.5760016", "0.57569385", "0.5755676", "0.57555175" ]
0.6397189
0
Return the cached territory occupied by the piece.
def territory(self):\n    cache_key = (\n        self.board.length, self.board.height, self.uid, self.index)\n    if cache_key not in self.territory_cache:\n        vector = self.compute_territory()\n        self.territory_cache[cache_key] = vector\n    else:\n        vector = self.territory_cache[cache_key]\n    return vector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_territory(self):\n # Initialize the square occupancy vector of the board.\n vector = self.board.new_vector()\n\n # Mark current position as reachable.\n vector[self.index] = True\n\n # List all places reacheable by the piece from its current position.\n for x_shift, y_shift in self.movements:\n # Mark side positions as reachable if in the limit of the board.\n try:\n reachable_index = self.board.coordinates_to_index(\n self.x, self.y, x_shift, y_shift)\n except ForbiddenCoordinates:\n continue\n vector[reachable_index] = True\n\n return vector", "def get_occupied_tiles(self):\r\n occupied = np.zeros(self.searchenv.conv.num_tiles)\r\n #Convert current state (positions of agents) to tile indices\r\n tiles = self.searchenv.conv.state_to_tile(self.searchstate.positions)\r\n valid_tiles = tiles[self.searchstate.actives == 1]\r\n occupied[valid_tiles] = 1\r\n return occupied", "def get_all_pieces(self):\n occupied = []\n for pieces in self.piece_locs.values():\n occupied += pieces\n return occupied", "def occupied_cooling_setpoint(self) -> int | None:\n return self.cluster.get(\"occupied_cooling_setpoint\")", "def get_occupant(self):\n\t\tpass", "def use_level(self, n):\n\n # try to get cache for this level, no cache means no level\n try:\n self.tile_cache = self.cache[n]\n except KeyError:\n return None\n\n # get tile info\n info = self.get_info(n)\n if info is None:\n return None\n\n (self.num_tiles_x, self.num_tiles_y, self.ppd_x, self.ppd_y) = info\n\n # cache partial path to level dir\n self.tile_level_dir = os.path.join(self.tile_dir, '%02d' % n)\n\n return (self.tile_size_x*self.num_tiles_x,\n self.tile_size_y*self.num_tiles_y,\n self.ppd_x, self.ppd_y)", "def get_our_tile(self, x, y):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\treturn self.our_tiles[x][y]\n\t\treturn None", "def occupied_heating_setpoint(self) -> int | None:\n return self.cluster.get(\"occupied_heating_setpoint\")", "def get_tile(self):\n return Tile.get_tile(self.get_number())", "def get_occupant(self):\n\t\treturn self.occupant", "def unoccupied_cooling_setpoint(self) -> int | None:\n return self.cluster.get(\"unoccupied_cooling_setpoint\")", "def getTile(self):\n return self.tile", "def get_tile(self, x, y):\n\n try:\n # if tile in cache, return it from there\n return self.tile_cache[(x,y)]\n except KeyError:\n # else not in cache: get image, cache and return it\n # exceptions are normally slow,\n # but we are reading a file if we get exception, so ...\n img_name = os.path.join(self.tile_level_dir,\n 'tile_%d_%d.png' % (x, y))\n\n# Optimization\n# removed since we *know* tiles are there, we generated them!\n# don't need to do filesystem operation.\n# maybe put back if tiles come from internet?\n# if not os.path.exists(img_name):\n# # if tile not there, use 'missing tile' file\n# img_name = os.path.join(self.tile_dir, MissingTileFilename)\n\n img = wx.Image(img_name, wx.BITMAP_TYPE_ANY)\n pic = img.ConvertToBitmap()\n self.tile_cache[(x,y)] = pic\n return pic", "def piece_coor(self):\n return self.piece_type[self.rotation]", "def get_area(self):\n raise NotImplementedError()", "def get_cache(self):\n self._topo.create_cache()\n with open('/run/geopm-service/geopm-topo-cache') as fid:\n result = fid.read()\n return result", "def get(self):\n # 8 timesteps, 6 piece types per player, 64 squares #FIXME: 1 timestep\n # 1 castling (which rooks can still castle)\n # 1 player color (1 if white, 0 if black)\n # 1 total move count\n # 1 moves without progress\n # TODO: add repetions (2): 
repetition count for that position (3 repitions is an autmatic draw)\n pieces = np.concatenate(self.boards)[::-1]\n pieces = np.concatenate(pieces)\n if len(pieces) == MAX_PIECE_INDEX:\n return pieces\n else:\n return np.concatenate((pieces, np.zeros(MAX_PIECE_INDEX-len(pieces), )))", "def get_tile(self, row, col):\r\n \r\n return self._cells[row][col]", "def get_tile(self, row, col):\r\n return self._grid[row][col]", "def get_tile(self, x, y):\n if x < 0 or x >= Settings.SIZE_X or y < 0 or y >= Settings.SIZE_Y:\n return MarkerType.NONE\n return self.__grid[y][x]", "def unoccupied_heating_setpoint(self) -> int | None:\n return self.cluster.get(\"unoccupied_heating_setpoint\")", "def reachable_province(self, ctx):\n return self.reachable_tiles(ctx)", "def get_tile(self, x, y):\r\n\r\n try:\r\n char = self.map[y][x]\r\n except IndexError:\r\n return {}\r\n try:\r\n return self.key[char]\r\n except KeyError:\r\n return {}", "def get_tile(self, row, col):\n return self.grid[row][col]", "def get_tile(self, row, col):\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return 0", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self._cells[row][col]" ]
[ "0.6026444", "0.59270763", "0.5713847", "0.56876725", "0.5581824", "0.5523887", "0.5491418", "0.54348814", "0.54129577", "0.5392716", "0.5293671", "0.5249759", "0.5236839", "0.5229331", "0.5221467", "0.5203362", "0.51952547", "0.5169698", "0.5168036", "0.5155035", "0.51431674", "0.51128125", "0.51062506", "0.5101681", "0.5076661", "0.50596833", "0.50489426", "0.50489426", "0.50489426", "0.50348383" ]
0.6969132
0
Compute territory reachable by the piece from its current position. Returns a list of boolean flags of squares indexed linearly, for which a True means the square is reachable.
def compute_territory(self):\n    # Initialize the square occupancy vector of the board.\n    vector = self.board.new_vector()\n    # Mark current position as reachable.\n    vector[self.index] = True\n    # List all places reacheable by the piece from its current position.\n    for x_shift, y_shift in self.movements:\n        # Mark side positions as reachable if in the limit of the board.\n        try:\n            reachable_index = self.board.coordinates_to_index(\n                self.x, self.y, x_shift, y_shift)\n        except ForbiddenCoordinates:\n            continue\n        vector[reachable_index] = True\n    return vector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_moves(self):\n\n from itertools import product\n free_position = self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]", "def _get_rules_possibles_moves(cell, board_shape):\n return [(cell[0] + a[0], cell[1] + a[1])\n for a in [(-1, 0), (1, 0), (0, -1), (0, 1)]\n if ((0 <= cell[0] + a[0] < board_shape[0]) and (0 <= cell[1] + a[1] < board_shape[1]))]", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n # Add a successor state to the successor list if the action is legal\n # Here's a code snippet for figuring out whether a new position hits a wall:\n x, y = state[0]\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n\n if not hitsWall:\n successors.append((((nextx,nexty), self.visited_corner), action, 1))\n \"*** YOUR CODE HERE ***\"\n\n return successors", "def Check(self):\n cleared = False\n while not cleared:\n for i in list(combinations([cell.Check() for cell in self.cells], 2)):\n # for i in list(combinations(zip(self.locations.x,self.locations.y,self.locations.length,self.locations.index),2)):\n x1 = i[0][0]\n y1 = i[0][1]\n r1 = i[0][2] / 2\n idx1 = i[0][3]\n x2 = i[1][0]\n y2 = i[1][1]\n r2 = i[1][2] / 2\n idx1 = i[0][3]\n idx2 = i[1][3]\n distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)\n radii = (r1 + r2) * (r1 + r2)\n if distance == radii:\n cleared = True\n elif distance > radii:\n cleared = True\n else:\n if x1 > x2 and y1 > y2:\n if (\n x1 + r1 > 0\n and x1 + r1 < self.boundaries[0]\n and y1 + r1 > 0\n and y1 + r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 + r1 / 2\n self.cells[idx1].y = y1 + r1 / 2\n elif x1 > x2 and y1 < y2:\n if (\n x1 + r1 > 0\n and x1 + r1 < self.boundaries[0]\n and y1 - r1 > 0\n and y1 - r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 + r1 / 2\n self.cells[idx1].y = y1 - r1 / 2\n elif x1 < x2 and y1 > y2:\n if (\n x1 - r1 > 0\n and x1 - r1 < self.boundaries[0]\n and y1 + r1 > 0\n and y1 + r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 - r1 / 2\n self.cells[idx1].y = y1 + r1 / 2\n else:\n if (\n x1 - r1 > 0\n and x1 - r1 < self.boundaries[0]\n and y1 - r1 > 0\n and y1 - r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 - r1 / 2\n self.cells[idx1].y = y1 - r1 / 2\n _logger.debug(\n f\"Bumped from {x1 :.2e}, {y1 :.2e} to {self.cells[idx1].x :.2e}, {self.cells[idx1].y :.2e}\"\n )\n cleared = False\n return", "def reachable_province(self, ctx):\n return self.reachable_tiles(ctx)", "def life(arr):\n\tres_arr = arr\n\tmax_x = len(arr[0]) - 1\n\tmax_y = len(arr) - 1\n\n\tfor y, y_value in enumerate(arr):\n\t\tfor x, x_value in enumerate(y_value):\n\t\t\tneighb_count = get_count_life_neighbor(arr, x, y, max_x, max_y)\n\t\t\tif x_value:\n\t\t\t\tif neighb_count < 2 or neighb_count > 3:\n\t\t\t\t\tres_arr[y][x] = False\n\t\t\telse:\n\t\t\t\tif neighb_count == 3:\n\t\t\t\t\tres_arr[y][x] = True\n\treturn res_arr", "def is_solvable(start, board):\r\n # If needed, a slower yet using less memory function can\r\n # be made with one function that multiplies visited cells by -1\r\n # and at the end corrects the whole list using abs().\r\n \r\n board = board[:] \r\n return is_solvable_new_board(start, board)", "def get_legal_moves(self, color):\n moves = [] # stores the legal moves.\n # Get all the squares with pieces of the given color.\n for x in range(self.n):\n 
for y in range(self.n):\n if self[x][y]==0:\n moves.append((x,y))\n return moves", "def is_solved(level_map):\n shape = level_map.shape\n for x in range(shape[0]):\n for y in range(shape[1]):\n i = (x, y)\n tile = level_map[i]\n if tile == 0:\n continue\n left_index, up_index, right_index, down_index = get_direction_indices(i)\n if has_connection_left(tile) and \\\n (tile_is_out_of_borders(left_index, shape) or not has_connection_right(level_map[left_index])):\n return False\n if has_connection_up(tile) and \\\n (tile_is_out_of_borders(up_index, shape) or not has_connection_down(level_map[up_index])):\n return False\n if has_connection_right(tile) and \\\n (tile_is_out_of_borders(right_index, shape) or not has_connection_left(level_map[right_index])):\n return False\n if has_connection_down(tile) and \\\n (tile_is_out_of_borders(down_index, shape) or not has_connection_up(level_map[down_index])):\n return False\n return True", "def neighboursContains(board, row, column, piece):\n\n neighboursList = []\n for rowIndex, columnIndex in BoardUtils.DIRECTIONS.values():\n if 0 <= row + rowIndex < len(board) and 0 <= column + columnIndex < len(board[0]):\n neighboursList.append((row + rowIndex, column + columnIndex))\n\n for rowIndex, columnIndex in neighboursList:\n if board[rowIndex][columnIndex] == piece:\n return True\n return False", "def reachable(maze: list, start: tuple, goal: tuple):\n n = len(maze) # Get the dimension of the maze\n\n #========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"reachable: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"reachable: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n visited = copy.deepcopy(maze)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n stack = [] # Define our stack of \"fringe\" squares\n stack.append(start) # Push the start square onto our stack\n visited[start[0]][start[1]] = 1 # Set our start to visited\n\n while (len(stack)): # While there exists items in the stack\n current = stack.pop() # Pop the last element\n\n if (current == goal):\n return True # If current is the goal, we found it!\n\n current_i, current_j = current # Unpack the current pair\n\n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze matrix\n if (not visited[possible[0]][possible[1]]):\n stack.append(possible)\n visited[possible[0]][possible[1]] = 1\n return False # If the while loop goes out, and the stack is empty, then there is no possible path", "def getSuccessors(self, state):\n\n successors = []\n top, right = self.walls.height - 2, self.walls.width - 2\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n # Add a successor state to the successor list if the action is legal\n # Here's a code snippet for figuring out whether a new position hits a wall:\n x, y = state[0]\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), 
int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n La función sucesores funciona de la siguiente manera:\n * Si la acción no hace que choque con una pared, entonces...\n - Defino nextState como las coordenadas de lo que me da la acción\n - Creo una copia de la grid de true/false que tiene el estado, para así no modificar la original\n - A esta copia le actualizo la información, si el sucesor es una de las esquinas. Tengo que realizar\n esto manualmente dada la definición de mi grid de booleanos.\n - Creo una nueva variable que es una tupla en la que inserto las nuevas coordenadas y la grid actualizada\n - La añado a la lista de sucesores\n \"\"\"\n if not hitsWall:\n nextState = (nextx, nexty) # Defino la tupla que será la posición del sucesor\n nextFood = state[1].copy() # Hago una copia para así poder modificarla tranquilamente\n if nextState == (1, 1): # Manualmente miro si es alguna de las esquinas\n nextFood[1][0] = False # Si lo es, actualizo de true a false el elemento correspondiente\n if nextState == (1, top):\n nextFood[0][0] = False\n if nextState == (right, 1):\n nextFood[1][1] = False\n if nextState == (right, top):\n nextFood[0][1] = False\n nextStateFood = (nextState, nextFood) # Lo añado como tupla\n cost = 1 # Por orden del enunciado, el coste es siempre 1\n successors.append((nextStateFood, action, cost)) # Lo añado a la lista de sucesores\n self._expanded += 1\n return successors", "def possible_moves(self, piece):\n def _index(orig, off):\n \"\"\"Helper function to find the new index.\"\"\"\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)\n\n p_x, p_y = piece\n p_i = _index(piece, (0, 0))\n\n # pass a list of the four corners first for basic possibles\n move_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for i in [-1, 1] for j in [-1, 1]]\n possibles = self.squares[p_i].can_move(piece, move_land)\n\n # next append the new list from jumps\n jump_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for j in [-2, 2] for i in [-2, 2]]\n possibles += self.squares[p_i].can_jump(piece, move_land, jump_land)\n\n # clean out the list of duplicates, although there should be none\n return [m for i, m in enumerate(possibles) if m not in possibles[:i]]", "def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()", "def successors(state):\n free_coordinates = []\n for i in range(3):\n for j in range(3):\n if state[i][j] == '_':\n free_coordinates.append([i, j])\n\n return free_coordinates", "def can_reach_square(self, start, end):\n raise NotImplementedError", "def get_neighbors(start_square, visited=[]):\n neighbors = []\n\n # loop over possible x values\n for i in [start_square.x - 1, start_square.x, start_square.x + 1]:\n\n # drop neighbors outside of our region of interest\n if i < 0 or i > MAX_X:\n 
continue\n\n # loop over possible y values\n for j in [start_square.y - 1, start_square.y, start_square.y + 1]:\n\n # drop neighbors outside of our region of interest\n if j < 0 or j > MAX_Y:\n continue\n\n # Ignore ourself\n if i == start_square.x and j == start_square.y:\n continue\n\n # Ignore corner pieces\n if i == start_square.x - 1 and j != start_square.y:\n continue\n if i == start_square.x + 1 and j != start_square.y:\n continue\n\n # Deal with barriers\n found = False\n for square in visited:\n if square.pos == [i, j]:\n found = True\n break\n if found:\n continue\n\n neighbors.append(Square(i, j))\n\n return neighbors", "def game_of_life():\n # 3x3 neighbourhood\n offsets = [[(y, x) for y in range(-1, 2)] for x in range(-1, 2)]\n\n # Create mappings\n mappings = {}\n for i in range(2 ** 9):\n\n # Determine the initial state (key)\n key = f\"{bin(i)[2:]:0>9}\" # As binary string\n key = tuple(k == \"1\" for k in key) # As tuple of bools\n key = tuple(key[i * 3:i * 3 + 3] for i in range(3)) # Reshape into 2D grid\n\n # Alive counts\n centre = key[1][1]\n others = sum(sum(row) for row in key) - centre\n\n # Skip if state does not evaluate to True\n if centre:\n if others not in (2, 3):\n continue\n\n else:\n if others != 3:\n continue\n\n mappings[key] = True\n\n return Mapping2DRuleset(mappings, offsets)", "def check_path(self, cur_pos, new_pos, board, state):\n\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n cannon_pieces = [Cannon('BLUE'), Cannon('RED')]\n \n # Ensures the range is always in the right order\n if new_row > cur_row: \n ran_r = range(cur_row + 1, new_row, 1)\n elif cur_row > new_row:\n ran_r = range(cur_row - 1, new_row, -1)\n \n elif new_col > cur_col:\n ran_c = range(cur_col + 1, new_col, 1)\n elif cur_col > new_col:\n ran_c = range(cur_col - 1, new_col, -1)\n else:\n return False\n \n # Checking if the movement is left or right is legal\n if new_row == cur_row:\n print(\"it's in the new_row == cur_row\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n print(counter)\n for col_spot in ran_c:\n if board[cur_row][col_spot] is not None:\n counter += 1\n\n if counter == 0: \n print(\"jump!\")\n return True\n \n # Checking if the movement vertical is legal\n if new_col == cur_col:\n print(\"it's in the new_col == cur_col\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n for row_spot in ran_r:\n if board[row_spot][cur_col] is not None:\n counter += 1\n print(board[row_spot][cur_col])\n print(counter)\n if counter == 0:\n print(\"jump!\")\n return True", "def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n\n if state == \"UNFINISHED\":\n # Make sure the position you're going into isn't your own piece\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n \n # Checking diagonals in the palace\n if cur_pos and new_pos in 
self._special:\n # Checking if the movement is in the same column\n if new_col == cur_col and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking if the movement is in the same row\n elif new_row == cur_row and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking all possible diagonals\n elif new_row == cur_row + 1 and new_col == cur_col + 1 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_row - 1 and new_col == cur_col - 1 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_row + 2 and new_col == cur_col + 2 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_col - 2 and new_row == cur_col - 2 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True \n # Checking if the movement is in the same column\n if new_col == cur_col and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking if the movement is in the same row\n elif new_row == cur_row and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n else:\n return False\n else:\n return False", "def check_status(self, x: int, y: int, piece: int) -> typing.Union[int, None]:\n winnable = piece * self.continuous_pieces_to_win\n\n # Horizontally continuous pieces\n if np.sum(self.board_values[x, :]) == winnable:\n return piece\n\n # Vertically continuous pieces\n if np.sum(self.board_values[:, y]) == winnable:\n return piece\n\n # Diagonally continuous pieces\n if x == y or (x == self.size - y - 1):\n upper_diagonal_values = np.sum([self.board_values[self.board_length - 1 - i, i] for i in range(self.board_width)])\n lower_diagonal_values = np.sum([self.board_values[i, i] for i in range(self.board_width)])\n if upper_diagonal_values == winnable or lower_diagonal_values == winnable:\n return piece\n\n # Game board is full and no player wins\n if np.all(self.tiles_taken):\n return 0\n\n # Game is not over\n return None", "def isSolvable(self):\n tiles = []\n for i in range(len(self.tiles)):\n for j in range(len(self.tiles)):\n if self.tiles[j][1] * 3 + self.tiles[j][0] + 1 == i + 1:\n tiles.append(j + 1)\n count = 0\n for i in range(len(tiles) - 1):\n for j in range(i + 1, len(tiles)):\n if tiles[i] > tiles[j] and tiles[i] != 9:\n count += 1\n return count % 2 == 0 and count != 0", "def is_solved(self):\n colors = ['green', 'blue', 'red', 'orange', 'white', 'yellow']\n for row in range(3):\n for column in range(3):\n if self.front[row][column] != colors[0]:\n return False\n for row in range(3):\n for column in range(3):\n if self.back[row][column] != colors[1]:\n return False\n for row in range(3):\n for column in range(3):\n if self.right[row][column] != colors[2]:\n return False\n for row in range(3):\n for column in range(3):\n if self.left[row][column] != colors[3]:\n return False\n for row in range(3):\n for column in range(3):\n if self.up[row][column] != colors[4]:\n return False\n for row in range(3):\n for column in range(3):\n if self.down[row][column] != colors[5]:\n return False\n return True", "def inner(pos, camefrom):\r\n\t\tlabyrinth[pos[0]][pos[1]] = VISITED\r\n\t\tif pos == GOAL:\r\n\t\t\treturn [pos], True\r\n\t\tfor neighbour in neighbours(pos):\r\n\t\t\tif neighbour != camefrom and is_inside(neighbour):\r\n\t\t\t\tif labyrinth[neighbour[0]][neighbour[1]] != BLOCKED and labyrinth[neighbour[0]][neighbour[1]] != VISITED:\r\n\t\t\t\t\tway, success = inner(neighbour, 
pos)\r\n\t\t\t\t\tif success == True:\r\n\t\t\t\t\t\treturn [pos]+way, True\r\n\t\treturn None, False", "def get_legal_moves(self, color):\n moves = set() # stores the legal moves.\n color = max(0, color)\n\n # Get all the squares with pieces of the given color.\n for y in range(self.n):\n for x in range(self.n):\n if self[x][y]==color:\n newmoves = self.get_moves_for_square((x,y))\n moves.update(newmoves)\n return list(moves)", "def check_neighbours(pt, visited=False) -> list[tuple[int, int]]:\n # Points will be added to this list if they havent been traversed yet\n possible_points = dict()\n\n # -- NORTH\n p_pt = north(pt)\n # This mess of a condition will evaluate to true if the cell is visited and the user is asking for a visited cell. Viceversa.\n if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"N\"\n\n # -- EAST\n p_pt = east(pt)\n if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"E\"\n\n # -- SOUTH\n p_pt = south(pt)\n if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"S\"\n\n # -- WEST\n p_pt = west(pt)\n if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"W\"\n\n return possible_points", "def chessboardGame(x, y):\n xin = x\n yin = y\n\n # These squares have no possible move, therefore, are losing;\n # we chose these squares by sight; while loop below expands these sets\n # until we encompass whole board\n # it was not clear to me in the beginning that every square has a unique\n # determinant ending under optimal play\n losing_start = set([(1, 1), (2, 1), (1, 2), (2, 2)])\n\n # These squares can jump to losing_start in one move, so are winning\n winning_start = set([(1, 3), (1, 4), (2, 3), (2, 4),\n (3, 1), (3, 2), (3, 3), (3, 4),\n (4, 1), (4, 2), (4, 3)])\n\n def nextset(x, y):\n def isvalid(coord):\n return True if coord[0] >= 1 and coord[1] >= 1 \\\n and coord[0] <= 15 and coord[1] <= 15 else False\n\n nextsquares = [(x - 2, y + 1), (x - 2, y - 1), (x + 1, y - 2),\n (x - 1, y - 2)]\n nextsquares = set([*filter(isvalid, nextsquares)])\n # print(nextsquares)\n return nextsquares\n\n # run a few times through whole board;\n # it takes 5 times to find a definitive win path for all 225 squares\n # 161 squares are winning for first player\n # 64 squares are losing starting for first player\n test_set = [(i, j) for i in range(1, 16) for j in range(1, 16)]\n times = 1\n while (len(winning_start) + len(losing_start)) < 225:\n for coords in test_set:\n x_ = coords[0]\n y_ = coords[1]\n thenextset = nextset(x_, y_)\n # print('testing', x_, y_, thenextset)\n\n if (x_, y_) in losing_start:\n # print('No Path, Second wins')\n pass\n elif (x_, y_) in winning_start:\n # print('One jump to terminal square, First wins')\n pass\n elif (len(winning_start.intersection(thenextset))\n == len(thenextset)):\n # if next set ONLY includes winning_starts, First loses because\n # he has no choice but give win to opponent\n # need to add x,y to losing_start\n losing_start.add((x_, y_))\n # print('we lose, Second wins')\n elif len(losing_start.intersection(thenextset)) > 0:\n # if next set includes ANY losing_start, we win by choosing it\n # need to add x,y to winning_start\n winning_start.add((x_, y_))\n # print('First wins')\n else:\n # print('do not know')\n pass\n\n print('Run', times, len(winning_start) + len(losing_start))\n times += 1\n\n print(len(winning_start))\n print(len(losing_start))\n\n # 
prints schematic of Winor Loss of each of 15x15 squares\n\n print(' '.join(map(str, [i for i in range(1, 16)])))\n for i in range(15):\n row = ''\n for j in range(15):\n if test_set[i * 15 + j] in winning_start:\n row = row + 'W '\n else:\n row = row + 'L '\n print(row + str(i))\n\n if (xin, yin) in winning_start:\n print('First wins with', xin, yin)\n return 'First'\n else:\n print('Second wins with', xin, yin)\n return 'Second'", "def goal_test(state): \n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] != i*size + j:\n return False \n return True", "def uniform_cost_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n fringe.push(stat[0], stat[1].piece.get_num_tiles()) #problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final" ]
[ "0.5903241", "0.5804372", "0.57658273", "0.57489616", "0.5739321", "0.5720743", "0.5670181", "0.5648418", "0.5639911", "0.5635544", "0.5632291", "0.5622091", "0.5598855", "0.559796", "0.5573405", "0.55529225", "0.5552635", "0.5536598", "0.55041766", "0.55024284", "0.54945725", "0.5486781", "0.5478534", "0.5454685", "0.54447544", "0.5437914", "0.54351544", "0.5425267", "0.54134375", "0.54091257" ]
0.6670306
0
Generate M3U file for the given software into out_dir
def generate(software, out_dir, suffix, dry_run):\n    m3u_filename = software.name + (suffix if suffix else '') + '.m3u'\n    if not dry_run:\n        m3u_fd = open(os.path.join(out_dir, m3u_filename), 'w')\n    for i in software.images():\n        image_rel_path = os.path.relpath(i.path, out_dir)\n        if not dry_run:\n            m3u_fd.write((image_rel_path + '\n'))\n    if not dry_run:\n        m3u_fd.close()\n    logging.info('Created M3U file for %s (%i image files)', software.name, len(software.images()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_output_matrix_files(self, year, max_zone_id):\r\n from opus_emme2.travel_model_output import TravelModelOutput\r\n tm_output = TravelModelOutput(self.emme_cmd)\r\n year_config = self.config['travel_model_configuration'][year]\r\n for x in 1,2,3:\r\n if \"bank%i\" % x in year_config['matrix_variable_map']:\r\n bank_dir = self.get_emme2_dir(year, \"bank%i\" % x)\r\n for matrix_name in year_config['matrix_variable_map'][\"bank%i\" % x].keys():\r\n tm_output._get_matrix_into_data_file(matrix_name, max_zone_id, bank_dir, \"%s_one_matrix.txt\" % matrix_name)", "def create_m3u_file(\n file_name: str,\n song_list: List[Song],\n template: str,\n file_extension: str,\n short: bool = False,\n) -> str:\n\n m3u_content = create_m3u_content(song_list, template, file_extension, short)\n\n with open(file_name, \"w\", encoding=\"utf-8\") as m3u_file:\n m3u_file.write(m3u_content)\n\n return m3u_content", "def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)", "def process_m4(args, dirname, names):\n\n global processed_count\n global nonprocessed_count\n\n if len(args) < 2:\n raise Exception(\"in or out path not configured, see example in main()\")\n\n if not args[0] or not args[1]:\n raise Exception(\"in or out path not configured, see example in main()\")\n\n inputdir = args[0]\n outputdir = args[1]\n\n #print \"dir: \" + dirname\n if dirname[-3:] == \"CVS\":\n return\n \n regex = re.compile(\"(.*)(%s)(.*)\" % inputdir)\n mobj = regex.search(dirname)\n if mobj:\n outputdir = outputdir + mobj.group(3)\n else:\n raise Exception(\"no mobj?\")\n \n if not os.path.exists(outputdir):\n os.mkdir(outputdir)\n if verbose_mode:\n print \"Created directory %s\" % outputdir\n \n for name in names:\n path = os.path.join(dirname, name)\n outpath = os.path.join(outputdir, name)\n if os.path.isdir(path):\n continue\n \n if name[-5:] != \".html\":\n cmd = \"%s %s %s\" % (CPPATH, path, outpath)\n ret = os.system(cmd)\n if ret:\n print \"cmd failed: %s\" % cmd\n else:\n nonprocessed_count += 1\n if verbose_mode:\n print \"Added %s\" % outpath\n else:\n cmd = \"%s -P <%s >%s\" % (M4PATH, path, outpath)\n ret = os.system(cmd)\n if ret:\n print \"cmd failed: %s\" % cmd\n else:\n processed_count += 1\n if verbose_mode:\n print \"Processed %s\" % 
outpath", "def make_libfile():\n # wfc3_obsmodes_uvis\n wfc3_uvis = [\n \"f218w\",\n \"f225w\",\n \"f275w\",\n \"f336w\",\n \"f390m\",\n \"f390w\",\n \"f410m\",\n \"f438w\",\n \"f467m\",\n \"f475w\",\n \"f547m\",\n \"f555w\",\n \"f606w\",\n \"f621m\",\n \"f625w\",\n \"f689m\",\n \"f763m\",\n \"f775w\",\n \"f814w\",\n \"f845m\",\n ]\n\n wfc3_ir = [\n \"f098m\",\n \"f105w\",\n \"f110w\",\n \"f125w\",\n \"f127m\",\n \"f139m\",\n \"f140w\",\n \"f153m\",\n \"f160w\",\n ]\n\n wfpc2 = [\n \"f122m\",\n \"f157w\",\n \"f336w\",\n \"f410m\",\n \"f467m\",\n \"f547m\",\n \"f439w\",\n \"f569w\",\n \"f675w\",\n \"f791w\",\n \"f170w\",\n \"f185w\",\n \"f218w\",\n \"f255w\",\n \"f300w\",\n \"f380w\",\n \"f555w\",\n \"f622w\",\n \"f450w\",\n \"f606w\",\n \"f702w\",\n \"f814w\",\n ]\n\n acs_wfc = [\n \"f435w\",\n \"f475w\",\n \"f550m\",\n \"f555w\",\n \"f606w\",\n \"f625w\",\n \"f775w\",\n \"f814w\",\n ]\n # galex\n galex = [\"fuv\", \"nuv\"]\n\n # Open hd5 file for writing\n hf = h5py.File(__ROOT__ + \"filters.hd5\", \"w\")\n\n # Create group for nice hierarchical structure\n f = hf.create_group(\"filters\")\n\n # Define arrays for \"contents\" / descriptive information\n tablenames = []\n observatories = []\n instruments = []\n names = []\n norms = []\n cwaves = []\n pwaves = []\n comments = []\n\n # Loop through WFC3_UVIS filters\n for filt in wfc3_uvis:\n\n # define uvis 1 and uvis2 modes\n mode_1 = \"wfc3, uvis1, \" + filt\n mode_2 = \"wfc3, uvis2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of uvis1 and uvis2\")\n\n # Loop through WFC3_IR filters\n for filt in wfc3_ir:\n\n # define ir mode\n mode = \"wfc3, ir, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n 
cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # Loop through WFPC2 filters\n for filt in wfpc2:\n\n # define chips 1, 2, 3, 4 modes\n mode_1 = \"wfpc2, 1, \" + filt\n mode_2 = \"wfpc2, 2, \" + filt\n mode_3 = \"wfpc2, 3, \" + filt\n mode_4 = \"wfpc2, 4, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n bp_3 = stsyn.band(mode_3)\n bp_4 = stsyn.band(mode_4)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave), bp_3(wave), bp_4(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFPC2_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFPC2\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of 1, 2, 3, 4\")\n\n # Loop through ACS filters\n for filt in acs_wfc:\n\n # define wfc1, wfc2 modes\n mode_1 = \"acs, wfc1, \" + filt\n mode_2 = \"acs, wfc2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_ACS_WFC_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"ACS_WFC\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of wfc1 and wfc2\")\n\n # Loop through GALEX filters:\n for filt in galex:\n # define ir mode\n mode = \"galex,\" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"GALEX_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant 
information\n tablenames.append(filter_name)\n observatories.append(\"GALEX\")\n instruments.append(\"GALEX\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # smash the contents arrays together\n contents = np.array(\n list(\n zip(\n tablenames,\n observatories,\n instruments,\n names,\n norms,\n cwaves,\n pwaves,\n comments,\n )\n ),\n dtype=[\n (\"TABLENAME\", \"S40\"),\n (\"OBSERVATORY\", \"S30\"),\n (\"INSTRUMENT\", \"S30\"),\n (\"NAME\", \"S10\"),\n (\"NORM\", \"<f8\"),\n (\"CWAVE\", \"<f8\"),\n (\"PWAVE\", \"<f8\"),\n (\"COMMENT\", \"S100\"),\n ],\n )\n\n # add the contents array as an hd5 dataset\n hf.create_dataset(\"content\", data=contents)\n\n # close the file\n hf.close()", "def write_setup(project_name, root_dir):\r\n setup_path = get_file_path(root_dir, None, \"setup.py\") #Get the path for setup.py\r\n setup_content = get_setup_text(project_name)\r\n \r\n setup_file = open(setup_path, 'w')\r\n setup_file.write(setup_content)\r\n setup_file.close()\r\n print_file(setup_path, \" +++\")", "def Build(self, out_file):\n raise NotImplementedError", "def main(quiet=False):\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n newpath = r'%s/models' % os.getcwdu()\n if not os.path.exists(newpath): os.makedirs(newpath)\n newpath = r'%s/out' % os.getcwdu()\n if not os.path.exists(newpath): os.makedirs(newpath)\n existing = sorted(os.listdir('%s/%s' % (os.getcwdu(), 'models'))) \n\n urls = [\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/parameters.fits.gz',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/2J.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/2H.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/2K.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I1.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I2.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I3.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I4.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/M1.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/M2.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/M3.fits',\n 'http://caravan.astro.wisc.edu/protostars/files/extinction_law.tar.gz'\n ]\n file_names = [\n 'models/parameters.fits.gz',\n 'models/2J.fits',\n 'models/2H.fits',\n 'models/2K.fits',\n 'models/I1.fits',\n 'models/I2.fits',\n 'models/I3.fits',\n 'models/I4.fits',\n 'models/M1.fits',\n 'models/M2.fits',\n 'models/M3.fits',\n 'models/extinction_law.tar.gz']\n\n for i in range(len(urls)):\n if not os.path.isfile(file_names[i]):\n f = open(file_names[i], 'wb')\n f.write(urllib2.urlopen(urls[i]).read())\n f.close()\n print('Downloaded %s from %s' % (file_names[i],urls[i]), file=output_stream)\n\n if not os.path.isfile('modesl/extinction_law.ascii'):\n f = tarfile.open('models/extinction_law.tar.gz', 'r:gz')\n try: f.extractall()\n finally: f.close()", "def create_out_dir_name(params):\n\n current_timestamp = timestamp()\n out_dir = os.path.join('out', current_timestamp)\n return out_dir", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, 
\"wb\") as f:\n f.write(bb)", "def output_file_name_maker(args):\n log.debug(\"Entering output_file_name_maker()\")\n path = os.getcwd() + '/out_files/'\n if not os.path.isdir(path):\n os.mkdir(path)\n\n if args.output is None:\n out_file_name = path + args.input[:-4] + '_' + args.type + '_' + args.layer\n else:\n out_file_name = path + args.output\n\n log.debug(\"Exiting output_file_name_maker()\")\n return out_file_name", "def gen_dtu_mvs_path(dtu_data_folder, mode='training'):\n sample_list = []\n \n # parse camera pairs\n cluster_file_path = dtu_data_folder + '/Cameras/pair.txt'\n cluster_list = open(cluster_file_path).read().split()\n\n # 3 sets\n training_set = [2, 6, 7, 8, 14, 16, 18, 19, 20, 22, 30, 31, 36, 39, 41, 42, 44,\n 45, 46, 47, 50, 51, 52, 53, 55, 57, 58, 60, 61, 63, 64, 65, 68, 69, 70, 71, 72,\n 74, 76, 83, 84, 85, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,\n 101, 102, 103, 104, 105, 107, 108, 109, 111, 112, 113, 115, 116, 119, 120,\n 121, 122, 123, 124, 125, 126, 127, 128]\n validation_set = [3, 5, 17, 21, 28, 35, 37, 38, 40, 43, 56, 59, 66, 67, 82, 86, 106, 117]\n evaluation_set = [1, 4, 9, 10, 11, 12, 13, 15, 23, 24, 29, 32, 33, 34, 48, 49, 62, 75, 77, \n 110, 114, 118]\n\n # for each dataset\n data_set = []\n if mode == 'training':\n data_set = training_set\n elif mode == 'validation':\n data_set = validation_set\n elif mode == 'evaluation':\n data_set = evaluation_set\n\n # for each dataset\n for i in data_set:\n\n image_folder = os.path.join(dtu_data_folder, ('Rectified/scan%d' % i))\n cam_folder = os.path.join(dtu_data_folder, 'Cameras')\n depth_folder = os.path.join(dtu_data_folder, ('Depths/scan%d' % i))\n\n if mode == 'training':\n # for each lighting\n for j in range(0, 7):\n # for each reference image\n for p in range(0, int(cluster_list[0])):\n paths = []\n # ref image\n ref_index = int(cluster_list[22 * p + 1])\n ref_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((ref_index + 1), j)))\n ref_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % ref_index))\n paths.append(ref_image_path)\n paths.append(ref_cam_path)\n # view images\n for view in range(FLAGS.view_num - 1):\n view_index = int(cluster_list[22 * p + 2 * view + 3])\n view_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((view_index + 1), j)))\n view_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % view_index))\n paths.append(view_image_path)\n paths.append(view_cam_path)\n # depth path\n depth_image_path = os.path.join(depth_folder, ('depth_map_%04d.pfm' % ref_index))\n paths.append(depth_image_path)\n sample_list.append(paths)\n else:\n # for each reference image\n j = 5\n for p in range(0, int(cluster_list[0])):\n paths = []\n # ref image\n ref_index = int(cluster_list[22 * p + 1])\n ref_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((ref_index + 1), j)))\n ref_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % ref_index))\n paths.append(ref_image_path)\n paths.append(ref_cam_path)\n # view images\n for view in range(FLAGS.view_num - 1):\n view_index = int(cluster_list[22 * p + 2 * view + 3])\n view_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((view_index + 1), j)))\n view_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % view_index))\n paths.append(view_image_path)\n paths.append(view_cam_path)\n # depth path\n depth_image_path = os.path.join(depth_folder, ('depth_map_%04d.pfm' % ref_index))\n paths.append(depth_image_path)\n sample_list.append(paths)\n \n return 
sample_list", "def setup_outdir():\n try:\n shutil.rmtree(OUTDIR)\n except FileNotFoundError:\n pass\n os.makedirs(OUTDIR, exist_ok=True)", "def generate_output_file(data, extension, headers):\n output_data = _replace_boolean(data)\n output_name = _generate_output_name(extension)\n with open(output_name, 'a', newline='') as file:\n _file_writer(file, extension, output_data, headers)", "def write_file(country, season, final, var):\n if var=='label':\n path='../results/kmeans/'\n elif var=='cluster':\n path='../results/sequence_analysis/'\n country_ = country.lower()\n season_ = season.replace('-','_')\n file_name=country_+\"_\"+season_\n newpath=path+file_name+'/'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n f = open(newpath+file_name+\".txt\",\"w\") \n f.write(final)\n f.close()", "def generate_file(material_id):\n apr=get_doc_from_MP(material_id)\n mat_list=generate_matrix(apr)\n formu=POSCAR_title(apr)\n cell_for=generate_cell_formula(apr)\n needed_dos=generate_dos_str(material_id)\n revise_dos=dos_into_string(needed_dos)\n ordered_list=generate_ordered_list(revise_dos)\n my_ordered_elements=generate_ordered_elements(revise_dos,ordered_list)\n my_ordered_numbers=generate_ordered_numbers(revise_dos,ordered_list,cell_for)\n generate_POSCAR(formu,mat_list,my_ordered_elements,my_ordered_numbers,revise_dos)", "def generate_fmu_from_backend(backend: str, output_path):\n\n backend_manifest = toml.loads(\n pkg_resources.resource_string(__name__, \"resources/backends.toml\").decode()\n )[\"backend\"][backend]\n\n if \"files\" not in backend_manifest:\n raise RuntimeError(\"'files' attribute is not defined in the configuration\")\n\n # create phyiscal files in tmpdir, such that the copy/mv semantics can be implemented with function of standard lib\n with TemporaryDirectory() as tmpdir_resources, TemporaryDirectory() as tmpdir_fmu:\n tmpdir_resources = Path(tmpdir_resources)\n tmpdir_fmu = Path(tmpdir_fmu)\n\n dirs_to_output = {}\n files_to_output = {}\n\n # dump all resources into a temporary directory\n # while this is not very effective, it ensures a file structure identical to the resources directory.\n # concretely it makes it easier to check which paths refer to directories or files\n for src in list_resource_files(\"resources\"):\n file_out = tmpdir_resources / src\n makedirs(file_out.parent, exist_ok=True)\n\n stream = pkg_resources.resource_string(__name__, f\"{src}\")\n with open(file_out, \"wb\") as f:\n f.write(stream)\n\n # copy the files needed for the particular backend\n\n if \"files\" in backend_manifest:\n for src, dst in backend_manifest[\"files\"]:\n files_to_output = {\n **files_to_output,\n **{src: dst},\n }\n\n if \"dirs\" in backend_manifest:\n for src, dst in backend_manifest[\"dirs\"]:\n dirs_to_output = {\n **dirs_to_output,\n **{src: dst},\n }\n\n for src, dst in files_to_output.items():\n\n src = tmpdir_resources / \"resources\" / src\n\n if not src.exists():\n raise FileNotFoundError(f\"The file {src} does not any known resource\")\n\n if not src.is_file():\n raise FileNotFoundError(\n f\"The path {src} exists, but does not refer to a file\"\n )\n\n dst = tmpdir_fmu / dst\n makedirs(dst.parent, exist_ok=True)\n copy(src, dst)\n\n for src, dst in dirs_to_output.items():\n\n src = tmpdir_resources / \"resources\" / src\n dst = tmpdir_fmu / dst\n makedirs(dst.parent, exist_ok=True)\n copytree(src, dst)\n\n shutil.copytree(tmpdir_fmu, output_path)", "def makeLibrary(self):\n #------------------------------------------ Instance for the output file\n outputFile 
= open(\"%s/%s\" % (self.sceneryPath,self.libTxtFileName),\"w\")\n #------------------------------------------------------ write the header\n for line in self.header:\n outputFile.write(\"%s\\n\" % (line))\n #------------------------------------------------- Loop over all folders\n packageContent = os.walk(self.sceneryPath)\n for folder in packageContent:\n for fileName in folder[2]:\n fileType = fileName.split(\".\")[-1]\n if fileType in self.objectTypes:\n realPath = folder[0][len(self.sceneryPath)+1:].replace(\"\\\\\",\"/\")\n filePath = \"%s/%s\" % (realPath,fileName)\n print filePath\n outputFile.write(\"EXPORT %s%s %s%s\\n\" % (self.libPrefix,filePath,self.realPathPrefix,filePath))\n outputFile.close()", "def generate_metadata(self):\n\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n\n if self.options.profile == 'mercator':\n\n south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)\n north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)\n south, west = max(-85.05112878, south), max(-180.0, west)\n north, east = min(85.05112878, north), min(180.0, east)\n self.swne = (south, west, north, east)\n\n # Generate googlemaps.html\n if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):\n with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:\n f.write(self.generate_googlemaps().encode('utf-8'))\n\n # Generate openlayers.html\n if self.options.webviewer in ('all', 'openlayers'):\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):\n with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:\n f.write(self.generate_openlayers().encode('utf-8'))\n\n # Generate leaflet.html\n if self.options.webviewer in ('all', 'leaflet'):\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):\n with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:\n f.write(self.generate_leaflet().encode('utf-8'))\n\n elif self.options.profile == 'geodetic':\n\n west, south = self.ominx, self.ominy\n east, north = self.omaxx, self.omaxy\n south, west = max(-90.0, south), max(-180.0, west)\n north, east = min(90.0, north), min(180.0, east)\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n if self.options.webviewer in ('all', 'openlayers'):\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):\n with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:\n f.write(self.generate_openlayers().encode('utf-8'))\n\n elif self.options.profile == 'raster':\n\n west, south = self.ominx, self.ominy\n east, north = self.omaxx, self.omaxy\n\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n if self.options.webviewer in ('all', 'openlayers'):\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):\n with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:\n f.write(self.generate_openlayers().encode('utf-8'))\n\n # Generate tilemapresource.xml.\n if not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml')):\n with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:\n f.write(self.generate_tilemapresource().encode('utf-8'))\n\n if 
self.kml:\n # TODO: Maybe problem for not automatically generated tminz\n # The root KML should contain links to all tiles in the tminz level\n children = []\n xmin, ymin, xmax, ymax = self.tminmax[self.tminz]\n for x in range(xmin, xmax+1):\n for y in range(ymin, ymax+1):\n children.append([x, y, self.tminz])\n # Generate Root KML\n if self.kml:\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):\n with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:\n f.write(generate_kml(\n None, None, None, self.tileext, self.tilesize, self.tileswne,\n self.options, children\n ).encode('utf-8'))", "def write_uem(uemf, uem, n_digits=3):\n with open(uemf, 'wb') as f:\n for file_id in sorted(iterkeys(uem)):\n for onset, offset in sorted(uem[file_id]):\n line = ' '.join([file_id,\n '1',\n format_float(onset, n_digits),\n format_float(offset, n_digits)\n ])\n f.write(line.encode('utf-8'))\n f.write(b'\\n')", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def write(self, filename):\n assert filename[-3:]=='.fz','name must end in .fz'\n\n files.makedir_fromfile(filename)\n\n ucfilename=filename[0:-3]\n bname = os.path.basename(ucfilename)\n\n tmp_path = os.path.join(\n files.get_temp_dir(),\n bname,\n )\n files.makedir_fromfile(tmp_path)\n\n with TempFile(tmp_path) as tfile:\n super(CosmosMEDSMaker,self).write(tfile.path)\n self._compress_meds_file(tfile.path, filename)", "def writeAVUToXMLFile(outfile, target, attribute, value, unit=None):\n outfile.write('\\t<AVU>\\n')\n outfile.write(\"\\t\\t<Target>%s</Target>\\n\" % (escape(target),))\n outfile.write(\"\\t\\t<Attribute>%s</Attribute>\\n\" % (escape(attribute),) )\n outfile.write(\"\\t\\t<Value>%s</Value>\\n\" % (escape(value),) )\n if unit:\n outfile.write(\"\\t\\t<Unit>%s</Unit>\\n\" % (unit,) )\n else:\n outfile.write('\\t\\t<Unit />\\n')\n outfile.write('\\t</AVU>\\n')", "def setup(outpath):\n time = datetime.now().strftime(\"%d_%m_%Y_%H_%M_%S\")\n temp = os.path.join(outpath, \"data\", \"temp\")\n result = os.path.join(outpath, \"results\")\n logs = os.path.join(outpath, \"logs\")\n download = os.path.join(outpath, \"data\", \"download\")\n chromsizes = os.path.join(outpath,\n \"data\", \"chromsizes\")\n if not os.path.exists(download):\n os.makedirs(download)\n if not os.path.exists(temp):\n os.makedirs(temp)\n if not os.path.exists(result):\n os.makedirs(result)\n if not os.path.exists(logs):\n os.makedirs(logs)\n if not os.path.exists(chromsizes):\n os.makedirs(chromsizes)\n\n logname = time + \"_tfanalyzer.log\"\n logfile = os.path.join(logs, logname)\n logging.basicConfig(filename=logfile, level=logging.INFO)\n return logfile", "def get_output_file(run, lens_chunk, source_tilename):\n d=get_output_dir(run, lens_chunk)\n fname=\"%(run)s-lens-%(lens_chunk)06d-src-%(source_tilename)s.dat\"\n fname=fname % {'run':run,\n 'lens_chunk':lens_chunk,\n 'source_tilename':source_tilename}\n\n return os.path.join(d, fname)", "def _make_output_dir(self):\n out_dir = os.path.dirname(self._out_format)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n LOG.info('Created output directory: %s', out_dir)", "def 
creation_srcmdl(dir_cat,SourceRA,SourceDec,SourceROI,distmin,name,outputfile,emin,emax):\n\tf_liste_sour=\"a.txt\"\n\n\tlect_ca(dir_cat,SourceRA,SourceDec,SourceROI,distmin,name,f_liste_sour,name)\n\tXML_EC_PL(name, f_liste_sour, outputfile, emin,emax)\n\tos.system(\"rm -rf a.txt\")", "def build_msms():\r\n\r\n # Prepare include file with dynamic data\r\n f = open(os.path.join(GME_ROOT, \"Install\", \"GME_dyn.wxi\"), 'w')\r\n print >> f, \"<!-- DO NOT EDIT THIS FILE. WILL BE REGENERATED BY THE BUILD SCRIPTS -->\"\r\n print >> f, \"<Include>\"\r\n print >> f, \" <?define GUIDSTRMETAGME='%s' ?>\" % (tools.query_GUID(mta_for_xmp(METAGME_XMP)))\r\n print >> f, \" <?define GUIDSTRHFSM='%s' ?>\" % (tools.query_GUID(mta_for_xmp(HFSM_XMP)))\r\n print >> f, \" <?define GUIDSTRSF='%s' ?>\" % (tools.query_GUID(mta_for_xmp(SF_XMP)))\r\n print >> f, \" <?define GUIDSTRUML='%s' ?>\" % (tools.query_GUID(mta_for_xmp(UML_XMP)))\r\n print >> f, \"</Include>\"\r\n f.close()\r\n\r\n import glob\r\n sources = [f for f in glob.glob(os.path.join(GME_ROOT, \"Install\", \"*.wxs\")) if os.path.basename(f) not in ('GME.wxs', 'GME_bundle.wxs')]\r\n if prefs['arch'] == 'x64':\r\n sources.remove(os.path.join(GME_ROOT, \"Install\", \"GME_paradigms.wxs\"))\r\n for file_ in sources:\r\n extras = []\r\n if os.path.basename(file_) == 'GME_paradigms.wxs':\r\n extras = glob.glob(os.path.join(GME_ROOT, \"Install\", \"PIA*/*.wxi\"))\r\n tools.build_WiX([file_] + extras)", "def generate_output(dataset_path, dataset_name, dest): #keep\n def func_name_extractor(x):\n x = os.path.basename(x)\n return x\n\n binaries = list(os.scandir(dataset_path))\n import numpy as np\n np.random.seed(42)\n np.random.shuffle(binaries)\n train_output = open(os.path.join(dataset_path, dataset_name + \"_train_output.txt\"), \"w\")\n test_output = open(os.path.join(dataset_path, dataset_name + \"_test_output.txt\"), \"w\")\n val_output = open(os.path.join(dataset_path, dataset_name + \"_val_output.txt\"), \"w\")\n mapper = dict()\n all_funcs = set()\n for i, entry in enumerate(binaries):\n funcs = list(glob(f\"{entry.path}/*\"))\n all_funcs.update(funcs)\n for func in funcs:\n func_name = func_name_extractor(func)\n func_name = func_name.split(\"_\")\n for label in func_name:\n if label not in mapper:\n mapper[label] = []\n mapper[label].append(func)\n\n well_named_funcs = set()\n popular_names = filter(lambda x: len(x[1]) >= 3, mapper.items())\n\n count_func_names = open(os.path.join(dataset_path, \"count_func_names.txt\"), \"w\")\n for name, name_funcs in mapper.items():\n line= name + \" \" + str(len(name_funcs)) + \"\\n\"\n count_func_names.write(line)\n\n\n names_hists = {name: {'free': len(name_funcs), 'train': 0, 'val': 0, 'test': 0} for name, name_funcs in popular_names}\n for partial in map(lambda x: x[1], filter(lambda x: len(x[1]) >= 3, mapper.items())):\n well_named_funcs.update(partial)\n well_named_funcs = list(well_named_funcs)\n\n # generate output\n np.random.shuffle(well_named_funcs)\n print(f\"{len(all_funcs)} functions, {len(well_named_funcs)} functions with a name that contains a common word\")\n # print(\"choosing 250 functions for test/validation\")\n\n global_counters = {'train': 0, 'val': 0, 'test': 0}\n less_than_th = 0\n less_than_five = 0\n less_than_8 = 0\n for i, func in enumerate(well_named_funcs):\n func_name_parts = func_name_extractor(func).split(\"_\") \n print_name = gen_shared_name(names_hists, func_name_parts)\n names_hists, dest = set_decide(names_hists, print_name, global_counters)\n global_counters[dest] += 1\n 
print_name = \"|\".join(print_name) \n if dest == 'train':\n output = train_output\n elif dest == 'test':\n output = test_output\n else:\n output = val_output\n\n try:\n with open(func, \"r\") as f:\n for line in f:\n line = line.split(\" \")\n line[0] = print_name\n line = \" \".join(line)\n line = line_process(line)\n m = len(line.split(\" \")[1].split(\",\")[1].split(\"|\"))\n if \"fp_const\" not in line:\n if m < 1000:\n less_than_th += 1 \n if m < 800:\n less_than_8 += 1\n if m < 500:\n less_than_five += 1\n train_output.write(line)\n except:\n pass\n print(\"num of lines with line less than 1000 is \", less_than_th)\n print(\"num of lines with line less than 800 is \", less_than_8)\n print(\"num of lines with line less than 500 is \", less_than_five)\n train_output.close()\n test_output.close()\n val_output.close()", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])" ]
[ "0.5642974", "0.5558042", "0.5551027", "0.5361542", "0.53559524", "0.5301094", "0.5252762", "0.5241855", "0.5204623", "0.5158686", "0.51586396", "0.5136852", "0.51365227", "0.51075536", "0.50907314", "0.5074185", "0.50671935", "0.50445044", "0.5027117", "0.5024382", "0.50210035", "0.50138086", "0.50049096", "0.49969366", "0.49948904", "0.4991258", "0.49833345", "0.49831536", "0.4975265", "0.4965941" ]
0.8317543
0
Generate M3U file for the list of softwares into out_dir
def generate_all(softwares, out_dir, suffix, dry_run):
    if not dry_run:
        if not out_dir.exists():
            out_dir.mkdir(parents=True)

    multi_images_softwares = (x for x in softwares if x.nb_images() > 1)

    for i in multi_images_softwares:
        try:
            generate(i, out_dir, suffix, dry_run)
        except UnicodeEncodeError:
            logging.warning("Unicode error while processing %s", ascii(i.name))
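For context only (not part of the dataset row): a minimal, self-contained sketch of the pattern this query/document pair describes — one .m3u playlist per multi-image title, with image paths stored relative to the output directory. The `write_m3u` helper and the example paths are illustrative assumptions, not the API used above.

```python
# Illustrative sketch, assuming nothing beyond the standard library.
import os
from pathlib import Path

def write_m3u(name, image_paths, out_dir, suffix=""):
    out_dir = Path(out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    m3u_path = out_dir / f"{name}{suffix}.m3u"
    with open(m3u_path, "w", encoding="utf-8") as fd:
        for image in image_paths:
            # store paths relative to the playlist so the set stays relocatable
            fd.write(os.path.relpath(image, out_dir) + "\n")
    return m3u_path

if __name__ == "__main__":
    write_m3u("Some Game", ["roms/game (Disk 1).d64", "roms/game (Disk 2).d64"], "playlists")
```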
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate(software, out_dir, suffix, dry_run):\n m3u_filename = software.name + (suffix if suffix else '') + '.m3u'\n\n if not dry_run:\n m3u_fd = open(os.path.join(out_dir, m3u_filename), 'w')\n\n for i in software.images():\n image_rel_path = os.path.relpath(i.path, out_dir)\n\n if not dry_run:\n m3u_fd.write((image_rel_path + '\\n'))\n\n if not dry_run:\n m3u_fd.close()\n logging.info('Created M3U file for %s (%i image files)', \n software.name, len(software.images()))", "def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)", "def create_m3u_file(\n file_name: str,\n song_list: List[Song],\n template: str,\n file_extension: str,\n short: bool = False,\n) -> str:\n\n m3u_content = create_m3u_content(song_list, template, file_extension, short)\n\n with open(file_name, \"w\", encoding=\"utf-8\") as m3u_file:\n m3u_file.write(m3u_content)\n\n return m3u_content", "def make_libfile():\n # wfc3_obsmodes_uvis\n wfc3_uvis = [\n \"f218w\",\n \"f225w\",\n \"f275w\",\n \"f336w\",\n \"f390m\",\n \"f390w\",\n \"f410m\",\n \"f438w\",\n \"f467m\",\n \"f475w\",\n \"f547m\",\n \"f555w\",\n \"f606w\",\n \"f621m\",\n \"f625w\",\n \"f689m\",\n \"f763m\",\n \"f775w\",\n \"f814w\",\n \"f845m\",\n ]\n\n wfc3_ir = [\n \"f098m\",\n \"f105w\",\n \"f110w\",\n \"f125w\",\n \"f127m\",\n \"f139m\",\n \"f140w\",\n \"f153m\",\n \"f160w\",\n ]\n\n wfpc2 = [\n \"f122m\",\n \"f157w\",\n \"f336w\",\n \"f410m\",\n \"f467m\",\n \"f547m\",\n \"f439w\",\n \"f569w\",\n \"f675w\",\n \"f791w\",\n \"f170w\",\n \"f185w\",\n \"f218w\",\n \"f255w\",\n \"f300w\",\n \"f380w\",\n \"f555w\",\n \"f622w\",\n \"f450w\",\n \"f606w\",\n \"f702w\",\n \"f814w\",\n ]\n\n acs_wfc = [\n \"f435w\",\n \"f475w\",\n \"f550m\",\n \"f555w\",\n \"f606w\",\n \"f625w\",\n \"f775w\",\n \"f814w\",\n ]\n # galex\n galex = [\"fuv\", \"nuv\"]\n\n # Open hd5 file for writing\n hf = h5py.File(__ROOT__ + \"filters.hd5\", \"w\")\n\n # Create group for nice hierarchical structure\n f = hf.create_group(\"filters\")\n\n # Define arrays for \"contents\" / descriptive information\n tablenames = []\n observatories = []\n instruments = []\n names = []\n norms = []\n cwaves = []\n pwaves = []\n comments = []\n\n # Loop through WFC3_UVIS filters\n for filt in wfc3_uvis:\n\n # define uvis 1 and uvis2 
modes\n mode_1 = \"wfc3, uvis1, \" + filt\n mode_2 = \"wfc3, uvis2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of uvis1 and uvis2\")\n\n # Loop through WFC3_IR filters\n for filt in wfc3_ir:\n\n # define ir mode\n mode = \"wfc3, ir, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # Loop through WFPC2 filters\n for filt in wfpc2:\n\n # define chips 1, 2, 3, 4 modes\n mode_1 = \"wfpc2, 1, \" + filt\n mode_2 = \"wfpc2, 2, \" + filt\n mode_3 = \"wfpc2, 3, \" + filt\n mode_4 = \"wfpc2, 4, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n bp_3 = stsyn.band(mode_3)\n bp_4 = stsyn.band(mode_4)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave), bp_3(wave), bp_4(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFPC2_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFPC2\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n 
cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of 1, 2, 3, 4\")\n\n # Loop through ACS filters\n for filt in acs_wfc:\n\n # define wfc1, wfc2 modes\n mode_1 = \"acs, wfc1, \" + filt\n mode_2 = \"acs, wfc2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_ACS_WFC_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"ACS_WFC\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of wfc1 and wfc2\")\n\n # Loop through GALEX filters:\n for filt in galex:\n # define ir mode\n mode = \"galex,\" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"GALEX_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"GALEX\")\n instruments.append(\"GALEX\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # smash the contents arrays together\n contents = np.array(\n list(\n zip(\n tablenames,\n observatories,\n instruments,\n names,\n norms,\n cwaves,\n pwaves,\n comments,\n )\n ),\n dtype=[\n (\"TABLENAME\", \"S40\"),\n (\"OBSERVATORY\", \"S30\"),\n (\"INSTRUMENT\", \"S30\"),\n (\"NAME\", \"S10\"),\n (\"NORM\", \"<f8\"),\n (\"CWAVE\", \"<f8\"),\n (\"PWAVE\", \"<f8\"),\n (\"COMMENT\", \"S100\"),\n ],\n )\n\n # add the contents array as an hd5 dataset\n hf.create_dataset(\"content\", data=contents)\n\n # close the file\n hf.close()", "def main(quiet=False):\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n newpath = r'%s/models' % os.getcwdu()\n if not os.path.exists(newpath): os.makedirs(newpath)\n newpath = r'%s/out' % os.getcwdu()\n if not os.path.exists(newpath): os.makedirs(newpath)\n existing = sorted(os.listdir('%s/%s' % (os.getcwdu(), 'models'))) \n\n urls = [\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/parameters.fits.gz',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/2J.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/2H.fits',\n 
'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/2K.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I1.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I2.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I3.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I4.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/M1.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/M2.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/M3.fits',\n 'http://caravan.astro.wisc.edu/protostars/files/extinction_law.tar.gz'\n ]\n file_names = [\n 'models/parameters.fits.gz',\n 'models/2J.fits',\n 'models/2H.fits',\n 'models/2K.fits',\n 'models/I1.fits',\n 'models/I2.fits',\n 'models/I3.fits',\n 'models/I4.fits',\n 'models/M1.fits',\n 'models/M2.fits',\n 'models/M3.fits',\n 'models/extinction_law.tar.gz']\n\n for i in range(len(urls)):\n if not os.path.isfile(file_names[i]):\n f = open(file_names[i], 'wb')\n f.write(urllib2.urlopen(urls[i]).read())\n f.close()\n print('Downloaded %s from %s' % (file_names[i],urls[i]), file=output_stream)\n\n if not os.path.isfile('modesl/extinction_law.ascii'):\n f = tarfile.open('models/extinction_law.tar.gz', 'r:gz')\n try: f.extractall()\n finally: f.close()", "def create_output_matrix_files(self, year, max_zone_id):\r\n from opus_emme2.travel_model_output import TravelModelOutput\r\n tm_output = TravelModelOutput(self.emme_cmd)\r\n year_config = self.config['travel_model_configuration'][year]\r\n for x in 1,2,3:\r\n if \"bank%i\" % x in year_config['matrix_variable_map']:\r\n bank_dir = self.get_emme2_dir(year, \"bank%i\" % x)\r\n for matrix_name in year_config['matrix_variable_map'][\"bank%i\" % x].keys():\r\n tm_output._get_matrix_into_data_file(matrix_name, max_zone_id, bank_dir, \"%s_one_matrix.txt\" % matrix_name)", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def generate_fmu_from_backend(backend: str, output_path):\n\n backend_manifest = toml.loads(\n pkg_resources.resource_string(__name__, \"resources/backends.toml\").decode()\n )[\"backend\"][backend]\n\n if \"files\" not in backend_manifest:\n raise RuntimeError(\"'files' attribute is not defined in the configuration\")\n\n # create phyiscal files in tmpdir, such that the copy/mv semantics can be implemented with function of standard lib\n with TemporaryDirectory() as tmpdir_resources, TemporaryDirectory() as tmpdir_fmu:\n tmpdir_resources = Path(tmpdir_resources)\n tmpdir_fmu = Path(tmpdir_fmu)\n\n dirs_to_output = {}\n files_to_output = {}\n\n # dump all resources into a temporary directory\n # while this is not very effective, it ensures a file structure identical to the resources directory.\n # concretely it makes it easier to check which paths refer to directories or files\n for src in list_resource_files(\"resources\"):\n file_out = tmpdir_resources / src\n makedirs(file_out.parent, exist_ok=True)\n\n stream = pkg_resources.resource_string(__name__, f\"{src}\")\n with open(file_out, \"wb\") as f:\n f.write(stream)\n\n # copy the files needed for the particular backend\n\n if \"files\" in backend_manifest:\n for src, dst in backend_manifest[\"files\"]:\n files_to_output = {\n **files_to_output,\n **{src: dst},\n }\n\n if \"dirs\" in backend_manifest:\n for src, dst in 
backend_manifest[\"dirs\"]:\n dirs_to_output = {\n **dirs_to_output,\n **{src: dst},\n }\n\n for src, dst in files_to_output.items():\n\n src = tmpdir_resources / \"resources\" / src\n\n if not src.exists():\n raise FileNotFoundError(f\"The file {src} does not any known resource\")\n\n if not src.is_file():\n raise FileNotFoundError(\n f\"The path {src} exists, but does not refer to a file\"\n )\n\n dst = tmpdir_fmu / dst\n makedirs(dst.parent, exist_ok=True)\n copy(src, dst)\n\n for src, dst in dirs_to_output.items():\n\n src = tmpdir_resources / \"resources\" / src\n dst = tmpdir_fmu / dst\n makedirs(dst.parent, exist_ok=True)\n copytree(src, dst)\n\n shutil.copytree(tmpdir_fmu, output_path)", "def output_wave_files(predicted_mfccs_batch, true_target_mfccs_batch):\n # only outputting 1 wavefile in the batch, because otherwise it takes too long\n for i in range(min(1, predicted_mfccs_batch.shape[0])):\n print \"Converting wavefile \", i\n predicted_mfccs = predicted_mfccs_batch[i,:,:]\n target_mfccs = true_target_mfccs_batch[i]\n\n output_wave_file(predicted_mfccs, filename='autoencoder_pred_' + str(i)) \n output_wave_file(target_mfccs, filename='autoencoder_input_' + str(i))", "def savematrixasfilelist3(matrix, fileout):\n fout = open(fileout, \"w\")\n for i in matrix:\n fout.write(str(i) + \"\\n\")\n fout.close()", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def build_msms():\r\n\r\n # Prepare include file with dynamic data\r\n f = open(os.path.join(GME_ROOT, \"Install\", \"GME_dyn.wxi\"), 'w')\r\n print >> f, \"<!-- DO NOT EDIT THIS FILE. 
WILL BE REGENERATED BY THE BUILD SCRIPTS -->\"\r\n print >> f, \"<Include>\"\r\n print >> f, \" <?define GUIDSTRMETAGME='%s' ?>\" % (tools.query_GUID(mta_for_xmp(METAGME_XMP)))\r\n print >> f, \" <?define GUIDSTRHFSM='%s' ?>\" % (tools.query_GUID(mta_for_xmp(HFSM_XMP)))\r\n print >> f, \" <?define GUIDSTRSF='%s' ?>\" % (tools.query_GUID(mta_for_xmp(SF_XMP)))\r\n print >> f, \" <?define GUIDSTRUML='%s' ?>\" % (tools.query_GUID(mta_for_xmp(UML_XMP)))\r\n print >> f, \"</Include>\"\r\n f.close()\r\n\r\n import glob\r\n sources = [f for f in glob.glob(os.path.join(GME_ROOT, \"Install\", \"*.wxs\")) if os.path.basename(f) not in ('GME.wxs', 'GME_bundle.wxs')]\r\n if prefs['arch'] == 'x64':\r\n sources.remove(os.path.join(GME_ROOT, \"Install\", \"GME_paradigms.wxs\"))\r\n for file_ in sources:\r\n extras = []\r\n if os.path.basename(file_) == 'GME_paradigms.wxs':\r\n extras = glob.glob(os.path.join(GME_ROOT, \"Install\", \"PIA*/*.wxi\"))\r\n tools.build_WiX([file_] + extras)", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def process_m4(args, dirname, names):\n\n global processed_count\n global nonprocessed_count\n\n if len(args) < 2:\n raise Exception(\"in or out path not configured, see example in main()\")\n\n if not args[0] or not args[1]:\n raise Exception(\"in or out path not configured, see example in main()\")\n\n inputdir = args[0]\n outputdir = args[1]\n\n #print \"dir: \" + dirname\n if dirname[-3:] == \"CVS\":\n return\n \n regex = re.compile(\"(.*)(%s)(.*)\" % inputdir)\n mobj = regex.search(dirname)\n if mobj:\n outputdir = outputdir + mobj.group(3)\n else:\n raise Exception(\"no mobj?\")\n \n if not os.path.exists(outputdir):\n os.mkdir(outputdir)\n if verbose_mode:\n print \"Created directory %s\" % outputdir\n \n for name in names:\n path = os.path.join(dirname, name)\n outpath = os.path.join(outputdir, name)\n if os.path.isdir(path):\n continue\n \n if name[-5:] != \".html\":\n cmd = \"%s %s %s\" % (CPPATH, path, outpath)\n ret = os.system(cmd)\n if ret:\n print \"cmd failed: %s\" % cmd\n else:\n nonprocessed_count += 1\n if verbose_mode:\n print \"Added %s\" % outpath\n else:\n cmd = \"%s -P <%s >%s\" % (M4PATH, path, outpath)\n ret = os.system(cmd)\n if ret:\n print \"cmd failed: %s\" % cmd\n else:\n processed_count += 1\n if verbose_mode:\n print \"Processed %s\" % outpath", "def gen_m3u_files(\n query: List[str],\n file_name: Optional[str],\n song_list: List[Song],\n template: str,\n file_extension: str,\n short: bool = False,\n):\n\n # If no file name is provided, use the first list's name\n if not file_name:\n file_name = \"{list[0]}.m3u\"\n\n # If file_name ends with a slash. 
Does not have a m3u name with extension\n # at the end of the template, append `{list[0]}`` to it\n if (\n file_name.endswith(\"/\")\n or file_name.endswith(r\"\\\\\")\n or file_name.endswith(\"\\\\\\\\\")\n ):\n file_name += \"/{list[0]}.m3u\"\n\n # Check if the file name ends with .m3u\n if not file_name.endswith(\".m3u\"):\n file_name += \".m3u\"\n\n lists = []\n for request in query:\n if \"open.spotify.com\" in request and \"playlist\" in request:\n lists.append(Playlist.create_basic_list(request))\n elif \"open.spotify.com\" in request and \"album\" in request:\n lists.append(Album.create_basic_list(request))\n elif \"open.spotify.com\" in request and \"artist\" in request:\n lists.append(Artist.create_basic_list(request))\n elif request == \"saved\":\n lists.append(Saved.create_basic_list())\n\n if len(lists) == 0 and \"{list\" in template:\n raise ValueError(\n \"You must provide a playlist/album/artist/saved to use {list} in the template.\"\n )\n\n # Create a songs list from the lists and the song_list\n songs_lists = []\n for list_obj in lists:\n songs = []\n for song in song_list:\n if song.url in list_obj.urls:\n songs.append(song)\n\n songs_lists.append((list_obj.name, songs))\n\n if \"{list}\" in file_name:\n for list_name, new_song_list in songs_lists:\n create_m3u_file(\n file_name.format(\n list=list_name,\n ),\n new_song_list,\n template,\n file_extension,\n short,\n )\n elif \"{list[\" in file_name and \"]}\" in file_name:\n create_m3u_file(\n file_name.format(list=[list_name for list_name, _ in songs_lists]),\n song_list,\n template,\n file_extension,\n short,\n )\n else:\n create_m3u_file(\n file_name,\n song_list,\n template,\n file_extension,\n short,\n )", "def create_m3u_content(\n song_list: List[Song], template: str, file_extension: str, short: bool = False\n) -> str:\n\n text = \"\"\n for song in song_list:\n text += str(create_file_name(song, template, file_extension, short)) + \"\\n\"\n\n return text", "def generate_requirements(output_path=None):\n from django.conf import settings\n reqs = set()\n \n for app in settings.INSTALLED_APPS:\n if app in mapping.keys():\n reqs |= set(mapping[app])\n if output_path is None:\n print \"--extra-index-url=http://opensource.washingtontimes.com/pypi/simple/\"\n for item in reqs:\n print item\n else:\n try:\n out_file = open(output_path, 'w')\n out_file.write(\"--extra-index-url=http://opensource.washingtontimes.com/pypi/simple/\\n\")\n for item in reqs:\n out_file.write(\"%s\\n\" % item)\n finally:\n out_file.close()", "def run(self):\n for lof in self.data_files:\n if lof[0]:\n base = getattr(self, 'install_' + lof[0])\n else:\n base = getattr(self, 'install_base')\n dir = convert_path(lof[1])\n if not os.path.isabs(dir):\n dir = os.path.join(base, dir)\n elif self.root:\n dir = change_root(self.root, dir)\n self.mkpath(dir)\n\n files = lof[2]\n if len(files) == 0:\n # If there are no files listed, the user must be\n # trying to create an empty directory, so add the\n # directory to the list of output files.\n self.outfiles.append(dir)\n else:\n # Copy files, adding them to the list of output files.\n for f in files:\n f = convert_path(f)\n (out, _) = self.copy_file(f, dir)\n #print \"DEBUG: \", out # dbg\n self.outfiles.append(out)\n \n\n return self.outfiles", "def generate_tools_list():\n out = {}\n\n # Set BETYDB_LOCAL_CACHE_FOLDER = /tools directory\n print(\"Dumping BETY experiments file into \"+os.environ.get('BETYDB_LOCAL_CACHE_FOLDER', \"/home/extractor/\"))\n #dump_experiments()\n\n toollist = [\n 
\"bin2tif.py\",\n \"nrmac.py\",\n \"canopyCover.py\",\n \"fieldmosaic.py\",\n \"submit_clowder.py\",\n \"submit_bety.py\",\n \"submit_geo.py\",\n \"bety_experiments.json\"\n ]\n\n print(\"Including /tools directory files\")\n for t in toollist:\n #tool_daxf = create_daxf(t, os.path.join(\"tests/workflow/workflow-pilot/workflow_terra/tools\", t))\n tool_daxf = create_daxf(t, os.path.join(os.getcwd(), \"tools\", t))\n # Use filename as dict key in case we need it as input later\n out[t] = tool_daxf\n\n sensor_metadata_list = [\n \"ua-mac/sensor-metadata/sensors/stereo/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/flirIrCamera/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/scanner3D/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/VNIR/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/scanalyzer/sensor_fixed_metadata.json\"\n ]\n print(\"Including sensor fixed metadata\")\n for s in sensor_metadata_list:\n sensor_metadata_daxf = create_daxf(s, os.path.join(sites_dir, s))\n # Use '$SENSOR_fixed' as dict key in case we need it as input later\n out[s.split(\"/\")[-2]+\"_fixed\"] = sensor_metadata_daxf\n\n return out", "def generate_output(dataset_path, dataset_name, dest): #keep\n def func_name_extractor(x):\n x = os.path.basename(x)\n return x\n\n binaries = list(os.scandir(dataset_path))\n import numpy as np\n np.random.seed(42)\n np.random.shuffle(binaries)\n train_output = open(os.path.join(dataset_path, dataset_name + \"_train_output.txt\"), \"w\")\n test_output = open(os.path.join(dataset_path, dataset_name + \"_test_output.txt\"), \"w\")\n val_output = open(os.path.join(dataset_path, dataset_name + \"_val_output.txt\"), \"w\")\n mapper = dict()\n all_funcs = set()\n for i, entry in enumerate(binaries):\n funcs = list(glob(f\"{entry.path}/*\"))\n all_funcs.update(funcs)\n for func in funcs:\n func_name = func_name_extractor(func)\n func_name = func_name.split(\"_\")\n for label in func_name:\n if label not in mapper:\n mapper[label] = []\n mapper[label].append(func)\n\n well_named_funcs = set()\n popular_names = filter(lambda x: len(x[1]) >= 3, mapper.items())\n\n count_func_names = open(os.path.join(dataset_path, \"count_func_names.txt\"), \"w\")\n for name, name_funcs in mapper.items():\n line= name + \" \" + str(len(name_funcs)) + \"\\n\"\n count_func_names.write(line)\n\n\n names_hists = {name: {'free': len(name_funcs), 'train': 0, 'val': 0, 'test': 0} for name, name_funcs in popular_names}\n for partial in map(lambda x: x[1], filter(lambda x: len(x[1]) >= 3, mapper.items())):\n well_named_funcs.update(partial)\n well_named_funcs = list(well_named_funcs)\n\n # generate output\n np.random.shuffle(well_named_funcs)\n print(f\"{len(all_funcs)} functions, {len(well_named_funcs)} functions with a name that contains a common word\")\n # print(\"choosing 250 functions for test/validation\")\n\n global_counters = {'train': 0, 'val': 0, 'test': 0}\n less_than_th = 0\n less_than_five = 0\n less_than_8 = 0\n for i, func in enumerate(well_named_funcs):\n func_name_parts = func_name_extractor(func).split(\"_\") \n print_name = gen_shared_name(names_hists, func_name_parts)\n names_hists, dest = set_decide(names_hists, print_name, global_counters)\n global_counters[dest] += 1\n print_name = \"|\".join(print_name) \n if dest == 'train':\n output = train_output\n elif dest == 'test':\n output = test_output\n else:\n output = val_output\n\n try:\n with open(func, \"r\") as f:\n for line in f:\n line = line.split(\" \")\n line[0] = 
print_name\n line = \" \".join(line)\n line = line_process(line)\n m = len(line.split(\" \")[1].split(\",\")[1].split(\"|\"))\n if \"fp_const\" not in line:\n if m < 1000:\n less_than_th += 1 \n if m < 800:\n less_than_8 += 1\n if m < 500:\n less_than_five += 1\n train_output.write(line)\n except:\n pass\n print(\"num of lines with line less than 1000 is \", less_than_th)\n print(\"num of lines with line less than 800 is \", less_than_8)\n print(\"num of lines with line less than 500 is \", less_than_five)\n train_output.close()\n test_output.close()\n val_output.close()", "def build_model_multi(self):\n\t\n\t\tif len(self.template) < 1: raise Exception('except: needs multiple templates '+str(self.template))\n\t\tif len(self.target) != 1: raise Exception('except: needs only one target '+str(self.template))\n\t\n\t\tprint 'preparing modeller scripts'\n\t\t#---variables passed to modeller via settings-homology.py\n\t\tvars_to_modeller = {\n\t\t\t'pdblist':self.template,\n\t\t\t'target_seq':self.target[0][0],\n\t\t\t'n_models':self.settings['n_models'],\n\t\t\t}\n\t\n\t\t#---write a settings file for the modeller script\n\t\twith open(self.rootdir+'settings-homology.py','w') as fp:\n\t\t\tfp.write('#!/usr/bin/python\\n\\n')\n\t\t\tfor var in vars_to_modeller.keys():\n\t\t\t\tval = '\\''+str(vars_to_modeller[var])+'\\'' \\\n\t\t\t\t\tif type(vars_to_modeller[var]) == str else vars_to_modeller[var]\n\t\t\t\tfp.write(var+' = '+str(val)+'\\n')\n\t\t\t\n\t\t#---write an ali file with the target\n\t\tfasta_linelen = 50\n\t\twith open(self.rootdir+self.target[0][0]+'.ali','w') as fp:\n\t\t\tfp.write('>P1;'+self.target[0][0]+'\\n')\n\t\t\tfp.write('sequence:'+self.target[0][0]+':::::::0.00:0.00\\n')\n\t\t\tseq = self.target[0][1]\n\t\t\tchopped = [seq[j*fasta_linelen:(j+1)*fasta_linelen] for j in range(len(seq)/fasta_linelen+1)]\n\t\t\tchopped = [i for i in chopped if len(i) > 0]\n\t\t\tfor i,seg in enumerate(chopped): fp.write(seg+('\\n' if i < len(chopped)-1 else '*\\n'))\n\t\t\n\t\tprint 'running modeller'\n\t\tcmd = [gmxpaths['modeller'],'script-multi.py']\n\t\tcall(cmd,logfile='log-modeller-script-multi',cwd=self.rootdir)", "def output_files(self):\n # Output file for Moller generation\n if 'moller' in self.name:\n return ['moller.stdhep']\n # Output file for beam generation\n return ['beam.stdhep']", "def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()", "def write_setup(project_name, root_dir):\r\n setup_path = get_file_path(root_dir, None, \"setup.py\") #Get the path for setup.py\r\n setup_content = get_setup_text(project_name)\r\n \r\n setup_file = open(setup_path, 'w')\r\n setup_file.write(setup_content)\r\n setup_file.close()\r\n print_file(setup_path, \" +++\")", "def outw():\n # make the record string\n # ok, pack em up...\n outstr = \"\".join(outlist)\n print(outstr)\n print(len(outstr))\n # of = open(\"workfile\", \"w\")\n of = open(\"workfile\", \"a\")\n # of.write(\\n)\n of.write(outstr)\n of.close()", "def m3u() -> Response:\n m3uText = \"#EXTM3U\\n\"\n for station in locast_service.get_stations():\n callsign = 
name_only(station.get(\"callSign_remapped\") or station.get(\n \"callSign\") or station.get(\"name\"))\n city = station[\"city\"]\n logo = station.get(\"logoUrl\") or station.get(\"logo226Url\")\n channel = station.get(\"channel_remapped\") or station[\"channel\"]\n networks = \"Network\" if callsign in [\n 'ABC', 'CBS', 'NBC', 'FOX', 'CW', 'PBS'] else \"\"\n groups = \";\".join(filter(None, [city, networks]))\n url = f\"http://{host_and_port}/watch/{station['id']}.m3u\"\n\n tvg_name = f\"{callsign} ({city})\" if config.multiplex else callsign\n\n m3uText += f'#EXTINF:-1 tvg-id=\"channel.{station[\"id\"]}\" tvg-name=\"{tvg_name}\" tvg-logo=\"{logo}\" tvg-chno=\"{channel}\" group-title=\"{groups}\", {callsign}'\n\n if config.multiplex:\n m3uText += f' ({city})'\n m3uText += f'\\n{url}\\n\\n'\n return m3uText", "def output_model(output_dir=\"./output\", model_out=None): \n # Find the path of MODEL_INIT via the parameter file\n par_file = os.path.join(output_dir, \"seisflows_paths.json\")\n with open(par_file) as f:\n model_init = json.load(f)[\"MODEL_INIT\"]\n\n assert(os.path.exists(model_init)), \\\n f\"MODEL_INIT does not exist\\n{model_init}\"\n print(f\"MODEL INIT: {model_init}\")\n\n # Determine the model number, only choose numbers, no 'init' or 'true'\n if model_out is None:\n available_models = glob(os.path.join(output_dir, \"model_[0-9]???\"))\n model_out = sorted(available_models)[-1]\n else:\n model_out = os.path.join(output_dir, model_out)\n\n assert(os.path.exists(model_out)), f\"MODEL_OUT does not exist\\n{model_out}\"\n print(f\"MODEL OUT: {model_out}\")\n\n # Quick check to make sure NPROC is the same for each directory\n nproc_check = [0, 0]\n for i, m in enumerate([model_init, model_out]):\n nprocs = [os.path.basename(_) for _ in glob(os.path.join(m, \"*\"))]\n # list comprehension strips string parts, e.g. 
'proc000001_vp.bin' -> 1\n nproc_check[i] = max([int(_.split('_')[0][4:]) for _ in nprocs])\n assert(nproc_check[0] == nproc_check[1]), f\"NPROCS differ {nproc_check}\"\n print(f\"NPROC: {nproc_check[0]}\")\n \n # Symlink all available files that don't already exist in model_out\n model_init_files = glob(os.path.join(model_init, \"*\"))\n for src in model_init_files:\n dst = os.path.join(model_out, os.path.basename(src))\n if os.path.exists(dst):\n continue\n else:\n os.symlink(src, dst)", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def write_mtl(output_file_name, tex_map):\n\n def _build_entry(_tex_map, _idx=\"0\"):\n \"\"\"Builds a .mtl file entry.\n :_tex_map: dictionary: Map to look into.\n :_idx: string: The index to look for.\n Defaults to \"0\".\n Returns string data.\"\"\"\n return MATERIAL_ENTRY_TEMPLATE.format(\n mtl_name=_get_tex_name(tex_map, _idx),\n tex_file=tex_map.get(_idx, {}).get(\"name\", \".\"))\n\n materials = \"\"\n mat_num = len(tex_map)\n if mat_num:\n for idx in sorted(tex_map.keys()):\n materials += _build_entry(tex_map, idx)\n else:\n # Let define a default material when there's no map at all.\n materials += _build_entry(tex_map)\n\n with open(output_file_name, \"w\") as fd_out:\n fd_out.write(MATERIAL_TEMPLATE.format(header=COMMON_HEADER,\n mat_num=mat_num,\n materials=materials))\n print \" * Saved '%s'.\" % output_file_name", "def write_out(c2ptmk, ofn):\n print \"Writing out to [{}]\".format(ofn)\n with codecs.open(ofn, \"w\", \"utf8\") as ofd:\n for co, infos in sorted(c2ptmk.items()):\n ofd.write(u\"{}\\t{}\\t{}\\n\".format(\n co, infos[\"uri\"], \",\".join(\n [unicode(x) for x in infos[\"ptmks\"]])))" ]
[ "0.7786916", "0.5748073", "0.5662895", "0.5528735", "0.55101776", "0.5463695", "0.5421654", "0.5380845", "0.53311723", "0.5303696", "0.52977276", "0.52798694", "0.5260766", "0.52486426", "0.5233821", "0.51944816", "0.5123264", "0.5093341", "0.5092315", "0.50845486", "0.50689316", "0.5067993", "0.5039076", "0.50277865", "0.5021385", "0.5005266", "0.500289", "0.50015706", "0.49903527", "0.49807307" ]
0.59200364
1
Recursively parses XML contents to python dict. We assume that `object` tags are the only ones that can appear multiple times at the same level of a tree.
def recursive_parse_xml_to_dict(xml):
    if not xml:
        return {xml.tag: xml.text}
    result = {}
    for child in xml:
        child_result = recursive_parse_xml_to_dict(child)
        if child.tag != 'object':
            result[child.tag] = child_result[child.tag]
        else:
            if child.tag not in result:
                result[child.tag] = []
            result[child.tag].append(child_result[child.tag])
    return {xml.tag: result}
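A quick usage sketch (not part of the dataset row), assuming the function above is in scope; the Pascal-VOC-style XML snippet is made up for demonstration.

```python
# Usage sketch: repeated <object> tags are collected into a list.
import xml.etree.ElementTree as ET

sample = """
<annotation>
  <filename>img_0001.jpg</filename>
  <object><name>cat</name></object>
  <object><name>dog</name></object>
</annotation>
"""

root = ET.fromstring(sample)
data = recursive_parse_xml_to_dict(root)["annotation"]
print(data["filename"])                      # img_0001.jpg
print([o["name"] for o in data["object"]])   # ['cat', 'dog']
```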
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recursive_parse_xml_to_dict(xml):\n if not xml:\n return {xml.tag: xml.text}\n result = {}\n for child in xml:\n child_result = recursive_parse_xml_to_dict(child)\n if child.tag != 'object':\n result[child.tag] = child_result[child.tag]\n else:\n if child.tag not in result:\n result[child.tag] = []\n result[child.tag].append(child_result[child.tag])\n return {xml.tag: result}", "def xml_parser(xml_string):\n def rec_parse(elements):\n \"\"\"recursively add nodes to the dictionary\"\"\"\n node = {\"name\": elements.tag, \"children\": []}\n for elem in elements:\n node[\"children\"].append(rec_parse(elem))\n if len(elements) > 0:\n rec_parse.max_depth += 1\n return node\n\n root = Et.fromstring(xml_string)\n rec_parse.max_depth = 0\n xml_dict = rec_parse(root)\n print(xml_dict, \", \", rec_parse.max_depth)\n\n return xml_dict, rec_parse.max_depth", "def to_dict(xml):\n children = xml.getchildren()\n if not children:\n return xml.text\n else:\n out = {}\n for node in xml.getchildren():\n if node.tag in out:\n if not isinstance(out[node.tag], list):\n out[node.tag] = [out[node.tag]]\n out[node.tag].append(to_dict(node))\n else:\n out[node.tag] = to_dict(node)\n return out", "def elem2dict(node):\n result = {}\n\n for element in node.iterchildren():\n # Remove namespace prefix\n key = element.tag.split('}')[1] if '}' in element.tag else element.tag\n key = key[:1].lower() + key[1:]\n\n # Process element as tree element if the inner XML contains non-whitespace content\n if element.text and element.text.strip():\n value = element.text\n else:\n value = elem2dict(element)\n if key in result:\n if type(result[key]) is list:\n result[key].append(value)\n else:\n tempvalue = result[key].copy()\n result[key] = [tempvalue, value]\n else:\n result[key] = value\n return result", "def search_for_tree(xml_obj, container):\n if isinstance(container, list):\n for child in xml_obj:\n if child.attrib[\"type\"] == \"dict\":\n temp_container = dict()\n search_for_tree(child, temp_container)\n container.append(temp_container)\n elif child.attrib[\"type\"] == \"list\":\n temp_container = list()\n search_for_tree(child, temp_container)\n container.append(temp_container)\n elif child.attrib[\"type\"] == \"int\":\n container.append(int(child.text))\n else:\n container.append(child.text)\n elif isinstance(container, dict):\n for child in xml_obj:\n if child.attrib[\"type\"] == \"dict\":\n temp_container = dict()\n search_for_tree(child, temp_container)\n container[child.tag] = temp_container\n elif child.attrib[\"type\"] == \"list\":\n temp_container = list()\n search_for_tree(child, temp_container)\n container[child.tag] = temp_container\n elif child.attrib[\"type\"] == \"int\":\n container[child.tag] = int(child.text)\n else:\n container[child.tag] = child.text\n return container", "def etree2dict(element):\n i = dict(element.items())\n i.update(_make_content(i, element.text, strip=True))\n\n for child in element:\n tag = child.tag\n value = etree2dict(child)\n i.update(_make_content(i, value, tag))\n\n if element.text and not set(i).difference([\"content\"]):\n # element is leaf node and doesn't have attributes\n i = i.get(\"content\")\n\n return i", "def rec_parse(elements):\n node = {\"name\": elements.tag, \"children\": []}\n for elem in elements:\n node[\"children\"].append(rec_parse(elem))\n if len(elements) > 0:\n rec_parse.max_depth += 1\n return node", "def parse_xml_tree(root):\n if len(root) == 0:\n text = root.text\n value = int(text) if text.isnumeric() else text\n return value\n\n data = 
dict()\n for i, item in enumerate(root):\n tag = item.tag\n if len(item) > 0:\n if tag in data:\n temp = data[tag]\n if type(data[tag]) is not list:\n data[tag] = list()\n data[tag].append(temp)\n data[tag].append(parse_xml_tree(item))\n\n elif data.get(tag) is None:\n data[tag] = parse_xml_tree(item)\n else:\n data[tag] = parse_xml_tree(item)\n else:\n data[tag] = parse_xml_tree(item)\n\n return data", "def from_etree(self, data):\r\n if data.tag == 'request':\r\n # if \"object\" or \"objects\" exists, return deserialized forms.\r\n elements = data.getchildren()\r\n for element in elements:\r\n if element.tag in ('object', 'objects'):\r\n return self.from_etree(element)\r\n return dict((element.tag, self.from_etree(element)) for element in elements)\r\n elif data.tag == 'object' or data.get('type') == 'hash':\r\n return dict((element.tag, self.from_etree(element)) for element in data.getchildren())\r\n elif data.tag == 'objects' or data.get('type') == 'list':\r\n return [self.from_etree(element) for element in data.getchildren()]\r\n else:\r\n type_string = data.get('type')\r\n if type_string in ('string', None):\r\n return data.text\r\n elif type_string == 'integer':\r\n return int(data.text)\r\n elif type_string == 'float':\r\n return float(data.text)\r\n elif type_string == 'boolean':\r\n if data.text == 'True':\r\n return True\r\n else:\r\n return False\r\n else:\r\n return None", "def xml_children_as_dict(node):\n return dict((e.tag, e.text) for e in node)", "def _xmlTreeToDict(cls, node):\n if not isinstance(node, ElementTree.Element):\n raise ATException('_xmlTreeToDict(), param: [node] expected a xml.etree.ElementTree.Element')\n\n nodeDict = {}\n\n if len(node.items()) > 0:\n nodeDict.update(dict(node.items()))\n\n for child in node:\n childItemDict = cls._xmlTreeToDict(child)\n if child.tag in nodeDict:\n if isinstance(nodeDict[child.tag], list):\n nodeDict[child.tag].append(childItemDict)\n else:\n nodeDict[child.tag] = [nodeDict[child.tag], childItemDict]\n else:\n nodeDict[child.tag] = childItemDict\n\n text = ''\n if node.text is not None:\n text = node.text.strip()\n\n if len(nodeDict) > 0:\n if len(text) > 0:\n nodeDict[node.tag + '_text'] = text\n else:\n nodeDict = text\n\n return nodeDict", "def test_parser_to_dict(self):\n xml = \"\"\"\\\n<foo>\n <bar>baz</bar>\n <quz>\n <wow>works!</wow>\n </quz>\n</foo>\n\"\"\"\n d = x.to_dict(xml, {})\n assert d.bar.text_ == u'baz'\n assert d.quz.wow.text_ == u'works!'", "def parsexml0(xmltext, startingat=0, toplevel=1,\n # snarf in some globals\n entityReplacer=unEscapeContentList,\n #len=len, None=None\n #LENCDATAMARKER=LENCDATAMARKER, CDATAMARKER=CDATAMARKER\n ):\n #print \"parsexml0\", repr(xmltext[startingat: startingat+10])\n # DEFAULTS\n NameString = NONAME\n ContentList = AttDict = ExtraStuff = None\n if toplevel is not None:\n #if verbose: print \"at top level\"\n #if startingat!=0:\n # raise ValueError, \"have to start at 0 for top level!\"\n xmltext = xmltext.strip()\n cursor = startingat\n #look for interesting starting points\n firstbracket = xmltext.find(\"<\", cursor)\n afterbracket2char = xmltext[firstbracket+1:firstbracket+3]\n #print \"a\", repr(afterbracket2char)\n #firstampersand = xmltext.find(\"&\", cursor)\n #if firstampersand>0 and firstampersand<firstbracket:\n # raise ValueError, \"I don't handle ampersands yet!!!\"\n docontents = 1\n if firstbracket<0:\n # no tags\n #if verbose: print \"no tags\"\n if toplevel is not None:\n #D = {NAMEKEY: NONAME, CONTENTSKEY: [xmltext[cursor:]]}\n ContentList = 
[xmltext[cursor:]]\n if entityReplacer: ContentList = entityReplacer(ContentList)\n return (NameString, AttDict, ContentList, ExtraStuff), len(xmltext)\n else:\n raise ValueError(\"no tags at non-toplevel %s\" % repr(xmltext[cursor:cursor+20]))\n #D = {}\n L = []\n # look for start tag\n # NEED to force always outer level is unnamed!!!\n #if toplevel and firstbracket>0:\n #afterbracket2char = xmltext[firstbracket:firstbracket+2]\n if toplevel is not None:\n #print \"toplevel with no outer tag\"\n NameString = name = NONAME\n cursor = skip_prologue(xmltext, cursor)\n #break\n elif firstbracket<0:\n raise ValueError(\"non top level entry should be at start tag: %s\" % repr(xmltext[:10]))\n # special case: CDATA\n elif afterbracket2char==\"![\" and xmltext[firstbracket:firstbracket+9]==\"<![CDATA[\":\n #print \"in CDATA\", cursor\n # skip straight to the close marker\n startcdata = firstbracket+9\n endcdata = xmltext.find(CDATAENDMARKER, startcdata)\n if endcdata<0:\n raise ValueError(\"unclosed CDATA %s\" % repr(xmltext[cursor:cursor+20]))\n NameString = CDATAMARKER\n ContentList = [xmltext[startcdata: endcdata]]\n cursor = endcdata+len(CDATAENDMARKER)\n docontents = None\n # special case COMMENT\n elif afterbracket2char==\"!-\" and xmltext[firstbracket:firstbracket+4]==\"<!--\":\n #print \"in COMMENT\"\n endcommentdashes = xmltext.find(\"--\", firstbracket+4)\n if endcommentdashes<firstbracket:\n raise ValueError(\"unterminated comment %s\" % repr(xmltext[cursor:cursor+20]))\n endcomment = endcommentdashes+2\n if xmltext[endcomment]!=\">\":\n raise ValueError(\"invalid comment: contains double dashes %s\" % repr(xmltext[cursor:cursor+20]))\n return (None, endcomment+1) # shortcut exit\n else:\n # get the rest of the tag\n #if verbose: print \"parsing start tag\"\n # make sure the tag isn't in doublequote pairs\n closebracket = xmltext.find(\">\", firstbracket)\n noclose = closebracket<0\n startsearch = closebracket+1\n pastfirstbracket = firstbracket+1\n tagcontent = xmltext[pastfirstbracket:closebracket]\n # shortcut, no equal means nothing but name in the tag content\n if '=' not in tagcontent:\n if tagcontent[-1]==\"/\":\n # simple case\n #print \"simple case\", tagcontent\n tagcontent = tagcontent[:-1]\n docontents = None\n name = tagcontent.strip()\n NameString = name\n cursor = startsearch\n else:\n if '\"' in tagcontent:\n # check double quotes\n stop = None\n # not inside double quotes! (the split should have odd length)\n if noclose or len((tagcontent+\".\").split('\"'))% 2:\n stop=1\n while stop is None:\n closebracket = xmltext.find(\">\", startsearch)\n startsearch = closebracket+1\n noclose = closebracket<0\n tagcontent = xmltext[pastfirstbracket:closebracket]\n # not inside double quotes! 
(the split should have odd length)\n if noclose or len((tagcontent+\".\").split('\"'))% 2:\n stop=1\n if noclose:\n raise ValueError(\"unclosed start tag %s\" % repr(xmltext[firstbracket:firstbracket+20]))\n cursor = startsearch\n #cursor = closebracket+1\n # handle simple tag /> syntax\n if xmltext[closebracket-1]==\"/\":\n #if verbose: print \"it's a simple tag\"\n closebracket = closebracket-1\n tagcontent = tagcontent[:-1]\n docontents = None\n #tagcontent = xmltext[firstbracket+1:closebracket]\n tagcontent = tagcontent.strip()\n taglist = tagcontent.split(\"=\")\n #if not taglist:\n # raise ValueError, \"tag with no name %s\" % repr(xmltext[firstbracket:firstbracket+20])\n taglist0 = taglist[0]\n taglist0list = taglist0.split()\n #if len(taglist0list)>2:\n # raise ValueError, \"bad tag head %s\" % repr(taglist0)\n name = taglist0list[0]\n #print \"tag name is\", name\n NameString = name\n # now parse the attributes\n attributename = taglist0list[-1]\n # put a fake att name at end of last taglist entry for consistent parsing\n taglist[-1] = taglist[-1]+\" f\"\n AttDict = D = {}\n taglistindex = 1\n lasttaglistindex = len(taglist)\n #for attentry in taglist[1:]:\n while taglistindex<lasttaglistindex:\n #print \"looking for attribute named\", attributename\n attentry = taglist[taglistindex]\n taglistindex = taglistindex+1\n attentry = attentry.strip()\n if attentry[0]!='\"':\n raise ValueError(\"attribute value must start with double quotes\" + repr(attentry))\n while '\"' not in attentry[1:]:\n # must have an = inside the attribute value...\n if taglistindex>lasttaglistindex:\n raise ValueError(\"unclosed value \" + repr(attentry))\n nextattentry = taglist[taglistindex]\n taglistindex = taglistindex+1\n attentry = \"%s=%s\" % (attentry, nextattentry)\n attentry = attentry.strip() # only needed for while loop...\n attlist = attentry.split()\n nextattname = attlist[-1]\n attvalue = attentry[:-len(nextattname)]\n attvalue = attvalue.strip()\n try:\n first = attvalue[0]; last=attvalue[-1]\n except:\n raise ValueError(\"attvalue,attentry,attlist=\"+repr((attvalue, attentry,attlist)))\n if first==last=='\"' or first==last==\"'\":\n attvalue = attvalue[1:-1]\n #print attributename, \"=\", attvalue\n D[attributename] = attvalue\n attributename = nextattname\n # pass over other tags and content looking for end tag\n if docontents is not None:\n #print \"now looking for end tag\"\n ContentList = L\n while docontents is not None:\n nextopenbracket = xmltext.find(\"<\", cursor)\n if nextopenbracket<cursor:\n #if verbose: print \"no next open bracket found\"\n if name==NONAME:\n #print \"no more tags for noname\", repr(xmltext[cursor:cursor+10])\n docontents=None # done\n remainder = xmltext[cursor:]\n cursor = len(xmltext)\n if remainder:\n L.append(remainder)\n else:\n raise ValueError(\"no close bracket for %s found after %s\" % (name,repr(xmltext[cursor: cursor+20])))\n # is it a close bracket?\n elif xmltext[nextopenbracket+1]==\"/\":\n #print \"found close bracket\", repr(xmltext[nextopenbracket:nextopenbracket+20])\n nextclosebracket = xmltext.find(\">\", nextopenbracket)\n if nextclosebracket<nextopenbracket:\n raise ValueError(\"unclosed close tag %s\" % repr(xmltext[nextopenbracket: nextopenbracket+20]))\n closetagcontents = xmltext[nextopenbracket+2: nextclosebracket]\n closetaglist = closetagcontents.split()\n #if len(closetaglist)!=1:\n #print closetagcontents\n #raise ValueError, \"bad close tag format %s\" % repr(xmltext[nextopenbracket: nextopenbracket+20])\n # name should match\n 
closename = closetaglist[0]\n #if verbose: print \"closetag name is\", closename\n if name!=closename:\n prefix = xmltext[:cursor]\n endlinenum = len(prefix.split(\"\\n\"))\n prefix = xmltext[:startingat]\n linenum = len(prefix.split(\"\\n\"))\n raise ValueError(\"at lines %s...%s close tag name doesn't match %s...%s %s\" %(\n linenum, endlinenum, repr(name), repr(closename), repr(xmltext[cursor: cursor+100])))\n remainder = xmltext[cursor:nextopenbracket]\n if remainder:\n #if verbose: print \"remainder\", repr(remainder)\n L.append(remainder)\n cursor = nextclosebracket+1\n #print \"for\", name, \"found close tag\"\n docontents = None # done\n # otherwise we are looking at a new tag, recursively parse it...\n # first record any intervening content\n else:\n remainder = xmltext[cursor:nextopenbracket]\n if remainder:\n L.append(remainder)\n #if verbose:\n # #print \"skipping\", repr(remainder)\n # #print \"--- recursively parsing starting at\", xmltext[nextopenbracket:nextopenbracket+20]\n (parsetree, cursor) = parsexml0(xmltext, startingat=nextopenbracket, toplevel=None, entityReplacer=entityReplacer)\n if parsetree:\n L.append(parsetree)\n # maybe should check for trailing garbage?\n # toplevel:\n # remainder = xmltext[cursor:].strip()\n # if remainder:\n # raise ValueError, \"trailing garbage at top level %s\" % repr(remainder[:20])\n if ContentList:\n if entityReplacer: ContentList = entityReplacer(ContentList)\n t = (NameString, AttDict, ContentList, ExtraStuff)\n return (t, cursor)", "def xml2obj(self, src):\n\n\t\tclass DataNode(object):\n\t\t\tdef __init__(self):\n\t\t\t\tself._attrs = {} # XML attributes and child elements\n\t\t\t\tself.data = None # child text data\n\n\t\t\tdef __len__(self):\n\t\t\t\t# treat single element as a list of 1\n\t\t\t\treturn 1\n\n\t\t\tdef __getitem__(self, key):\n\t\t\t\tif isinstance(key, basestring):\n\t\t\t\t\treturn self._attrs.get(key,None)\n\t\t\t\telse:\n\t\t\t\t\treturn [self][key]\n\n\t\t\tdef __contains__(self, name):\n\t\t\t\treturn self._attrs.has_key(name)\n\n\t\t\tdef __nonzero__(self):\n\t\t\t\treturn bool(self._attrs or self.data)\n\n\t\t\tdef __getattr__(self, name):\n\t\t\t\tif name.startswith('__'):\n\t\t\t\t\t# need to do this for Python special methods???\n\t\t\t\t\traise AttributeError(name)\n\t\t\t\treturn self._attrs.get(name,None)\n\n\t\t\tdef _add_xml_attr(self, name, value):\n\t\t\t\tif name in self._attrs:\n\t\t\t\t\t\t# multiple attribute of the same name are represented by a list\n\t\t\t\t\t\tchildren = self._attrs[name]\n\t\t\t\t\t\tif not isinstance(children, list):\n\t\t\t\t\t\t\tchildren = [children]\n\t\t\t\t\t\t\tself._attrs[name] = children\n\t\t\t\t\t\tchildren.append(value)\n\t\t\t\telse:\n\t\t\t\t\tself._attrs[name] = value\n\n\t\t\tdef __str__(self):\n\t\t\t\treturn self.data or ''\n\n\t\t\tdef __repr__(self):\n\t\t\t\titems = sorted(self._attrs.items())\n\t\t\t\tif self.data:\n\t\t\t\t\titems.append(('data', self.data))\n\t\t\t\treturn u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])\n\n\t\tclass TreeBuilder(xml.sax.handler.ContentHandler):\n\t\t\tdef __init__(self):\n\t\t\t\tself.stack = []\n\t\t\t\tself.root = DataNode()\n\t\t\t\tself.current = self.root\n\t\t\t\tself.text_parts = []\n\t\t\t\tself.publicObjects = {}\n\n\t\t\tdef startElement(self, name, attrs):\n\t\t\t\tself.stack.append((self.current, self.text_parts))\n\t\t\t\tself.current = DataNode()\n\t\t\t\tself.text_parts = []\n\t\t\t\t# xml attributes --> python attributes\n\t\t\t\tfor k, v in attrs.items():\n\t\t\t\t\t# Register 
PublicObject in lookup map\n\t\t\t\t\tif k == \"publicID\":\n\t\t\t\t\t\tself.publicObjects[v] = self.current\n\t\t\t\t\tself.current._add_xml_attr(k, v)\n\n\t\t\tdef endElement(self, name):\n\t\t\t\ttext = ''.join(self.text_parts).strip()\n\t\t\t\tif text:\n\t\t\t\t\tself.current.data = text\n\t\t\t\tif self.current._attrs:\n\t\t\t\t\tobj = self.current\n\t\t\t\telse:\n\t\t\t\t\t# a text only node is simply represented by the string\n\t\t\t\t\tobj = text or ''\n\t\t\t\t\t# try to store the object as float if possible\n\t\t\t\t\ttry: obj = float(obj)\n\t\t\t\t\texcept: pass\n\t\t\t\tself.current, self.text_parts = self.stack.pop()\n\t\t\t\tself.current._add_xml_attr(name, obj)\n\n\t\t\tdef characters(self, content):\n\t\t\t\tself.text_parts.append(content)\n\n\t\tbuilder = TreeBuilder()\n\t\tif isinstance(src,basestring):\n\t\t\txml.sax.parseString(src, builder)\n\t\telse:\n\t\t\txml.sax.parse(src, builder)\n\t\treturn builder", "def parse_xml1(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n baseInfo['foder'] = tree.find('foder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n obj_struct['score'] = obj.find('score').text\r\n obj_struct['region'] = obj.find('region').text\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects", "def parse_object(thing,builder): \n \n # Both Element and Attribute instances are encoded first by their \n # \"tag\" attribute. After this, we determine thing's type. If it has\n # a \"value\" attribute, we treat thing as an Attribute instance. 
\n # Otherwise, we treat thing as an Element instance.\n \n builder.grab(str(thing.tag))\n \n try:\n \n builder.grab(thing.value)\n \n except:\n \n for attr in thing.attributes:\n parse_object(attr,builder)\n \n builder.grab(\"0\")\n \n # Each child is either another Element or Attribute instance, \n # in which case it can be parsed recursively, or it is a \n # string, in which case it may be grabbed immediately.\n \n for chld in thing.children:\n \n try:\n parse_object(chld,builder) \n except:\n builder.grab(chld)\n \n builder.grab(\"0\")", "def convert_xml_to_object(file_content):\n object = xmltodict.parse(file_content, dict_constructor=dict)\n print(object)\n return object", "def etree_to_dict(t):\n d = {t.tag: {} if t.attrib else None}\n children = list(t)\n if children:\n dd = defaultdict(list)\n for dc in map(etree_to_dict, children):\n for k, v in dc.items():\n dd[k].append(v)\n d = {t.tag: {k:v[0] if len(v) == 1 else v for k, v in dd.items()}}\n if t.attrib:\n d[t.tag].update(('@' + k, v) for k, v in t.attrib.items())\n if t.text:\n text = t.text.strip()\n if children or t.attrib:\n if text:\n d[t.tag]['#text'] = text\n else:\n d[t.tag] = text\n return d", "def parse_object(obj, path=''):\n if isinstance(obj, dict):\n iterator = obj.iteritems()\n elif isinstance(obj, (list, tuple)):\n iterator = enumerate(obj)\n else:\n return { path.strip('/'): obj }\n\n d = {}\n\n for key, value in iterator:\n key = unicode(key)\n d.update(parse_object(value, path + key + '/'))\n\n return d", "def parse(k):\r\n return stringify_children(xml_object.xpath(k)[0])", "def parse(k):\n return stringify_children(xml_object.xpath(k)[0])", "def etree_to_dict(t):\n d = {t.tag: {} if t.attrib else None}\n children = list(t)\n if children:\n dd = defaultdict(list)\n for dc in map(etree_to_dict, children):\n for k, v in dc.items():\n dd[k].append(v)\n d = {t.tag: {k: v[0] if len(v) == 1 else v\n for k, v in dd.items()}}\n if t.attrib:\n # treat DOI attributes differently for readability\n if (\"doi\"==t.tag):\n for k,v in t.attrib.items():\n d[\"doi_\"+k] = v\n else:\n d[t.tag].update(('@' + k, v)\n for k, v in t.attrib.items())\n if t.text:\n text = t.text.strip()\n if children or t.attrib:\n if text:\n d[t.tag] = text\n else:\n d[t.tag] = text\n return d", "def etree_to_dict(t):\n from collections import defaultdict\n\n d = {t.tag: {} if t.attrib else None}\n children = list(t)\n if children:\n dd = defaultdict(list)\n for dc in map(etree_to_dict, children):\n for k, v in dc.items():\n dd[k].append(v)\n d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}\n if t.attrib:\n d[t.tag].update((\"@\" + k, v) for k, v in t.attrib.items())\n if t.text:\n text = t.text.strip()\n if children or t.attrib:\n if text:\n d[t.tag][\"#text\"] = text\n else:\n d[t.tag] = text\n return d", "def dom_to_dict(root_node):\n\n # Remove namespaces from tagname\n\n tag = root_node.tagName\n\n if \":\" in tag:\n\n tag = tag.split(\":\")[1]\n\n root_dict = {\n tag: {}\n }\n\n node_dict = root_dict[tag]\n\n # Set attributes\n\n if root_node.hasAttributes():\n\n for key in list(root_node.attributes.keys()):\n\n node_dict[key] = root_node.getAttribute(key)\n\n # Check out child nodes\n\n for child in root_node.childNodes:\n\n if child.nodeType == root_node.TEXT_NODE:\n\n # This is the content\n\n node_dict['_content'] = child.data\n\n else:\n\n subnode_dict = dom_to_dict(child)\n\n child_tag = child.tagName\n\n if \":\" in child_tag:\n\n child_tag = child_tag.split(\":\")[1]\n\n new_val = subnode_dict[child_tag]\n\n # If 
we have several child with same name, put them in a list.\n\n if child_tag in node_dict:\n prev_val = node_dict[child_tag]\n\n if type(prev_val) != list:\n node_dict[child_tag] = [prev_val]\n\n node_dict[child_tag].append(new_val)\n\n else:\n node_dict[child_tag] = new_val\n\n return root_dict", "def xml2dict( xml, sanitize=True, prefix=None):\n \n \n #Decode to avert parsing errors as some software dump large text\n #fields into the file that occasionally contain erronious chars\n xml=xml.decode('utf-8', errors='ignore')\n\n \n return etree2dict(etree.fromstring(xml), sanitize, prefix)", "def recursive_generation(t):\n d = {t.tag: {} if t.attrib else None}\n children = list(t)\n\n if children:\n dd = defaultdict(list)\n\n for dc in map(recursive_generation, children):\n for k, v in dc.iteritems():\n dd[k].append(v)\n\n d = {t.tag: {k:v[0] if len(v) == 1 else v for k, v in dd.iteritems()}}\n\n if t.attrib:\n d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())\n\n if t.text:\n text = t.text.strip()\n\n if children or t.attrib:\n if text:\n d[t.tag]['#text'] = text\n else:\n d[t.tag] = text\n\n return d", "def parse_xml(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n #baseInfo['folder'] = tree.find('folder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n if obj.find('score') is None:\r\n obj_struct['score']=\"\"\r\n else:\r\n obj_struct['score'] = obj.find('score').text\r\n if obj.find('region') is None:\r\n obj_struct['region']=\"\"\r\n else:\r\n obj_struct['region'] = obj.find('region').text\r\n if obj.find('imageptr') is None:\r\n obj_struct['imageptr']=\"\"\r\n else:\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n # obj_struct['score'] = obj.find('score').text\r\n # obj_struct['region'] = obj.find('region').text\r\n # obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects", "def ParseXML(self, rawXML):\n if \"Search error: API limited due to abuse\" in str(rawXML.items()):\n raise Rule34_Error('Rule34 rejected your request due to \"API abuse\"')\n\n d = {rawXML.tag: {} if rawXML.attrib else None}\n children = list(rawXML)\n if children:\n dd = defaultdict(list)\n for dc in map(self.ParseXML, children):\n for k, v in dc.items():\n dd[k].append(v)\n d = {rawXML.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}\n if rawXML.attrib:\n d[rawXML.tag].update(('@' + k, v) for k, v in rawXML.attrib.items())\n if rawXML.text:\n text = 
rawXML.text.strip()\n if children or rawXML.attrib:\n if text:\n d[rawXML.tag]['#text'] = text\n else:\n d[rawXML.tag] = text\n return d", "def shape_element(element):\n node = {}\n # you should process only 2 types of top level tags: \"node\" and \"way\"\n if element.tag == \"node\" or element.tag == \"way\" :\n for key in element.attrib.keys():\n val = element.attrib[key]\n node[\"type\"] = element.tag\n\n # deal with top-level tags \n node = process_toptags(key,val, node)\n \n # Begin iterating over subtags\n node = process_subtags(element, node)\n \n for tag in element.iter(\"nd\"):\n if not \"node_refs\" in node.keys():\n node[\"node_refs\"] = []\n node_refs = node[\"node_refs\"]\n node_refs.append(tag.attrib[\"ref\"])\n node[\"node_refs\"] = node_refs\n\n return node\n else:\n return None", "def serialize_object(obj):\n if obj is None:\n return obj\n\n if isinstance(obj, etree._Element):\n return obj\n\n if isinstance(obj, list):\n return [serialize_object(sub) for sub in obj]\n\n result = OrderedDict()\n for key in obj:\n value = obj[key]\n if isinstance(value, (list, CompoundValue)):\n value = serialize_object(value)\n result[key] = value\n return result" ]
[ "0.76405007", "0.6487578", "0.6464231", "0.6301873", "0.6218455", "0.6149198", "0.6135709", "0.607625", "0.60481364", "0.58940786", "0.5876097", "0.5762465", "0.5756617", "0.5738174", "0.5733136", "0.57227683", "0.5722584", "0.56979394", "0.5683315", "0.5661643", "0.5641846", "0.56374836", "0.5628327", "0.56063956", "0.55917156", "0.5571635", "0.5568463", "0.55602247", "0.54705596", "0.5412003" ]
0.7581804
1
Key to sort hosts / domains alphabetically, by domain name.
def domain_sort_key(domain):
    import re
    domain_expr = r'(.*\.)?(.*\.)(.*)'  # Eg: (www.)(google.)(com)
    domain_search = re.search(domain_expr, domain)
    if domain_search and domain_search.group(1):
        # sort by domain name and then everything left of
        # Eg: google, com, www
        domain_values = (
            domain_search.group(2),
            domain_search.group(3),
            domain_search.group(1)
        )
        key = '%s%s%s' % domain_values
    else:
        # no host portion, just return the domain name
        key = domain
    return(key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subdomain_sorting_key(hostname):\n parts = hostname.split('.')[::-1]\n if parts[-1] == 'www':\n return parts[:-1], 1\n return parts, 0", "def list_domain_names(self) -> Dict:\n pass", "def get_hosts(self):\n\n return sorted(self.host_data.keys())", "def sort_key(self):\n ...", "def list_domain_names():\n pass", "def bucket_domain_name(self) -> str:\n ...", "def natsort_key_icase(s):\n return natsort_key(s.lower())", "def bucket_website_domain_name(self) -> str:\n ...", "def natsort_key_icase(s: str) -> str:\n return natsort_key(s.lower())", "def get_domain_name(self, DomainName: str) -> Dict:\n pass", "def sort_by_key(request):\n return request.param", "def sort_by_key(request):\n return request.param", "def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur", "def order_domain_values(var,assignment,csp):\n #right now it works only as just convert value and return\n #no special black magic yet\n return var.domain", "def getHostKey(instance):\n return instance['hostname']", "def get_sort_key(self) -> str:\n return self.name", "def group_by_domain(hash_entries):\n entries = (get_entry(h) for h in hash_entries)\n domains = {}\n for e in entries:\n domains[e['url_domain']] = domains.get(e['url_domain']) or []\n domains[e['url_domain']].append(e)\n return [{'domain': name, 'entries': ent} for name, ent in domains.items()]", "def keyListSort(keyList):\n keyList.sort(key=lambda y: y.GetName().lower())", "def cloudfront_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('cloudfront')\n response = client.list_distributions(\n MaxItems='100'\n )\n items = response[\"DistributionList\"][\"Items\"]\n for item in items:\n cloud_front_domain_name = item[\"DomainName\"]\n if item[\"Aliases\"][\"Quantity\"] > 0:\n if hostname in item[\"Aliases\"][\"Items\"]:\n return cloud_front_domain_name\n return None", "def bucket_dual_stack_domain_name(self) -> str:\n ...", "def get_subdomain(self):\n return self.key().name().split(':', 1)[0]", "def order_domain_values(self, var, assignment):\n # retrieve the domain for the variable\n domain = self.domains[var]\n # initialise a dictionary for sorting the values in the variable's domain\n sorting_dict = {} \n # for each of the values in the variable's domain \n for value in domain:\n # set the constraint counter to zero\n sorting_dict[value] = 0\n # for each of the neighbors of the variable\n for neighbor in self.crossword.neighbors(var):\n # retrieve the overlap indexes\n overlap = self.crossword.overlaps[(neighbor, var)]\n # for each of the overlap's possible values (the overlap's domain)\n for test in self.domains[neighbor]:\n # if the overlap letter is not the same\n if test[overlap[0]] != value[overlap[1]]:\n # this value constrains the neighbor's domain\n sorting_dict[value] += 1\n # sort the dictionary by the value of the sorting key\n sorted_vars = sorted(domain, key=lambda x: sorting_dict[x])\n return sorted_vars", "def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")", "def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")", "def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")", "def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")", "def sort(self, key_func):\n pass", "def domain(self):\n return self.keys()", "def sort_key(alpha):\n if not 
isinstance(alpha, dict):\n # alpha *should* be a dict, but if passed a list or a string, treat it\n # as an ordering\n try:\n alpha = {k: v for v, k in enumerate(alpha)}\n except TypeError:\n # alpha isn't iterable, and is therefore useless as a key\n alpha = {}\n a = sorted(alpha.keys(), key=lambda x: -len(x))\n\n def key(word):\n out = []\n for m in regex.finditer('(' + '|'.join(a) + ')|.', word):\n if m.group(1):\n if alpha[m[0]] is not None:\n out.append(alpha[m[0]])\n else:\n out.append(-1)\n return out\n\n return key", "def bucket_regional_domain_name(self) -> str:\n ..." ]
[ "0.77395064", "0.5994397", "0.59496844", "0.5851569", "0.5843953", "0.5838", "0.58089083", "0.5635496", "0.56327444", "0.55446255", "0.5542909", "0.5542909", "0.5528308", "0.55238223", "0.5503389", "0.5494649", "0.54859453", "0.5457136", "0.5452885", "0.544919", "0.54136103", "0.5406171", "0.5381629", "0.5381629", "0.53522646", "0.53522646", "0.5350461", "0.5340174", "0.53155005", "0.53037846" ]
0.8291568
0
draw and label a cube. edges is a list of numbers between 1 and 12, specifying which of the 12 cube edges to draw
def draw_cube(ax, xy, size, depth=0.3, edges=None, label=None, label_kwargs=None, **kwargs):
    if edges is None:
        edges = range(1, 13)

    x, y = xy
    y -= size  # set left/up corner as the first (0,0) for one cube

    # first plot background edges
    if 9 in edges:
        ax.plot([x + depth, x + depth + size], [y + depth + size, y + depth + size], **kwargs)
    if 10 in edges:
        ax.plot([x + depth + size, x + depth + size], [y + depth, y + depth + size], **kwargs)
    if 11 in edges:
        ax.plot([x + depth, x + depth + size], [y + depth, y + depth], **kwargs)
    if 12 in edges:
        ax.plot([x + depth, x + depth], [y + depth, y + depth + size], **kwargs)

    # second plot middle edges
    if 5 in edges:
        ax.plot([x, x + depth], [y + size, y + depth + size], **kwargs)
    if 6 in edges:
        ax.plot([x + size, x + size + depth], [y + size, y + depth + size], **kwargs)
    if 7 in edges:
        ax.plot([x + size, x + size + depth], [y, y + depth], **kwargs)
    if 8 in edges:
        ax.plot([x, x + depth], [y, y + depth], **kwargs)

    # last plot foreground edges
    if 1 in edges:  # top edge
        ax.plot([x, x + size], [y + size, y + size], **kwargs)
    if 2 in edges:  # right
        ax.plot([x + size, x + size], [y, y + size], **kwargs)
    if 3 in edges:  # bottom
        ax.plot([x, x + size], [y, y], **kwargs)
    if 4 in edges:  # left
        ax.plot([x, x], [y, y + size], **kwargs)

    if label:
        if label_kwargs is None:
            label_kwargs = {}
        ax.text(x + 0.5 * size, y + 0.5 * size - font_height() / 2,
                label, ha='center', va='center', **label_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeCube(c) :\n print(\"Edge Length =\",c.getLength())\n print(\"Volume =\",c.volume())\n print(\"Surface Area =\",c.surfaceArea())\n print(\"Face Diagonal =\",c.faceDiagonal())\n print(\"Space Diagonal =\",c.spaceDiagonal())", "def main() :\n c1 = Cube(5.3) # cube with edge length of 5.3\n c2 = Cube(3.1) # cube with edge length of 3.1\n\n print(\"Cube 1:\")\n writeCube(c1)\n print()\n print(\"Cube 2:\")\n writeCube(c2)", "def draw_cube(self, middle, edges, coloring=None, explain=False):\n\n # edges = set(e - 2 for e in edges)\n top = middle - Point(0, self.edge)\n\n # No comment would help you. Draw it (with explain=True).\n\n if explain:\n self.set_source_rgb(1, 0, 0)\n for i in [0, 2, 4]:\n if i not in edges and (i + 2) % 6 not in edges:\n self.draw_line(\n middle,\n top.rotated((1 + i) * pi / 3, middle)\n )\n if i in edges:\n self.draw_line(middle, top.rotated(i * pi / 3, middle))\n\n for i in range(6):\n if not (i in edges or (i + 1) % 6 in edges):\n p = top.rotated(i * pi / 3, middle)\n q = top.rotated((i + 1) * pi / 3, middle)\n if explain:\n self.set_source_rgb(0.6, 0.6, 0)\n self.draw_line(p, q)\n\n for i in range(6):\n if i in edges:\n vert = top.rotated(i * pi / 3, middle)\n\n for sgn in [-1, 1]:\n if i % 2 == 0 or (i - sgn) % 6 not in edges:\n q = middle.rotated(sgn * pi / 3, vert)\n if explain:\n self.set_source_rgb(0.3, 0.3, 0.5)\n self.draw_line(q, q + vert - middle)\n\n if explain:\n self.set_source_rgb(0, 0, 0)", "def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')", "def draw(vertices, edges):\n # pylint: disable=too-many-locals\n # NOTE: coordinates might me negative, so we need to shift\n # everything to the positive plane before we actually draw it.\n Xs = [] # noqa: N806, pylint: disable=invalid-name\n Ys = [] # noqa: N806, pylint: disable=invalid-name\n\n sug = _build_sugiyama_layout(vertices, edges)\n\n for vertex in sug.g.sV:\n # NOTE: moving boxes w/2 to the left\n Xs.append(vertex.view.xy[0] - vertex.view.w / 2.0)\n Xs.append(vertex.view.xy[0] + vertex.view.w / 2.0)\n Ys.append(vertex.view.xy[1])\n Ys.append(vertex.view.xy[1] + vertex.view.h)\n\n for edge in sug.g.sE:\n for x, y in edge.view._pts: # pylint: disable=protected-access\n Xs.append(x)\n Ys.append(y)\n\n minx = min(Xs)\n miny = min(Ys)\n maxx = max(Xs)\n maxy = max(Ys)\n\n canvas_cols = int(math.ceil(math.ceil(maxx) - math.floor(minx))) + 1\n canvas_lines = int(round(maxy - miny))\n\n canvas = AsciiCanvas(canvas_cols, canvas_lines)\n\n # NOTE: first draw edges so that node boxes 
could overwrite them\n for edge in sug.g.sE:\n # pylint: disable=protected-access\n assert len(edge.view._pts) > 1\n for index in range(1, len(edge.view._pts)):\n start = edge.view._pts[index - 1]\n end = edge.view._pts[index]\n\n start_x = int(round(start[0] - minx))\n start_y = int(round(start[1] - miny))\n end_x = int(round(end[0] - minx))\n end_y = int(round(end[1] - miny))\n\n assert start_x >= 0\n assert start_y >= 0\n assert end_x >= 0\n assert end_y >= 0\n\n canvas.line(start_x, start_y, end_x, end_y, \"*\")\n\n for vertex in sug.g.sV:\n # NOTE: moving boxes w/2 to the left\n x = vertex.view.xy[0] - vertex.view.w / 2.0\n y = vertex.view.xy[1]\n\n canvas.box(\n int(round(x - minx)),\n int(round(y - miny)),\n vertex.view.w,\n vertex.view.h,\n )\n\n canvas.text(int(round(x - minx)) + 1, int(round(y - miny)) + 1, vertex.data)\n\n return canvas.draw()", "def mlab_plt_cube(xmin, xmax, ymin, ymax, zmin, zmax):\n faces = cube_faces(xmin, xmax, ymin, ymax, zmin, zmax)\n for grid in faces:\n x, y, z = grid\n mlab.mesh(x, y, z, opacity=0.1, color=(0.1, 0.2, 0.3))", "def cube_vertices(x, y, z, n):\n #def cube_vertices(self):\n # \"\"\" Return the vertices of the cube at position x, y, z with size 2*n.\n #\n # \"\"\"\n # return [\n # x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n # x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n # x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n # x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n # x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n # x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n # ]\n return [\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n ]", "def print_cube(num):\n print(\"Cube: {}\".format(num * num * num))", "def print_cube(num):\n print(\"Cube: {}\".format(num * num * num))", "def draw_cube(self, points, color=(255, 0, 0)):\n\n # draw front\n self.draw_line(points[0], points[1], color)\n self.draw_line(points[1], points[2], color)\n self.draw_line(points[3], points[2], color)\n self.draw_line(points[3], points[0], color)\n\n # draw back\n self.draw_line(points[4], points[5], color)\n self.draw_line(points[6], points[5], color)\n self.draw_line(points[6], points[7], color)\n self.draw_line(points[4], points[7], color)\n\n # draw sides\n self.draw_line(points[0], points[4], color)\n self.draw_line(points[7], points[3], color)\n self.draw_line(points[5], points[1], color)\n self.draw_line(points[2], points[6], color)\n\n # draw dots\n self.draw_dot(points[0], point_color=color, point_radius=4)\n self.draw_dot(points[1], point_color=color, point_radius=4)\n\n # draw x on the top\n self.draw_line(points[0], points[5], color)\n self.draw_line(points[1], points[4], color)", "def draw_cube(self, vec):\n # TOP FACE\n gl.glBegin(gl.GL_QUADS)\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n # BOTTOM FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + 
self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # FRONT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # BACK FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n # RIGHT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n # LEFT FACE\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glEnd()", "def CubeGraph(n):\n theta = float(pi/n)\n\n d = {'':[]}\n dn={}\n p = {'':(float(0),float(0))}\n pn={}\n\n # construct recursively the adjacency dict and the positions\n for i in range(n):\n ci = float(cos(i*theta))\n si = float(sin(i*theta))\n for v,e in d.iteritems():\n v0 = v+'0'\n v1 = v+'1'\n l0 = [v1]\n l1 = [v0]\n for m in e:\n l0.append(m+'0')\n l1.append(m+'1')\n dn[v0] = l0\n dn[v1] = l1\n x,y = p[v]\n pn[v0] = (x, y)\n pn[v1] = (x+ci, y+si)\n d,dn = dn,{}\n p,pn = pn,{}\n\n # construct the graph\n r = Graph(name=\"%d-Cube\"%n)\n r.add_vertices(d.keys())\n for u,L in d.iteritems():\n for v in L:\n r.add_edge(u,v)\n r.set_pos(p)\n\n return r", "def FoldedCubeGraph(n):\n\n if n < 1:\n raise ValueError(\"The value of n must be at least 2\")\n\n g = CubeGraph(n-1)\n g.name(\"Folded Cube Graph\")\n\n # Complementing the binary word\n def complement(x):\n x = x.replace('0','a')\n x = x.replace('1','0')\n x = x.replace('a','1')\n return x\n\n for x in g:\n if x[0] == '0':\n g.add_edge(x,complement(x))\n\n return g", "def map_face(self):\n #Array Order: U0,D1,R2,L3,F4,B5,\n \n cube_list = []\n cube_list = self.cube.definition()\n \n for index, cubit in enumerate(self.faces['Up']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index]])\n for index, cubit in enumerate(self.faces['Ri']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+9]])\n for index, cubit in enumerate(self.faces['Ft']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+18]])\n for index, cubit in enumerate(self.faces['Dn']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+27]])\n for index, cubit in enumerate(self.faces['Le']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+36]])\n for index, cubit in enumerate(self.faces['Bk']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+45]])", "def drawCube( self ):\n glBegin(GL_QUADS);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n 
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def drawCube(self):\r\n glBegin(GL_QUADS);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glEnd()", "def GUI_Cube(self,canvas,XYS):\n X,Y,S = XYS\n cUp = [];cFt = [];cDn = [];cBk = [];cRi = [];cLe = []\n cUp_xi=[S + X+S*i for i in range(3)]\n cUp_yi=[Y+S*i for i in range(3)]\n cFt_xi=[S + X+S*i for i in range(3)]\n cFt_yi=[4*S+Y+S*i for i in range(3)]\n cLe_xi=[X+S*i-3*S for i in range(3)]\n cLe_yi=[4*S+Y+S*i for i in range(3)]\n cRi_xi=[X+S*i+5*S for i in range(3)]\n cRi_yi=[4*S+Y+S*i for i in range(3)]\n cDn_xi=[S + X+S*i for i in range(3)]\n cDn_yi=[2*S+2*3*S+Y+S*i for i in range(3)]\n cBk_xi=[X+S*i+9*S for i in range(3)]\n cBk_yi=[4*S+Y+S*i for i in range(3)]\n\n x=0\n for j in range(3):\n for i in range(3):\n cUp.append(canvas.create_rectangle(cUp_xi[i],cUp_yi[j],cUp_xi[i]+S,cUp_yi[j]+S,fill='white',tags = ('Up',x+0)))\n cFt.append(canvas.create_rectangle(cFt_xi[i],cFt_yi[j],cFt_xi[i]+S,cFt_yi[j]+S,fill='green',tags = ('Ft',x+18)))\n cDn.append(canvas.create_rectangle(cDn_xi[i],cDn_yi[j],cDn_xi[i]+S,cDn_yi[j]+S,fill='yellow',tags = ('Dn',x+27))) \n cBk.append(canvas.create_rectangle(cBk_xi[i],cBk_yi[j],cBk_xi[i]+S,cBk_yi[j]+S,fill='blue',tags = ('Bk',x+45)))\n 
cRi.append(canvas.create_rectangle(cRi_xi[i],cRi_yi[j],cRi_xi[i]+S,cRi_yi[j]+S,fill='red',tags = ('Ri',x+9)))\n cLe.append(canvas.create_rectangle(cLe_xi[i],cLe_yi[j],cLe_xi[i]+S,cLe_yi[j]+S,fill='orange',tags = ('Le',x+36))) \n x+=1\n\n return {'Up':cUp,'Dn':cDn,'Ri':cRi,'Le':cLe,'Ft':cFt,'Bk':cBk}", "def drawCube( self ):\n glBegin(GL_QUADS);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def draw_edges(self):\n pass", "def draw_cube(self, window):\n size = pygame.display.get_surface().get_size()\n width = (size[0]/4)\n\n window.fill((000,000,000))\n\n self.draw_face(\"U\", window, (0 + (width*1), 0 + (width*0)), width)\n self.draw_face(\"L\", window, (0 + (width*0), 0 + (width*1)), width)\n self.draw_face(\"F\", window, (0 + (width*1) * 1, 0 + (width*1)), width)\n self.draw_face(\"R\", window, (0 + (width*2), 0 + (width*1)), width)\n self.draw_face(\"B\", window, (0 + (width*3), 0 + (width*1)), width)\n self.draw_face(\"D\", window, (0 + (width*1), 0 + (width*2)), width)\n\n pygame.display.update()", "def create_graph_on_unit_cube(n_repeaters, radius, draw, seed=2):\r\n np.random.seed = seed\r\n G = nx.random_geometric_graph(n=n_repeaters, radius=radius, dim=2, seed=seed)\r\n for node in G.nodes():\r\n G.nodes[node]['type'] = 'repeater_node'\r\n color_map = ['blue'] * len(G.nodes)\r\n # Create the end nodes\r\n G.add_node(\"C\", pos=[0, 0], type='end_node')\r\n G.add_node(\"B\", pos=[1, 1], type='end_node')\r\n G.add_node(\"A\", pos=[0, 1], type='end_node')\r\n G.add_node(\"D\", pos=[1, 0], type='end_node')\r\n G.nodes[3]['pos'] = [0.953, 0.750]\r\n G.nodes[5]['pos'] = [0.25, 0.50]\r\n # Manually connect the end nodes to the three nearest nodes\r\n G.add_edge(\"C\", 8)\r\n G.add_edge(\"C\", 5)\r\n G.add_edge(\"C\", 2)\r\n G.add_edge(\"B\", 9)\r\n G.add_edge(\"B\", 4)\r\n G.add_edge(\"B\", 3)\r\n G.add_edge(\"A\", 1)\r\n G.add_edge(\"A\", 2)\r\n G.add_edge(\"A\", 9)\r\n G.add_edge(\"D\", 3)\r\n G.add_edge(\"D\", 6)\r\n G.add_edge(\"D\", 7)\r\n color_map.extend(['green'] * 4)\r\n for node in G.nodes():\r\n G.nodes[node]['xcoord'] = G.nodes[node]['pos'][0]\r\n G.nodes[node]['ycoord'] = G.nodes[node]['pos'][1]\r\n # Convert node labels to strings\r\n label_remapping = {key: str(key) for key in G.nodes() if type(key) is not str}\r\n G = nx.relabel_nodes(G, label_remapping)\r\n if draw:\r\n draw_graph(G)\r\n return G", "def 
Face_Cycle_L(self,event):\n t=event.widget.find_closest(event.x, event.y)[0]\n u=int(self.canvas.itemcget(t,\"tags\").split()[1])\n if u not in [4,13,22,31,40,49]:\n v=self.colours_face[self.ocol[self.canvas.itemcget(t,\"fill\")][0]][0][0]\n self.cubestring[int(u)]=v\n self.cube.cube = self.cubestring#Cube.set(self.cubestring)\n self.map_face()\n else:\n print(\"Cant Change Center Cubit\")", "def set_cube_binning(cls, detx_edges, dety_edges, energy_edges):\n empty_cube_data = np.zeros((len(energy_edges) - 1,\n len(dety_edges) - 1,\n len(detx_edges) - 1))\n\n counts_cube = FOVCube(coordx_edges=detx_edges,\n coordy_edges=dety_edges,\n energy_edges=energy_edges,\n data=Quantity(empty_cube_data, ''), # counts\n scheme='bg_counts_cube')\n\n livetime_cube = FOVCube(coordx_edges=detx_edges,\n coordy_edges=dety_edges,\n energy_edges=energy_edges,\n data=Quantity(empty_cube_data, 'second'),\n scheme='bg_livetime_cube')\n\n background_cube = FOVCube(coordx_edges=detx_edges,\n coordy_edges=dety_edges,\n energy_edges=energy_edges,\n data=Quantity(empty_cube_data, '1 / (s TeV sr)'),\n scheme='bg_cube')\n\n return cls(counts_cube=counts_cube,\n livetime_cube=livetime_cube,\n background_cube=background_cube)", "def testCube(self):\n cube = {i:(i^1,i^2,i^4) for i in range(8)}\n self.check(cube,6)", "def cube_vertices(x, y, z, n):\r\n return [\r\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\r\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\r\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\r\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\r\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\r\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\r\n ]", "def make_edges(states,unit_cube):\n S,T = states.shape\n \n max_drifts = states.max(axis = 0)\n \n number_system = np.arange(T)\n \n number_system[T-1] = 1\n for i in range(1,T):\n number_system[T-1-i] = number_system[T-i] * (max_drifts[T-i]+1)\n\n edges = -1*np.ones((S,2**T),dtype = int)\n \n for i,s in enumerate(states):\n for j,c in enumerate(unit_cube):\n if (s - c).min() >= 0:\n edges[i,j] = i - (number_system*c).sum()\n \n return edges", "def cube_area(edge : number) -> number:\n area = 6*edge*edge\n\n return area", "def Face_Cycle_R(self,event):\n t=event.widget.find_closest(event.x, event.y)[0]\n u=int(self.canvas.itemcget(t,\"tags\").split()[1])\n if u not in [4,13,22,31,40,49]:\n v=self.colours_face[self.ocol[self.canvas.itemcget(t,\"fill\")][1]][0][0]\n self.cubestring[u]=v\n self.cube.cube = self.cubestring#Cube.set(self.cubestring)\n self.map_face()\n else:\n print(\"Cant Change Center Cubit\")", "def draw_edges(img, data_vertex, data_edges):\r\n i = 0\r\n for v1, v2, v3 in data_edges: # get the numbers of string\r\n # # v1, v2, v3 = v1 - 1, v2 - 1, v3 - 1 # change the numbering\r\n # print(v1,v2,v3)\r\n img = draw_line(img, data_vertex, v1, v2)\r\n img = draw_line(img, data_vertex, v1, v3)\r\n img = draw_line(img, data_vertex, v2, v3)\r\n i += 1\r\n # print(i)\r\n return img", "def test_4_1_5D_cube_init(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0),\n (1, 1, 1, 0, 0), (1, 1, 1, 1, 0), (1, 1, 1, 0, 1),\n (1, 1, 0, 1, 0),\n (1, 1, 0, 1, 1), (1, 1, 0, 0, 1), (1, 0, 1, 0, 0),\n (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1),\n (1, 0, 0, 0, 1), (0, 1, 0, 0, 0), (0, 1, 1, 0, 0),\n (0, 1, 1, 1, 0),\n (0, 1, 1, 1, 1), (0, 1, 1, 0, 1), (0, 1, 0, 1, 0),\n (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 
0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1),\n (0, 0, 1, 0, 1), (0, 0, 0, 1, 0), (0, 0, 0, 1, 1),\n (0, 0, 0, 0, 1),\n (0.5, 0.5, 0.5, 0.5, 0.5)]\n\n nn_checks = {(0, 1, 0, 1, 1): [(0, 0, 0, 0, 0), (\n 0.5, 0.5, 0.5, 0.5, 0.5), (0, 0, 0, 1, 1), (1, 1, 0, 1, 1),\n (0, 1, 0, 0, 0),\n (0, 1, 0, 0, 1),\n (0, 1, 0, 1, 0),\n (0, 0, 0, 0, 1),\n (1, 1, 1, 1, 1),\n (0, 1, 1, 1, 1),\n (0, 0, 0, 1, 0)]}\n\n init_triangulation(5, 0, check, nn_checks)" ]
[ "0.6443449", "0.6413569", "0.6355094", "0.63017035", "0.6130413", "0.6115143", "0.60516644", "0.59508514", "0.59508514", "0.59094137", "0.58450186", "0.5810668", "0.5770342", "0.57679313", "0.5727801", "0.5716166", "0.57069796", "0.56948054", "0.5686037", "0.5677185", "0.5661656", "0.5641656", "0.5637532", "0.5609416", "0.5579531", "0.5552842", "0.5552105", "0.55459493", "0.5537661", "0.5478198" ]
0.73245275
0
Validates the .workflow file.
def validate_syntax(self):
    resolves_present = False
    uses_present = False
    if not self.wf.get('workflow', None):
        pu.fail('A workflow block must be present\n')
    else:
        for _, wf_block in dict(self.wf['workflow']).items():
            if wf_block.get('resolves', None):
                resolves_present = True
        if not resolves_present:
            pu.fail('[resolves] attribute must be present\n')
    if not self.wf.get('action', None):
        pu.fail('Atleast one action block must be present\n')
    else:
        for _, a_block in self.wf['action'].items():
            if a_block.get('uses', None):
                uses_present = True
        if not uses_present:
            pu.fail('[uses] attribute must be present\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def validate_input_file(self):\r\n return os.path.isfile(self.input_file)", "def Validate(self, relative_file, contents):\n pass", "def validate(self):\n print(\"Validating \")\n sha256_test = _get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")", "def validate():", "def _validate_yaml(self):\n\n # verify the format is correct\n if self.validater == 'yamale':\n\n import yamale\n\n print('Validating yaml file with yamale.')\n cwd = Path(os.path.dirname(__file__))\n schema_path = str(cwd.parent / 'schema') + '/generic_schema.yaml'\n schema = yamale.make_schema(schema_path)\n data = yamale.make_data(self.yaml_path)\n try:\n yamale.validate(schema, data, strict=False)\n print('Validation success! 👍')\n return True\n except ValueError as e:\n print(\n 'Yamale found that your file, '\n + self.yaml_path\n + ' is not formatted correctly.'\n )\n print(e)\n return False\n else:\n print('Did not validate yaml.')\n print('If unexpected results occur, try installing yamale and rerun.')\n return True", "def validate(self):\n if os.path.exists(self.filename):\n with NWBHDF5IO(self.filename, mode='r') as io:\n errors = pynwb_validate(io)\n if errors:\n for err in errors:\n raise Exception(err)", "def validate_workflow_path(self, workflow_path):\n workflow_path = Path(workflow_path)\n\n if not workflow_path.exists():\n raise FileNotFoundError(\n f\"workflow path, {self.workflow_path}, does not exist\"\n )\n if not (workflow_path / \"testkraken_spec.yml\").is_file():\n raise FileNotFoundError(\n f\"Missing required file with specification: \"\n f\"{self.workflow_path / 'testkraken_spec.yml'}\"\n )\n return workflow_path", "def is_valid_file(self, file_path):\n return True", "def validate_document(settings):\n if not settings.filename.endswith(\".xml\"):\n print_error(\"{} must have a .xml extension to interoperate with build tool\".format(settings.filename))\n exit(1)\n \n failed = False\n \n print color(\"Validating: \", color_code(BLUE)), settings.filename\n if platform in [\"linux\", \"linux2\"]: \n stat_info = os.stat(settings.filename)\n gid = stat_info.st_gid\n mode = stat_info.st_mode & 0777\n group = getgrgid(gid)[0]\n if group != \"cs0220ta\":\n print_error(\"Wrong group, you MUST run `chgrp cs0220ta {}'\".format(settings.filename))\n failed = True\n if mode ^ 0660 != 0000:\n print_error(\"Wrong permissions, you MUST run `chmod 660 {}'\".format(settings.filename))\n failed = True\n \n invalid_lt = re.compile(\"<(?!/?(assignment|problem|year|title|name|blurb|due))\")\n invalid_amp = re.compile(r\"&(?!\\w{1,10};)\")\n invalid_char = re.compile(r\"[^\\x00-\\x7f]\")\n \n # Some more manual checking \n with open(settings.filename) as f:\n for num, line in enumerate(f):\n problem_lt = re.search(invalid_lt, line)\n if problem_lt:\n print_error(\"Invalid < character on line {} at character {}\".format(num+1, problem_lt.start()))\n print color(\"\\tMake sure the tags you are using are correct.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_amp = re.search(invalid_amp, 
line)\n if problem_amp:\n print_error(\"Invalid raw & character on line {} at character {}\".format(num+1, problem_amp.start()))\n print color(\"\\tA literal & can be escaped by using \\\"&amp;\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_char = re.search(invalid_char, line)\n if problem_char:\n print_error(\"Invalid non-ASCII character on line {} at character {}\".format(num+1, problem_char.start()))\n failed = True\n \n try:\n tree = ET.parse(settings.filename)\n except Exception:\n print_error(\"XML in {} could not be parsed at all.\".format(settings.filename))\n print color(\"\\tAre you sure all tags are closed?\", color_code(YELLOW))\n print color(\"\\nPlease rerun validation once XML is fixed\", color_code(CYAN))\n exit(1)\n try:\n document = Document(settings.filename)\n document.parse_tree(tree)\n document.validate()\n except ImproperXmlException as e:\n print_error(e.args[0])\n print color(\"\\nPlease rerun validation after fixing\", color_code(CYAN))\n exit(1)\n \n for i, version in enumerate(document.versions):\n print color(\"\\n\\nProblem {}: {}\\n\".format(i+1, version.filename),\n color_code(BLUE))\n validate_version(version, failed)", "def test_validate_valid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n self.assertIsNone(resumeschema.validate(self.valid_resume))", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def validate(file_in) :\n\tname = str(file_in.name)\n\tif name[-4:] != \".xml\" and name[-4:] != \".XML\" :\n\t\treturn False\n\txsd = open('wcdb/WorldCrises.xsd.xml', 'r')\n\txmlFile = open('wcdb/temp.xml', 'w')\n\txmlFile.write(file_in.read())\n\txmlFile = open('wcdb/temp.xml', 'r')\n\ttry:\n\t\tpsvi = pyxsval.parseAndValidate(\"wcdb/temp.xml\",\n\t\t\t\"wcdb/WorldCrises.xsd.xml\", xmlIfClass=pyxsval.XMLIF_ELEMENTTREE)\n\t\ttree = psvi.getTree()\n\texcept pyxsval.XsvalError, e:\n\t\treturn 'Validation aborted. ' + str(e)\n\texcept GenXmlIfError, e:\n\t\treturn 'Parsing aborted. ' + str(e)\n\texcept Exception as e:\n\t\t# catch all\n\t\treturn 'Exception. 
' + str(e)\n\t#handle invalid case\n\treturn tree", "def validate(settings):\n if not settings.filename.endswith(\".xml\"):\n print_error(\"{} must have a .xml extension to interoperate with build tool\".format(settings.filename))\n exit(1)\n \n failed = False\n \n print color(\"Validating: \", color_code(BLUE)), settings.filename\n if platform in [\"linux\", \"linux2\"] and not(settings.skip_permissions):\n stat_info = os.stat(settings.filename)\n gid = stat_info.st_gid\n mode = stat_info.st_mode & 0777\n group = getgrgid(gid)[0]\n if group != \"cs0220ta\":\n print_error(\"Wrong group, you MUST run `chgrp cs0220ta {}'\".format(settings.filename))\n failed = True\n if mode ^ 0660 != 0000:\n print_error(\"Wrong permissions, you MUST run `chmod 660 {}'\".format(settings.filename))\n failed = True\n \n invalid_lt = re.compile(\"<(?!/?(problem|usedin|version|authors?|year|topics?|types?|param|deps?|dependency|dependencies|body|solution|rubric|resource))\")\n invalid_amp = re.compile(r\"&(?!\\w{1,10};)\")\n invalid_char = re.compile(r\"[^\\x00-\\x7f]\")\n \n # Some more manual checking \n with open(settings.filename) as f:\n for num, line in enumerate(f):\n if len(string.rstrip(line)) > 80:\n print_warning(\"Line {} longer than 80 characters (has {})\".format(num+1, len(string.rstrip(line))))\n failed = True\n problem_lt = re.search(invalid_lt, line)\n if problem_lt:\n print_error(\"Invalid < character on line {} at character {}\".format(num+1, problem_lt.start()))\n print color(\"\\tA literal < can be escaped using \\\"&lt;\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_amp = re.search(invalid_amp, line)\n if problem_amp:\n print_error(\"Invalid raw & character on line {} at character {}\".format(num+1, problem_amp.start()))\n print color(\"\\tA literal & can be escaped by using \\\"&amp;\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_char = re.search(invalid_char, line)\n if problem_char:\n print_error(\"Invalid non-ASCII character on line {} at character {}\".format(num+1, problem_char.start()))\n failed = True\n \n try:\n tree = ET.parse(settings.filename)\n except Exception:\n print_error(\"XML in {} could not be parsed.\".format(settings.filename))\n print color(\"\\nPlease rerun validation once XML is fixed\", color_code(CYAN))\n exit(1)\n if tree.getroot().tag == 'assignment':\n print_error(\"This looks like an assignment xml file. 
Did you mean 22edit validate_doc?\")\n exit(1)\n try:\n problem = Problem(settings.filename)\n problem.parse_tree(tree, False)\n except ImproperXmlException as e:\n print_error(e.args[0])\n print color(\"\\nPlease rerun validation after fixing\", color_code(CYAN))\n exit(1)\n \n firstProblem = True\n for version in problem.get_versions():\n if not version.standalone and not firstProblem:\n continue\n firstProblem = False\n \n print color(\"\\n\\nVERSION {}:\\n\".format(version.vid),\n color_code(BLUE))\n validate_version(version, failed)", "def validate_settings(self):\n\t\t# Check all attributes exist\n\t\tfor key, value in vars(self).items():\n\t\t if hasattr(self, key) == False:\n\t\t\t\tUtility.report_error(1, '%s: Missing attribute \"%s\"' % (self._file_path, key))\n\n\t\t# Check mandatory attributes\n\t\tif self.is_valid_status(self.status) == False:\n\t\t\tUtility.report_error(1, '%s: Status \"%s\" is not valid' % (self._file_path, self.status))\n\n\t\tif self.definition == '' or self.definition == None:\n\t\t\tUtility.report_error(1, '%s: Definition field is empty or missing' % (self._file_path))\n\t\t\n\t\tif self.term == '' or self.term == None:\n\t\t\tUtility.report_error(1, '%s: Term field is empty or missing' % (self._file_path))\n\n\t\t# If status is neither approved or elaboration reject reason must be stated\n\t\tif (self.status == 'rejected' or self.status == 'replaced') and (self.status_reason == '' or self.status_reason == None):\n\t\t\tUtility.report_error(1, '%s: \"Status reason\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\t# If status is rejected a rejected by user must be specified\n\t\tif self.status == 'rejected' and (self.rejected_by == '' or self.rejected_by == None):\n\t\t\tUtility.report_error(1, '%s: \"Rejected by\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\t# If status is replaced then Replaced by must be specified\n\t\tif self.status == 'replaced' and (self.replaced_by == None or self.replaced == ''):\n\t\t\tUtility.report_error(1, '%s: \"Replaced by\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\tself.created_by = self.make_link_list('stakeholders', 'Created by', self.created_by, False)\n\t\tself.rejected_by = self.make_link_list('stakeholders', 'Rejected by', self.rejected_by, False)\n\t\tself.replaced_by = self.make_link_list('glossary', 'Replaced by', self.replaced_by)\n\n\t\tif self.is_string_date(self.created_on) == False:\n\t\t\tUtility.report_error(1, '%s: Created on field has value \"%s\", but it must be date in YYYY-MM-DD format' % (self._file_path, self.created_on))\n\n\t\tif self.is_string_date(self.rejected_on) == False:\n\t\t\tUtility.report_error(1, '%s: Rejected on field has value \"%s\", but it must be date in YYYY-MM-DD format' % (self._file_path, self.rejected_on))", "def check_wf(self,wf):\n pass", "def __checkFile(self, filename):\n \n try:\n with open(filename, 'r') as f:\n first_line = f.readline()\n \n if not len(first_line.split(\"\\t\")) == 19:\n raise BadProteomeScoutFile(\"N/A\")\n \n \n except:\n BadProteomeScoutFile(\"Invalid ProteomeScout flat file %s.\\nFile is invalid or corrupted\" % str(filename))", "def is_file_valid(self):\n # verifying that the other tests are even necessary\n if not self.validate_file_release_notes_exists():\n return False\n\n validations = [\n self.is_release_notes_changed(),\n self.is_valid_release_notes_structure(),\n ]\n\n return 
all(validations)", "def _validate_document(self, config):\n if 'gathering_phase' in self.config:\n self._validate_gathering_phase(self.config['gathering_phase'])\n else:\n comm.abort('ERROR: invalid config file',\n 'The required gathering_phase was not in the config', 1)\n\n if 'inclusion_phase' in self.config:\n self._validate_inclusion_phase(self.config['inclusion_phase'])\n\n if 'action_phase' in self.config:\n self._validate_action_phase(self.config['action_phase'])\n else:\n comm.abort('ERROR: invalid config file',\n 'The required action_phase was not in the config', 1)", "def validate(self):\n with open(os.path.join(settings.MEDIA_ROOT, self.file.name)) as file:\n lines = file.readlines()\n validators = ['os.', 'from os', 'io.', 'from io', 'open(', 'system(']\n for line in lines:\n for validator in validators:\n if validator in line:\n return False\n return True", "def _validate(self):\n pass", "def isValid(self):\n return self.file_name != \"\" and self.line_number != 0", "def test_is_valid_invalid_resume(self):\n self.assertFalse(resumeschema.is_valid(self.invalid_resume))", "def validate(ctx):\n handler = ValidateCommandHandler(ctx.obj['qa_dir'])\n exit(0 if handler.validate() else 1)", "def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)", "def _validate(self):\n\n if not self.definition_func or not callable(self.definition_func):\n raise RadishError(\"The step '{0}' does not have a step definition\".format(self.sentence))", "def __validate():\n # TODO: implement", "def test_is_valid_valid_resume(self):\n self.assertTrue(resumeschema.is_valid(self.valid_resume))", "def validate(shapefile, path):\n if shapefile is None:\n shapefile = os.path.join(\n os.path.dirname(__file__), \"tests\", \"reproschema-shacl.ttl\"\n )\n if os.path.isdir(path):\n conforms = validate_dir(path, shapefile)\n else:\n data = load_file(path, started=False)\n conforms, vtext = validate_data(data, shapefile)\n if not conforms:\n lgr.critical(f\"File {path} has validation errors.\")\n raise ValueError(vtext)\n lgr.info(f\"{path} conforms.\")\n return conforms", "def _Validate(self):\n\n \n if self.cmsGenNode.applicationControls.get(\"generator\", None) == None:\n msg = \"No cmsGen generator option provided\"\n raise RuntimeError, msg\n \n return WorkflowMaker._Validate(self)" ]
[ "0.6631906", "0.6614255", "0.6606154", "0.6537318", "0.63448745", "0.62795186", "0.62735194", "0.6186111", "0.61825204", "0.6171616", "0.61469364", "0.61425644", "0.61318344", "0.6077196", "0.6075853", "0.6061646", "0.60611504", "0.6057122", "0.60558885", "0.6055866", "0.6049478", "0.604594", "0.5995211", "0.5978283", "0.5959923", "0.59549236", "0.5946873", "0.59457165", "0.593377", "0.59320325" ]
0.67150223
0
normalize the dictionary representation of the workflow
def normalize(self): # modify from this: # # "workflow": { # "test-and-deploy": { # "resolves": "deploy" # } # } # # to this: # # "workflow": { # "name": "test-and-deploy", # "on": "push", # "resolves": "deploy" # } for wf_name, wf_block in dict(self.wf['workflow']).items(): self.wf['name'] = wf_name self.wf['on'] = wf_block.get('on', 'push') self.wf['resolves'] = wf_block['resolves'] # python 2 to 3 compatibility try: basestring except UnboundLocalError: basestring = str # create a list for all attributes that can be either string or list if isinstance(self.wf['resolves'], basestring): self.wf['resolves'] = [self.wf['resolves']] elif not self.is_list_of_strings(self.wf['resolves']): pu.fail('[resolves] must be a list of strings or a string\n') if not isinstance(self.wf['on'], basestring): pu.fail('[on] attribute must be a string\n') for _, a_block in self.wf['action'].items(): if not isinstance(a_block['uses'], basestring): pu.fail('[uses] attribute must be a string\n') if a_block.get('needs', None): if isinstance(a_block['needs'], basestring): a_block['needs'] = [a_block['needs']] elif not self.is_list_of_strings(a_block['needs']): pu.fail( '[needs] attribute must be a list of strings \ or a string\n') if a_block.get('runs', None): if isinstance(a_block['runs'], basestring): a_block['runs'] = [a_block['runs']] elif not self.is_list_of_strings(a_block['runs']): pu.fail( '[runs] attribute must be a list of strings \ or a string\n') if a_block.get('args', None): if isinstance(a_block['args'], basestring): a_block['args'] = a_block['args'].split() elif not self.is_list_of_strings(a_block['args']): pu.fail( '[args] attribute must be a list of strings \ or a string\n') if a_block.get('env', None): if not isinstance(a_block['env'], dict): pu.fail('[env] attribute must be a dict\n') if a_block.get('secrets', None): if not self.is_list_of_strings(a_block['secrets']): pu.fail('[secrets] attribute must be a list of strings\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise_workflow(workflow_dict):\n normalise_process(workflow_dict)\n if not 'steps' in workflow_dict:\n exit_perm_fail(\"No steps in Workflow\")\n\n if isinstance(workflow_dict['steps'], dict):\n new_steps = []\n for step_id, step in workflow_dict['steps'].items():\n step['id'] = step_id\n new_steps.append(step)\n workflow_dict['steps'] = new_steps\n\n for step in workflow_dict['steps']:\n if 'in' in step:\n if isinstance(step['in'], dict):\n new_in = []\n for key, value in step['in'].items():\n if isinstance(value, str):\n new_in.append({'id': key, 'source': value})\n elif isinstance(value, dict):\n value['id'] = key\n new_in.append(value)\n step['in'] = new_in\n\n if 'out' in step:\n if not isinstance(step['out'], list):\n exit_perm_fail(\"The out attribute of a workflow step must be an array\")\n for i, output in enumerate(step['out']):\n if isinstance(output, str):\n step['out'][i] = {'id': output}", "def normalize(self):\n\n pass", "def _normalize(self, dictionnary):\r\n copy_dict = OrderedDict()\r\n for k,v in dictionnary.items():\r\n if isinstance(v, OrderedDict):\r\n copy_dict[k.replace('#','').replace('@','')] = self._normalize(v)\r\n else:\r\n copy_dict[k.replace('#','').replace('@','')] = v\r\n return copy_dict", "def load_from_dict(input_dict):\n view_steps = [WorkflowViewStepInformation.load_from_dict(data) \n for data in input_dict[WorkflowInformation.KEY_VIEW_STEPS]]\n wf_steps = [WorkflowStepInformation.load_from_dict(data)\n for data in input_dict[WorkflowInformation.KEY_WF_STEPS]]\n input_dict[WorkflowInformation.KEY_VIEW_STEPS] = view_steps\n input_dict[WorkflowInformation.KEY_WF_STEPS] = wf_steps\n return WorkflowInformation(input_dict)", "def fix_big_data_passing(workflow: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]: # Tekton change signature\n return workflow", "def normalise_process(process_desc):\n if not 'inputs' in process_desc:\n exit_validation(\"Error: no inputs defined for Process\")\n\n if isinstance(process_desc['inputs'], dict):\n process_desc['inputs'] = normalise_parameter(process_desc['inputs'])\n\n if not 'outputs' in process_desc:\n exit_validation(\"Error: no outputs defined for Process\")\n\n if isinstance(process_desc['outputs'], dict):\n process_desc['outputs'] = normalise_parameter(process_desc['outputs'])", "def _reconstruct_workflow(workflow_record, hints, requirements, inputs, outputs):\n rec = workflow_record[\"w\"]\n return Workflow(name=rec[\"name\"], hints=hints, requirements=requirements, inputs=inputs,\n outputs=outputs, workflow_id=rec[\"id\"])", "def _from_dict_transform(cls: Type[TVerifiedElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n data = super()._from_dict_transform(data)\n\n if 'verified' in data:\n data['is_verified'] = data.pop('verified')\n\n if 'verification_code' in data:\n del data['verification_code']\n\n return data", "def process_dict(self, dictionary):\n return self._flatten(dictionary)", "def normalization(obj):\n dic = obj.mainfield.para_dict.copy()\n for item in obj.forfield: dic.update(item.para_dict)\n for item in obj.existfield: dic.update(item.para_dict)\n\n global_dic = number_type(dic)\n obj.normal_guards = norm_rep(global_dic, obj.all_sentence)\n\n main_dic = number_type(obj.mainfield.para_dict)\n obj.mainfield.content = norm_rep(main_dic, obj.mainfield.content)\n\n for index in range(len(obj.forfield)):\n obj.forfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # for_dic = number_type(temp_dic)\n obj.forfield[index].content = 
norm_rep(global_dic, obj.forfield[index].content)\n print(global_dic, obj.forfield[index].para_dict)\n obj.forfield[index].para_dict = pair_2_dict(global_dic, obj.forfield[index].para_dict)\n\n for index in range(len(obj.existfield)):\n obj.existfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # exist_dic = number_type(temp_dic)\n obj.existfield[index].content = norm_rep(global_dic, obj.existfield[index].content)\n obj.existfield[index].para_dict = pair_2_dict(global_dic, obj.existfield[index].para_dict)\n\n # change para_dict: {'i':'NODE} -> {'NODE_1', 'NODE'}\n obj.mainfield.para_dict = pair_2_dict(global_dic, obj.mainfield.para_dict)", "def _preprocess(self, feature_dict):\n return feature_dict", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def _cleanse_dict(original):\n return {k: v for k, v in original.items() if \"_pass\" not in k}", "def _unstructure(self) -> dict[str, Any]:\n return {\n \"class\": \"Experiment\",\n \"components\": {\n k: v._unstructure(experiment=self) for k, v in self.components.items()\n },\n }", "def extract_workflow_data(workflow):\n workflow_data = {}\n workflow_data[\"id\"] = workflow.id\n workflow_data['name'] = workflow.name\n workflow_data['created_at'] = workflow.created_at\n workflow_data['updated_at'] = workflow.updated_at\n workflow_data[\"state\"] = workflow.state\n return workflow_data", "def _cleanse_dict(original):\n return dict((k, v) for k, v in original.items() if \"_pass\" not in k)", "def _to_dict_transform(self, data: Dict[str, Any]) -> Dict[str, Any]:\n if 'is_verified' in data:\n data['verified'] = data.pop('is_verified')\n\n data = super()._to_dict_transform(data)\n\n return data", "def __cleanState__(self, stateDict):\n for k in list(stateDict.keys()):\n if k.startswith('_'):\n stateDict.pop(k)\n return stateDict", "def transform(attrs: dict) -> dict:\n\n pass", "def _from_dict_transform(cls: Type[TElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n if 'application' in data:\n data['created_by'] = data.pop('application')\n\n if 'added_timestamp' in data:\n data['created_ts'] = data.pop('added_timestamp')\n\n if 'created_ts' not in data:\n # some really old nin entries in the database have neither created_ts nor modified_ts\n data['_no_created_ts_in_db'] = True\n data['created_ts'] = datetime.fromisoformat('1900-01-01')\n\n if 'modified_ts' not in data:\n data['_no_modified_ts_in_db'] = True\n # Use created_ts as modified_ts if no explicit modified_ts was found\n data['modified_ts'] = data['created_ts']\n\n return data", "def preprocess_attributes(business):\n\tattrs = business[ATTRIBUTES]\n\tattrs_dict = dict()\n\n\tfor (key, val) in attrs.items():\n\t\tval = ast.literal_eval(val)\n\t\tif val is None:\n\t\t\tcontinue\n\n\t\t# if val is a json object\n\t\tif isinstance(val, dict):\n\t\t\tinner_dict = dict()\n\t\t\tfor (inner_key, inner_val) in val.items():\n\t\t\t\tinner_key = inner_key\n\t\t\t\tinner_val = inner_val\n\t\t\t\tinner_dict[inner_key] = 
inner_val\n\t\t\tattrs_dict[key] = inner_dict\n\t\telse:\n\t\t\tattrs_dict[key] = val\n\tbusiness[ATTRIBUTES] = attrs_dict", "def transform(self, source: dict) -> dict:\n pass", "def _reconstruct_metadata(metadata_record):\n rec = metadata_record[\"m\"]\n return {key: val for key, val in rec.items() if key != \"state\"}", "def process_state_dict_to_server(self) -> dict:\n clean_state_dict = self.cleanup_state_dict_to_server()\n\n if self.is_sparse:\n for key, param in clean_state_dict.items():\n if param.is_sparse:\n clean_state_dict[key] = param._values()\n\n if self.is_adj_round:\n clean_state_dict.update(self.dict_extra_sgrad)\n self.dict_extra_sgrad = dict()\n\n return clean_state_dict", "def cleanStep(idict):\n for step in ['input', 'output']:\n data = idict.get(step, {})\n for key, values in data.items():\n for elem in values:\n for skip in ['pfn', 'InputPFN', 'OutputPFN', 'inputpfns']:\n if skip in elem:\n del elem[skip]\n data[key] = values\n return idict", "def convert_state_dict(state_dict):\n\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n state_dict[name] = v\n del state_dict[k]\n return state_dict", "def normalize_state_space(a2_data):\n aux_dic = OrderedDict()\n for key, vec in a2_data.iteritems():\n _, aux_dic[key] = normalize(vec)\n return aux_dic", "def update_workflow_from_dict(\n self,\n workflow_dict,\n workflow_id=None,\n validate=True\n ):\n valid_def = {}\n if validate:\n valid_def = Definition.validate_workflow(workflow_dict)\n if valid_def is False:\n Log.an().error(\n 'invalid workflow:\\n%s', yaml.dump(workflow_dict)\n )\n return False\n\n else:\n valid_def = workflow_dict\n\n # insert workflow_id into dict if provided\n if workflow_id:\n valid_def['workflow_id'] = workflow_id\n\n # make sure steps of workflow are valid, update app IDs\n if not self.synchronize_workflow_with_db(valid_def):\n Log.an().error(\n 'cannot synchronize workflow with data source: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n # update workflow record\n if not self.update_workflow(\n valid_def['workflow_id'],\n {\n 'name': valid_def['name'],\n 'description': valid_def['description'],\n 'username': valid_def['username'],\n 'git': valid_def['git'],\n 'inputs': json.dumps(valid_def['inputs']),\n 'parameters': json.dumps(valid_def['parameters']),\n 'final_output': json.dumps(valid_def['final_output']),\n 'apps': json.dumps(valid_def['apps']),\n 'public': valid_def['public'],\n 'enable': valid_def['enable'],\n 'test': valid_def['test'],\n 'version': valid_def['version']\n }\n ):\n Log.an().error(\n 'cannot update workflow: workflow_id=%s',\n valid_def['workflow_id']\n )\n return False\n\n # update steps, create map of steps\n step_name2id = self.update_workflow_steps_from_dict(valid_def)\n if not step_name2id:\n Log.an().error(\n 'cannot update workflow steps: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n # delete dependencies\n if not self.delete_depend_by_workflow_id(valid_def['workflow_id']):\n Log.an().error(\n 'cannot delete step dependencies for workflow: workflow_id=%s',\n valid_def['workflow_id']\n )\n return False\n\n # insert dependency records\n if not self.import_step_depends_from_dict(valid_def, step_name2id):\n Log.an().error(\n 'cannot import step dependencies for workflow: workflow_id=%s',\n valid_def['workflow_id']\n )\n return False\n\n return True", "def normalize_entity(in_dict):\n out_dict = in_dict.copy()\n if 'pk' in list(in_dict):\n out_dict['id'] = in_dict['pk']\n del out_dict['pk']\n if 'start_time' in 
list(in_dict):\n out_dict['start_time'] = \\\n datetime.strptime(in_dict['start_time'], '%Y-%m-%dT%H:%M:%S.%fZ') \\\n if out_dict['start_time'] else None\n if 'end_time' in list(in_dict):\n out_dict['end_time'] = \\\n datetime.strptime(in_dict['end_time'], '%Y-%m-%dT%H:%M:%S.%fZ') \\\n if out_dict['end_time'] else None\n if 'created_at' in list(in_dict):\n out_dict['created_at'] = datetime.strptime(in_dict['created_at'],\n '%Y-%m-%dT%H:%M:%S.%fZ')\n if 'updated_at' in list(in_dict):\n out_dict['updated_at'] = datetime.strptime(in_dict['updated_at'],\n '%Y-%m-%dT%H:%M:%S.%fZ')\n return out_dict", "def _to_dict_transform(self, data: Dict[str, Any]) -> Dict[str, Any]:\n if 'is_primary' in data:\n data['primary'] = data.pop('is_primary')\n\n data = super()._to_dict_transform(data)\n\n return data" ]
[ "0.7474329", "0.5918155", "0.5898275", "0.58240426", "0.5808958", "0.57993853", "0.57069623", "0.5687853", "0.5675693", "0.559138", "0.5580299", "0.5571338", "0.55547684", "0.5554434", "0.55311835", "0.5512876", "0.55118704", "0.549156", "0.5486343", "0.54750293", "0.54630303", "0.54498094", "0.5434459", "0.54282606", "0.54215235", "0.5421518", "0.54087555", "0.5392373", "0.5390943", "0.5387585" ]
0.6584143
1
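Illustrative sketch (not one of the dataset rows): a minimal standalone rendering of the flattening step described by the "normalize the dictionary representation of the workflow" example above. The helper name coerce_list is hypothetical and not taken from the source.

def coerce_list(value):
    # Accept either a bare string or a list of strings; always return a list.
    return [value] if isinstance(value, str) else list(value)

def normalize_workflow(wf):
    # Flatten {"workflow": {<name>: {...}}} into top-level name/on/resolves keys.
    for name, block in dict(wf["workflow"]).items():
        wf["name"] = name
        wf["on"] = block.get("on", "push")
        wf["resolves"] = coerce_list(block["resolves"])
    return wf

example = {"workflow": {"test-and-deploy": {"resolves": "deploy"}}, "action": {}}
assert normalize_workflow(example)["resolves"] == ["deploy"]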
A GHA workflow is defined by specifying edges that point to the previous nodes they depend on. To make the workflow easier to process, we add forward edges. We also obtain the root nodes.
def complete_graph(self): root_nodes = set() for name, a_block in self.wf['action'].items(): a_block['name'] = name for n in a_block.get('needs', []): if not self.wf['action'][n].get('next', None): self.wf['action'][n]['next'] = set() self.wf['action'][n]['next'].add(name) if not a_block.get('needs', None): root_nodes.add(name) self.wf['root'] = root_nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward_graph(self):\n raise NotImplementedError", "def get_forward_init(node, graph):\n\tedges = []\n\tfor e in node.edges:\n\t\tif node.label <= graph.nodes[e.to].label:\n\t\t\tedges.append(e)\n\treturn edges", "def _bfs_forward(self, start_node):\n visited = {node: (False) for node in self.layer_names}\n queue = [start_node]\n visited[start_node] = True\n while queue:\n node = queue.pop(0)\n if node != start_node:\n input_nodes = self.g.predecessors(node)\n if logging.getLogger().level == logging.DEBUG:\n l = copy(input_nodes)\n None\n cur_layer = getattr(self, node)\n output_pre_layers = []\n output_complete = True\n for n in input_nodes:\n if n not in self.outputs.keys():\n if logging.getLogger().level == logging.DEBUG:\n None\n output_complete = False\n break\n else:\n output_pre_layers.append(self.outputs[n])\n if not output_complete:\n if logging.getLogger().level == logging.DEBUG:\n None\n queue.append(node)\n else:\n cur_output = cur_layer(*output_pre_layers)\n self.outputs[node] = cur_output\n for i in self.g.successors(node):\n if visited[i] == False:\n queue.append(i)\n visited[i] = True\n losses, loss_weights = self._get_losses()\n return [self.outputs[t] for t in self.output_tasks], losses, loss_weights", "def flow_model():\n return {\n 'nodes': [\n {\n 'id': 'left_root',\n 'name': 'left_root',\n 'value': 3.5,\n 'rank': 'l',\n },\n {\n 'id': 'right_root',\n 'name': 'right_root',\n 'value': 3.5,\n 'rank': 'r',\n },\n ], 'edges': [\n {\n 'source': 'left_root',\n 'target': 'right_root',\n 'value': 1.0,\n },\n ]\n }", "def _dfs_cycle_forest(G, root=None):\n # Create a directed graph from the depth-first search tree with\n # root node `root` in which tree edges are directed toward the\n # root and nontree edges are directed away from the root. For\n # each node with an incident nontree edge, this creates a\n # directed cycle starting with the nontree edge and returning to\n # that node.\n #\n # The `parent` node attribute stores the parent of each node in\n # the DFS tree. 
The `nontree` edge attribute indicates whether\n # the edge is a tree edge or a nontree edge.\n #\n # We also store the order of the nodes found in the depth-first\n # search in the `nodes` list.\n H = nx.DiGraph()\n nodes = []\n for u, v, d in nx.dfs_labeled_edges(G, source=root):\n if d == 'forward':\n # `dfs_labeled_edges()` yields (root, root, 'forward')\n # if it is beginning the search on a new connected\n # component.\n if u == v:\n H.add_node(v, parent=None)\n nodes.append(v)\n else:\n H.add_node(v, parent=u)\n H.add_edge(v, u, nontree=False)\n nodes.append(v)\n # `dfs_labeled_edges` considers nontree edges in both\n # orientations, so we need to not add the edge if it its\n # other orientation has been added.\n elif d == 'nontree' and v not in H[u]:\n H.add_edge(v, u, nontree=True)\n else:\n # Do nothing on 'reverse' edges; we only care about\n # forward and nontree edges.\n pass\n return H, nodes", "def pipeline_dependencies_tasks(g):\n deps = dict()\n for step_name in nx.topological_sort(g):\n deps[step_name] = list(g.predecessors(step_name)) # copy list\n return deps", "def processLoadedLinkNodes(self, isLoadingModel):\r\n \r\n if(isLoadingModel):\r\n #=====================================================\r\n # Hierarchical structure maintenance (See HierarchicalASGNode.py)\r\n #=====================================================\r\n for node in self.newLinkNodeQueue:\r\n # NOTE: node.in_connections_ could be > 1 in the metamodel since \r\n # AToM3 considers meta-model relations to have hierarchy active!\r\n # Of course I didn't intend this, and it has no meaning, so ignore!\r\n if(node.isHierarchicalLink() and len(node.in_connections_) == 1):\r\n for child in node.out_connections_:\r\n child._setHierParent(node.in_connections_[0])\r\n node.in_connections_[0]._addHierChildrenList(node.out_connections_) \r\n \r\n\r\n #=======================================================================\r\n # QOCA Constraints\r\n # Only do this if we are actually using QOCA in the first place...\r\n #=======================================================================\r\n if(isNotUsingQoca()): \r\n return\r\n \r\n for node in self.newLinkNodeQueue:\r\n # Apply any QOCA linear constraints\r\n # For graph grammars, graphObject_ may be none, so GGrule.py will also\r\n # trigger QOCA in its replaceSides() method\r\n if(hasattr(node, 'QOCA') and not node.__dict__.has_key('QOCA')):\r\n # Make sure that node has a method called 'QOCA', not an attribute\r\n node.QOCA(None)\r\n \r\n # Clean out the queue\r\n self.newLinkNodeQueue = []", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def _build_graph(self):\n pass", "def build_graph(self):\n pass", "def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 
'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm", "def preprocessing_steiner(self, extension):\n G = nx.Graph(self.optimization_graph)\n terminal_nodes = [node for node, data in G.nodes(data=True)\n if data.get(config.NODE_TYPE_KEY, None) == config.BUILDING_NODE_TYPE]\n link = {}\n if not extension:\n return G, terminal_nodes, link\n old_Graph = self.old_network_graph\n H = nx.Graph(G.subgraph([n for n in G.nodes if n not in old_Graph.nodes]))\n H_connected_components = list(nx.connected_components(H))\n old_junctions = [n for n, d in old_Graph.nodes(data=True) if d['nodetype'] == 'junction']\n # Remove the 
old buildings from the terminal nodes list\n for node in [n for n in terminal_nodes if n in old_Graph.nodes]:\n terminal_nodes.remove(node)\n # Building the Graph on which we will use the heuristic\n for node in old_junctions:\n neighbors = [n for n in G.neighbors(node) if n in H.nodes]\n for cc in H_connected_components:\n sub_neighbors = [n for n in neighbors if n in cc]\n if len(sub_neighbors) == 0:\n continue\n dist, closest_neighbor = min([[G.edges[node, n]['cost'], n] for n in sub_neighbors], key=lambda t: t[0])\n if closest_neighbor not in link:\n link[closest_neighbor] = [node, dist]\n continue\n if dist < link[closest_neighbor][1]:\n link[closest_neighbor] = [node, dist]\n # Add a node corresponding to the old Graph and connected with the selected neighbors\n terminal_nodes.append('OldNetworkNode')\n for n in link:\n H.add_edge('OldNetworkNode', n, cost=link[n][1])\n G = H.copy()\n return G, terminal_nodes, link", "def forward_features(self, x, flows_backward, flows_forward):\n x1 = self.stage1(x, flows_backward[0::4], flows_forward[0::4])\n x2 = self.stage2(x1, flows_backward[1::4], flows_forward[1::4])\n x3 = self.stage3(x2, flows_backward[2::4], flows_forward[2::4])\n x4 = self.stage4(x3, flows_backward[3::4], flows_forward[3::4])\n x = self.stage5(x4, flows_backward[2::4], flows_forward[2::4])\n x = self.stage6(x + x3, flows_backward[1::4], flows_forward[1::4])\n x = self.stage7(x + x2, flows_backward[0::4], flows_forward[0::4])\n x = x + x1\n for layer in self.stage8:\n x = layer(x)\n x = rearrange(x, 'n c d h w -> n d h w c')\n x = self.norm(x)\n x = rearrange(x, 'n d h w c -> n c d h w')\n return x", "def graph(self):\n ...", "def _build_forward_graph(self):\n\n print('[*] Building a Neural Turing Machine.')\n\n self._initalize_state()\n\n # present start token\n controller_out = self.controller.emit_feature_vector(self.start_token, self.r_t[0], reuse=None)\n self._read_write(controller_out, reuse=None)\n\n # present inputs\n print('Input chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.inputs[t], self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present end token\n controller_out = self.controller.emit_feature_vector(self.end_token, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present outputs\n print('Output chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.zeros, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n reuse = None if t == 0 else True\n self.outputs.append(self._decode_read_vector(self.r_t[-1], reuse=reuse))\n print('Done.')", "def bfs_nodes_generator(graph, source, reverse=...):\n ...", "def forward(self, t_local, z, backwards = False):\n self.nfe += 1\n\n\n node_attributes = z[:self.K_N,:]\n edge_attributes = z[self.K_N:,:]\n assert (not torch.isnan(node_attributes).any())\n assert (not torch.isnan(edge_attributes).any())\n\n #grad_edge, edge_value = self.edge_ode_func_net(node_attributes,self.num_atom) # [K*N*N,D],[K,N*N], edge value are non-negative by using relu.\n grad_edge, edge_value = self.edge_ode_func_net(node_attributes,edge_attributes,self.num_atom) # [K*N*N,D],[K,N*N], edge value are non-negative by using relu.todo:with self-evolution\n edge_value = self.normalize_graph(edge_value,self.K_N)\n assert (not 
torch.isnan(edge_value).any())\n grad_node = self.node_ode_func_net(node_attributes,edge_value,self.node_z0) # [K*N,D]\n assert (not torch.isnan(grad_node).any())\n assert (not torch.isinf(grad_edge).any())\n\n assert (not torch.isnan(grad_node).any())\n assert (not torch.isinf(grad_edge).any())\n\n # Concat two grad\n grad = self.dropout(torch.cat([grad_node,grad_edge],0)) # [K*N + K*N*N, D]\n\n\n return grad", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def populate_graph(self):", "def build_dependency_graph(self, transforms):\n try:\n self.final_transforms = tuple(transforms)\n except TypeError:\n self.final_transforms = tuple([transforms])\n self.graph = igraph.Graph(directed=True)\n self.base_transforms = []\n # Recursively adds dependencies required to generate each feature\n for transform in self.final_transforms:\n self.__build_dependency_graph(transform)\n # The identity transform is where raw data is input initially\n # We add a self-loop so that it outputs its own input\n identity = _IdentityTransform()\n self.graph.add_vertex(label=identity.get_name(), transform=identity, data=None, color=\"yellow\")\n self.root = self.get(identity)\n # Connects the identity transform to those that process raw data\n for base_transform in self.base_transforms:\n self.graph.add_edge(self.root, self.get(base_transform))", "def part1(input_lines):\n # This is a DAG problem. 
We need to form a dependency graph.\n tower = get_tower(input_lines)\n return find_root(tower)", "def gen_graph(self):", "def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))", "def get_graph(self, with_fix=False):\n nodes = self.get_tasks()\n if with_fix:\n for n in nodes:\n n.fix_arguments()\n deps = self.get_dependencies()\n graph = nx.DiGraph()\n graph.add_nodes_from(nodes)\n graph.add_edges_from( [d.edge() for d in deps] )\n return graph", "def _setup_graph(self):\n pass", "def _setup_graph(self):\n pass", "def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g", "def make_euler_circuit(start_node, updated_graph_instance):\n\n current_edges_on_graph_list = make_edges_list(updated_graph_instance.edges_dict)\n\n current_node = start_node\n\n node_visit_order = [current_node]\n edge_visit_order = []\n\n # print(\"\\n\\n\\ncurrent_edges_on_graph_list:\", current_edges_on_graph_list)\n\n while len(current_edges_on_graph_list) > 0:\n\n # print(\"current_edges_on_graph_list:\", current_edges_on_graph_list)\n # while there are still edges on the graph, keep traversing\n\n current_bridges_on_graph = get_bridges(current_edges_on_graph_list)\n\n edges_conn_to_current_node = get_all_conn_edges_remaining_in_graph(\n current_node, current_edges_on_graph_list, updated_graph_instance.nodes_dict\n )\n\n edge_to_traverse = choose_edge_to_traverse(\n current_node, edges_conn_to_current_node, current_bridges_on_graph\n )\n\n if edge_to_traverse in current_edges_on_graph_list:\n\n current_edges_on_graph_list.remove(edge_to_traverse)\n\n else:\n\n current_edges_on_graph_list.remove(edge_to_traverse[::-1])\n\n edge_to_traverse_list = list(edge_to_traverse)\n # remove current node from edge to traverse\n edge_to_traverse_list.remove(current_node)\n # update current node to be the only node left in the edge list\n\n # update edge traveral list with edge just traversed\n edge_traversed = (current_node, edge_to_traverse_list[0])\n\n edge_visit_order.append(edge_traversed)\n\n current_node = edge_to_traverse_list[0]\n\n # add the new current node to the nodes visit order list\n node_visit_order.append(current_node)\n\n # add node visit order and edge_visit order to graph instance\n\n updated_graph_instance.node_visit_order = node_visit_order\n\n updated_graph_instance.edge_visit_order = edge_visit_order\n\n updated_graph_instance.node_geojson = make_node_geojson(updated_graph_instance)\n\n updated_graph_instance.edge_geojson = make_edge_geojson(updated_graph_instance)\n\n updated_graph_instance.route_geojson = make_route_geojson(updated_graph_instance)\n\n print(\"\\n\\n\\n\\n\\nROUTE COLLECTION\", updated_graph_instance.route_geojson)\n\n print(\"check done\")\n\n return updated_graph_instance", "def get_graph(nodes, edges, deps, sec, containers):\n root = RootResource()\n nodes.append(root)\n # connect accessible nodes to the web\n for node in 
nodes:\n if node.get_original_type() is not None and \\\n node.get_original_type().is_accessible():\n root.add_child(node)\n\n # Add children for each edge we collected before\n for edge in edges:\n parents = get_resource(nodes, edge['from'])\n children = get_resource(nodes, edge['to'])\n for node in parents:\n for child in children:\n node.add_child(child)\n\n # Same with dependencies\n for dep in deps:\n parents = get_resource(nodes, dep['from'])\n children = get_resource(nodes, dep['to'])\n for node in parents:\n for child in children:\n if node.get_origin() == TARGET and child.get_origin() == TARGET:\n node.add_dependency(child)\n\n # Ensure containers only has Resource as keys and in values by creating more\n # entries if necessary.\n expanded_containers = {}\n for container in containers:\n for c in get_resource(nodes, container):\n if not c in expanded_containers:\n expanded_containers[c] = []\n for x in containers[container]:\n expanded_containers[c].extend(get_resource(nodes, x))\n\n # Same with sec\n expanded_sec = {}\n for security in sec:\n for s in get_resource(nodes, security):\n if not s in expanded_sec:\n expanded_sec[s] = {'from': [], 'to': []}\n for x in sec[security]['from']:\n expanded_sec[s]['from'].extend(get_resource(nodes, x))\n for x in sec[security]['to']:\n expanded_sec[s]['to'].extend(get_resource(nodes, x))\n\n nodes = move_security_nodes(nodes, expanded_sec, expanded_containers)\n nodes = move_container_nodes(nodes, expanded_containers)\n return (nodes, root)", "def build_graph(self):\n raise NotImplementedError" ]
[ "0.68631876", "0.59310347", "0.5865252", "0.57889354", "0.57486254", "0.57264847", "0.56896424", "0.5673367", "0.5652373", "0.5643193", "0.5587451", "0.55842507", "0.55828464", "0.5574014", "0.5573729", "0.55621177", "0.55612457", "0.5532347", "0.5529374", "0.55181307", "0.5502875", "0.54963475", "0.54745495", "0.54613245", "0.54526216", "0.54526216", "0.5449259", "0.54331475", "0.54328114", "0.5425222" ]
0.6231825
1
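Illustrative sketch (not one of the dataset rows): the forward-edge and root-node derivation from the complete_graph example above, reduced to a loop over a made-up needs table; the action names are hypothetical.

actions = {
    "lint":   {"needs": []},
    "test":   {"needs": ["lint"]},
    "deploy": {"needs": ["test"]},
}

roots = set()
for name, block in actions.items():
    for dep in block.get("needs", []):
        # Add the forward edge from the dependency to this action.
        actions[dep].setdefault("next", set()).add(name)
    if not block.get("needs"):
        roots.add(name)

assert roots == {"lint"}
assert actions["lint"]["next"] == {"test"}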
Factory of ActionRunner instances, one for each action
def instantiate_runners(self): for _, a in self.wf['action'].items(): if 'docker://' in a['uses']: a['runner'] = DockerRunner( a, self.workspace, self.env, self.quiet, self.debug, self.dry_run) continue if 'shub://' in a['uses']: a['runner'] = SingularityRunner( a, self.workspace, self.env, self.quiet, self.debug, self.dry_run) continue if './' in a['uses']: if os.path.exists(os.path.join(a['uses'], 'Dockerfile')): a['runner'] = DockerRunner( a, self.workspace, self.env, self.quiet, self.debug, self.dry_run) elif os.path.exists(os.path.join(a['uses'], 'singularity.def')): a['runner'] = SingularityRunner( a, self.workspace, self.env, self.quiet, self.debug, self.dry_run) else: a['runner'] = HostRunner( a, self.workspace, self.env, self.quiet, self.debug, self.dry_run) continue dockerfile_path = os.path.join(a['repo_dir'], a['action_dir'], 'Dockerfile') singularityfile_path = os.path.join(a['repo_dir'], a['action_dir'], 'singularity.def') if os.path.exists(dockerfile_path): a['runner'] = DockerRunner( a, self.workspace, self.env, self.quiet, self.debug, self.dry_run) elif os.path.exists(singularityfile_path): a['runner'] = SingularityRunner( a, self.workspace, self.env, self.quiet, self.debug, self.dry_run) else: a['runner'] = HostRunner( a, self.workspace, self.env, self.quiet, self.debug, self.dry_run)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_clients():\n clients = {}\n\n rospy.loginfo(\"Waiting for rubble detector\")\n clients['rubble_detect'] = actionlib.SimpleActionClient('rubble_detect',\n RubbleDetectAction)\n\n rospy.loginfo(\"Waiting for rubble checker\")\n clients['rubble_check'] = actionlib.SimpleActionClient('rubble_check',\n RubbleCheckAction)\n clients['rubble_check'].wait_for_server()\n rospy.loginfo(\"rubble_check connected\")\n\n rospy.loginfo(\"Waiting for room searcher\")\n clients['search_room'] = actionlib.SimpleActionClient('search_room',\n SearchRoomAction)\n clients['search_room'].wait_for_server()\n rospy.loginfo(\"search_room connected\")\n\n rospy.loginfo(\"Waiting for navigator\")\n clients['navigation'] = actionlib.SimpleActionClient('navigation',\n NavigateAction)\n clients['navigation'].wait_for_server()\n rospy.loginfo(\"navigation connected\")\n\n rospy.loginfo(\"Waiting for door clearer\")\n clients['rubble_clear'] = actionlib.SimpleActionClient('rubble_clear',\n RubbleClearAction)\n clients['rubble_clear'].wait_for_server()\n rospy.loginfo(\"rubble clear connected\")\n\n rospy.loginfo(\"All clients for policy executor set up\")\n\n return clients", "def actions(self):\r\n return Actions(self)", "def runner(action):\n job = q.enqueue_call(func=run_runner, args=[action], kwargs=request.json, timeout=600)\n status_url = \"{}{}/runner/status/{}\".format(request.url_root, API_VERSION, job.get_id())\n return jsonify({\"task_id\": job.get_id(), \"status_url\": status_url})", "def _generate_actions(self) -> list:\n pass", "def test_actions(self, actions):\n try:\n for action in actions:\n self.get_action(action['type'])(**action)\n except Exception as e:\n print('Exception: {}'.format(str(e)))", "def initializeActions_(self, opts):\n\n for opt in opts.keys():\n \n val = opts[opt]\n \n \n if ( opt == '-create' ):\n ncjobs = 0\n if val:\n if ( isInt(val) ):\n ncjobs = int(val)\n elif ( val == 'all'):\n ncjobs = val\n else:\n msg = 'Bad creation bunch size <'+str(val)+'>\\n'\n msg += ' Must be an integer or \"all\"'\n msg += ' Generic range is not allowed\"'\n raise SkimException(msg)\n pass\n else: ncjobs = 'all'\n\n if ncjobs != 0:\n # Instantiate Creator object\n self.creator = Creator(self.cfg_params,\n ncjobs)\n self.actions[opt] = self.creator\n pass\n pass\n\n elif ( opt == '-submit' ):\n\n self.actions[opt] = Submitter(self.cfg_params)\n\n return", "def runner_setup():\n runner = ClassicRunner()\n yield runner", "def create_test_action(context, **kw):\n action = get_test_action(context, **kw)\n action.create()\n return action", "def _formulate_action(Action, **kwargs):\n\n return Action(**kwargs)", "def _run_actions(self):\n\n if \"install-bento\" in self.actions:\n self._do_action_bento_setup()\n\n if \"create-tables\" in self.actions:\n self._do_action_tables_create()\n\n if \"import-ratings\" in self.actions:\n self._do_action_import_ratings()\n\n if \"import-user-info\" in self.actions:\n self._do_action_import_user_info()\n\n if \"import-movie-info\" in self.actions:\n self._do_action_import_movie_info()\n\n if \"train-item-item-cf\" in self.actions:\n self._do_action_train()\n\n if \"register-freshener\" in self.actions:\n self._do_action_register_freshener()", "def build_action_randomizers(cls, constants) -> List[ActionRandomizer]:\n return []", "def createActions (self):\n self.closeTabAction = QtHelper.createAction(self, self.tr(\"Close\"), self.closeCurrentTab,\n tip = 'Closes the current document')\n self.closeAllTabAction = QtHelper.createAction(self, self.tr(\"Close 
All\"), self.closeAllTab, \n tip = 'Closes all document')\n\n self.newTestUnitAction = QtHelper.createAction(self, \"Test Unit\", self.newTestUnit,\n icon = QIcon(\":/%s.png\" % TestUnit.TYPE), \n tip = 'Creates a new test unit')\n self.newTestConfigAction = QtHelper.createAction(self, \"Test Config\", self.newTestConfig,\n icon = QIcon(\":/%s.png\" % TestConfig.TYPE), \n tip = 'Creates a new test config')\n self.newTestSuiteAction = QtHelper.createAction(self, \"Test Suite\", self.newTestSuite,\n icon = QIcon(\":/%s.png\" % TestSuite.TYPE), \n shortcut = \"Ctrl+N\", tip = 'Creates a new test suite')\n self.newTestPlanAction = QtHelper.createAction(self, \"Test Plan\", self.newTestPlan,\n icon = QIcon(\":/%s.png\" % TestPlan.TYPE), \n tip = 'Creates a new test plan')\n self.newTestGlobalAction = QtHelper.createAction(self, \"Test Global\", self.newTestGlobal,\n icon = QIcon(\":/%s.png\" % TestPlan.TYPE_GLOBAL), \n tip = 'Creates a new test global')\n self.newTestDataAction = QtHelper.createAction(self, \"Test Data\", self.newTestData,\n icon = QIcon(\":/%s.png\" % TestData.TYPE), \n tip = 'Creates a new test data')\n self.newAdapterAction = QtHelper.createAction(self, \"Adapter\", self.newTestAdapter,\n icon = QIcon(\":/file-adp2.png\"), tip = 'Creates a new adapter')\n self.newLibraryAction = QtHelper.createAction(self, \"Library\", self.newTestLibrary,\n icon = QIcon(\":/file-lib-adp.png\"), tip = 'Creates a new library')\n self.newTxtAction = QtHelper.createAction(self, \"Txt\", self.newTestTxt,\n icon = QIcon(\":/file-txt.png\"), tip = 'Creates a new txt')\n\n\n self.openAction = QtHelper.createAction(self, self.tr(\"Open\"), self.openDoc,\n icon = QIcon(\":/open-test.png\"), shortcut = \"Ctrl+O\", tip = 'Open')\n self.saveAction = QtHelper.createAction(self, self.tr(\"Save\"), self.saveTab, \n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/save' ),\n icon = QIcon(\":/save-test.png\"), \n tip = 'Saves the active document')\n self.saveAsAction = QtHelper.createAction(self, self.tr(\"Save As\"), self.saveTabAs,\n icon = QIcon(\":/filesave.png\"), tip = 'Saves the active document as ...')\n self.exportAsAction = QtHelper.createAction(self, self.tr(\"Export\"), self.exportTabAs,\n icon = None, tip = 'Export the active document as ...')\n\n self.saveAllAction = QtHelper.createAction(self, self.tr(\"Save all\"), self.saveAllTabs,\n icon = QIcon(\":/save_all.png\"), tip = 'Saves all documents')\n\n self.printAction = QtHelper.createAction(self, self.tr(\"Print\"), self.printDoc,\n icon = QIcon(\":/printer.png\"), tip = 'Print the current document', \n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/print' ) )\n\n self.undoAction = QtHelper.createAction(self, self.tr(\"Undo\"), callback = self.globalCallback,\n icon = QIcon(\":/undo.png\"), data='undo', \n shortcut = \"Ctrl+Z\", tip = 'Undoes the last action' )\n self.redoAction = QtHelper.createAction(self, self.tr(\"Redo\"), callback = self.globalCallback, \n icon = QIcon(\":/redo.png\"), data='redo', \n shortcut = \"Ctrl+Y\", tip = 'Redoes the previously undone action' )\n self.cutAction = QtHelper.createAction(self, self.tr(\"Cut\"), callback = self.globalCallback,\n shortcut = QKeySequence.Cut, data='cut', \n tip = 'Cuts the selection and puts it on the clipboard' )\n self.copyAction = QtHelper.createAction(self, self.tr(\"Copy\"), callback = self.globalCallback,\n shortcut = QKeySequence.Copy, data='copy', \n tip = 'Copies the selection and puts it on the clipboard' )\n 
self.copyAction.setShortcutContext(Qt.WidgetWithChildrenShortcut)\n self.pasteAction = QtHelper.createAction(self, self.tr(\"Paste\"), callback = self.globalCallback,\n data='paste', shortcut = QKeySequence.Paste, \n tip = 'Inserts clipboard contents' )\n self.pasteAction.setShortcutContext(Qt.WidgetWithChildrenShortcut)\n self.deleteAction = QtHelper.createAction( self, \"Delete Selection\", callback = self.globalCallback,\n data='removeSelectedText', tip = 'Deletes the selection' )\n self.commentAction = QtHelper.createAction(self, \"Comment\", callback = self.globalCallback,\n icon = QIcon(\":/comment.png\"), data='comment', \n tip = 'Insert comment sign at the begining of line' )\n self.uncommentAction = QtHelper.createAction(self, \"Uncomment\", callback = self.globalCallback,\n icon = QIcon(\":/uncomment.png\"), data='uncomment', \n tip = 'Remove comment sign at the begining of line' )\n self.selectAllAction = QtHelper.createAction(self, \"Select All\", self.globalCallback, \n QIcon(\":/select_all.png\"), data='selectAll', \n tip = 'Selects the entire document' )\n self.indentAction = QtHelper.createAction(self, \"Indent\", self.globalCallback, data='indent', \n shortcut = \"Tab\", tip = 'Indent current line or selection' )\n self.unindentAction = QtHelper.createAction(self, \"Unindent\", self.globalCallback, data='unindent', \n shortcut = \"Shift+Tab\", tip = 'Unindent current line or selection' )\n \n self.foldAllAction = QtHelper.createAction(self, \"Fold/Unfold all\", callback = self.globalCallback,\n icon = QIcon(\":/toggle-expand.png\"), \n data='foldAllLines', tip = 'Fold all lines' )\n self.codefoldingAction = QtHelper.createAction(self, \"Code Folding\", self.toggleCodeFolding, \n icon = QIcon(\":/folding.png\"), toggled = True)\n self.codefoldingAction.setChecked( self.codeFolding )\n self.whitespaceVisibilityAction = QtHelper.createAction(self, \"Show whitespace and tabulation\", \n self.toggleWhitespaceVisibility, toggled = True)\n self.whitespaceVisibilityAction.setChecked( self.whitespaceVisible )\n self.indentGuidesVisibilityAction = QtHelper.createAction(self, \"Show indentation guides\", \n self.toggleIndentGuidesVisibility, toggled = True)\n self.indentGuidesVisibilityAction.setChecked( self.indentationGuidesVisible )\n self.linesNumberingAction = QtHelper.createAction(self, \"Line Numbering\", self.toggleLineNumbering, \n toggled = True)\n self.linesNumberingAction.setChecked( self.linesNumbering )\n self.codeWrappingAction = QtHelper.createAction(self, \"Code Wrapping\", self.toggleCodeWrapping, \n icon = None, toggled = True)\n self.codeWrappingAction.setChecked( self.codeWrapping )\n \n \n self.runAction = QtHelper.createAction(self, \"Execute\", self.runDocument,\n tip = 'Executes the current test', icon=QIcon(\":/test-play.png\") )\n \n self.runNowAction = QtHelper.createAction(self, \"Immediately\", self.runDocument,\n tip = 'Executes the current test',\n shortcut=Settings.instance().readValue( key = 'KeyboardShorcuts/run' ) )\n self.runMinimizeAction = QtHelper.createAction(self, \"Immediately + Minimize\", self.runDocumentMinimize,\n tip = 'Executes the current test and minimize the application' )\n self.runReduceAction = QtHelper.createAction(self, \"Immediately + Reduce\", self.runDocumentReduce,\n tip = 'Executes the current test and reduce the application' )\n self.runBackgroundAction = QtHelper.createAction(self, \"Background\", self.runDocumentInBackground,\n tip = 'Executes the current test in background')\n \n self.runDebugAction = 
QtHelper.createAction(self, \"&Debug\", self.runDocumentDebug,\n tip = 'Executes the current test with debug traces on server' )\n self.runWithoutNotifAction = QtHelper.createAction(self, \"&Without notifications\", self.runDocumentWithoutNotif,\n tip = 'Executes the current test without mail notifications' )\n self.runNoKeepTrAction = QtHelper.createAction(self, \"&Do not keep test result\", self.runDocumentNoKeepTr,\n tip = 'Do not keep test result on archive' )\n\n self.runSchedAction = QtHelper.createAction(self, self.tr(\"Schedule\"), self.schedRunDocument,\n icon = QIcon(\":/schedule.png\"), \n tip = self.tr('Scheduling a run of the current tab') )\n \n self.runSeveralAction = QtHelper.createAction(self, self.tr(\"Grouped\"), self.runSeveralTests,\n icon = QIcon(\":/test-play-several.png\"), tip = self.tr('Run several tests') )\n self.runSeveralAction.setEnabled(False)\n\n self.runStepByStepAction = QtHelper.createAction(self, \"Steps\", self.runDocumentStepByStep,\n tip = 'Execute the current test step by step', \n icon=QIcon(\":/run-state.png\"),\n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/steps' ) )\n self.runBreakpointAction = QtHelper.createAction(self, \"Break Point\", self.runDocumentBreakpoint,\n tip = 'Execute the current test with breakpoint', \n icon=QIcon(\":/breakpoint.png\"),\n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/breakpoint' ) )\n\n self.checkSyntaxAction = QtHelper.createAction(self, self.tr(\"&Syntax\"), self.checkSyntaxDocument,\n icon = QIcon(\":/check-syntax.png\"), \n tip = self.tr('Checking syntax of the current tab'),\n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/syntax' ) )\n self.checkDesignAction = QtHelper.createAction(self, self.tr(\"&Design\"), self.checkDesignDocument,\n icon = QIcon(\":/tds.png\"), \n tip = self.tr('Checking design of the current tab') )\n self.updateTestAction = QtHelper.createAction(self, self.tr(\"&Assistant\"), self.updateMacro,\n icon = QIcon(\":/recorder.png\") , \n tip = self.tr('Update the test with the automation assistant'),\n shortcut = Settings.instance().readValue( key = 'KeyboardShorcuts/assistant' ) )\n \n menu1 = QMenu(self)\n menu1.addAction( self.checkSyntaxAction )\n menu1.addAction( self.checkDesignAction )\n self.checkAction = QtHelper.createAction(self, self.tr(\"Prepare\"), self.prepareDocument,\n tip = self.tr('Prepare the current test'), icon=QIcon(\":/check-syntax.png\") )\n self.checkAction.setMenu(menu1) \n\n menu3 = QMenu(self)\n menu3.addAction( self.runSchedAction )\n menu3.addAction( self.runSeveralAction )\n self.schedAction = QtHelper.createAction(self, self.tr(\"Schedule\"), self.schedRunDocument,\n tip = self.tr('Schedule a test'), icon=QIcon(\":/schedule.png\") )\n self.schedAction.setMenu(menu3) \n \n menu = QMenu(self)\n menu.addAction( self.runNowAction )\n menu.addAction( self.runBackgroundAction )\n menu.addSeparator()\n menu.addAction( self.runMinimizeAction )\n menu.addAction( self.runReduceAction )\n menu.addSeparator()\n menu.addAction( self.runWithoutNotifAction )\n menu.addAction( self.runNoKeepTrAction )\n menu.addSeparator()\n menu.addAction( self.runDebugAction )\n menu.addSeparator()\n menu.addAction( self.runStepByStepAction )\n menu.addAction( self.runBreakpointAction )\n\n self.runAction.setMenu(menu)\n\n self.findAction = QtHelper.createAction(self, self.tr(\"Search\"), self.searchText,\n icon = QIcon(\":/find.png\"), tip = self.tr('Search text'),\n shortcut = Settings.instance().readValue( key = 
'KeyboardShorcuts/search' ) )\n self.findAction.setChecked(True)\n \n self.setDefaultActionsValues()", "def from_array(cls, json_array):\n\t\tinstances = []\n\t\tif (len(json_array) > 0):\n\t\t\tfor json in json_array:\n\t\t\t\tinstances.append(Action(json))\n\t\treturn instances", "def create_scenario(actions, logs=None):\n keys = [str(i) for i in range(len(actions))]\n key_provider = create_mock_key_provider(keys)\n digest_provider = MockDigestProvider(actions, logs)\n digest_validator = mock.Mock()\n\n def validate(bucket, key, public_key, digest_data, digest_str):\n if '_invalid' in digest_data:\n raise DigestError('invalid error')\n\n digest_validator.validate = validate\n return key_provider, digest_provider, digest_validator", "def main(num_trials, num_actions):\n\tfor i in xrange(int(num_trials)):\n\t\ttrial(i+1, int(num_actions))", "def _RunActions(self, rule, client_id):\n actions_count = 0\n\n for action in rule.actions:\n try:\n # Say this flow came from the foreman.\n token = self.token.Copy()\n token.username = \"Foreman\"\n\n if action.HasField(\"hunt_id\"):\n if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id):\n logging.info(\"Foreman: ignoring hunt %s on client %s: was started \"\n \"here before\", client_id, action.hunt_id)\n else:\n logging.info(\"Foreman: Starting hunt %s on client %s.\",\n action.hunt_id, client_id)\n\n flow_cls = flow.GRRFlow.classes[action.hunt_name]\n flow_cls.StartClients(action.hunt_id, [client_id])\n actions_count += 1\n else:\n flow.GRRFlow.StartFlow(\n client_id=client_id, flow_name=action.flow_name, token=token,\n **action.argv.ToDict())\n actions_count += 1\n # There could be all kinds of errors we don't know about when starting the\n # flow/hunt so we catch everything here.\n except Exception as e: # pylint: disable=broad-except\n logging.exception(\"Failure running foreman action on client %s: %s\",\n action.hunt_id, e)\n\n return actions_count", "def buildRunner(self, reps, info):\n mem = info.get('mem', None)\n startDelay = info.get('startDelay', 0)\n sram = info.get('sram', None)\n runner = self.RUNNER_CLASS(self, reps, startDelay, mem, sram)\n return runner", "def createActions( self ):\n self.readInConfigAct = QtGui.QAction( \"&Read Input Config\",\n self, statusTip=\"Read config file for input\",\n triggered=self.readInConfigFileDlg )\n self.saveInConfigAct = QtGui.QAction( \"&Save Input Config\",\n self, statusTip=\"Save config file for input\",\n triggered=self.saveInConfigFileDlg )\n self.readConfigAct = QtGui.QAction( \"&Read Full Config\",\n self, statusTip=\"Read config file for full application\",\n triggered=self.readConfigFileDlg )\n self.saveConfigAct = QtGui.QAction( \"&Save Full Config\",\n self, statusTip=\"Save config file for full application\",\n triggered=self.saveConfigFileDlg )\n self.copyTaskAct = QtGui.QAction( \"&Copy Task Settings\",\n self, statusTip=\"Copy task parameters from another task into the current task\",\n triggered=self.copyTaskDlg )", "def __init__(self):\n self.actions = []", "def action(self, action_id):\r\n return Action(self, action_id)", "def action(self, action_id):\r\n return Action(self, action_id)", "async def run_action( # pylint: disable=too-many-branches,too-many-statements,too-many-locals\n self,\n kind: str,\n runner: Callable[[List[str]], Awaitable],\n *command: Strings,\n **resources: int,\n ) -> None:\n self._become_current()\n self.abort_due_to_other()\n\n await self.done(self.sync())\n\n run_parts = []\n persistent_parts = []\n log_parts = []\n is_silent = None\n for 
part in each_string(*command):\n if is_silent is None:\n if part.startswith(\"@\"):\n is_silent = True\n if part == \"@\":\n continue\n part = part[1:]\n else:\n is_silent = False\n\n run_parts.append(part)\n if not is_phony(part):\n persistent_parts.append(part)\n\n if kind != \"shell\":\n part = copy_annotations(part, shlex.quote(part))\n log_parts.append(part)\n\n log_command = \" \".join(log_parts)\n\n if self.exception is not None:\n Logger.debug(f\"Can't run: {log_command}\")\n no_additional_complaints()\n raise self.exception\n\n if self.new_persistent_actions:\n self.new_persistent_actions[-1].run_action(persistent_parts)\n\n if not self.should_run_action():\n global log_skipped_actions # pylint: disable=invalid-name\n if not log_skipped_actions.value:\n level = logging.DEBUG\n elif is_silent:\n level = Logger.FILE\n else:\n level = logging.INFO\n Logger.log(level, f\"Skip: {log_command}\")\n self.did_skip_actions = True\n if self.new_persistent_actions:\n self.new_persistent_actions.append(PersistentAction(self.new_persistent_actions[-1])) #\n Invocation.skipped_count += 1\n return\n\n if self.did_skip_actions:\n self.must_run_action = True\n Logger.debug(\"Must restart step to run skipped action(s)\")\n raise RestartException(\"To run skipped action(s)\")\n\n self.must_run_action = True\n self.did_run_actions = True\n\n Invocation.actions_count += 1\n\n resources = Resources.effective(resources)\n if resources:\n await self.done(self._use_resources(resources))\n\n try:\n self.remove_stale_outputs()\n\n self.oldest_output_path = None\n\n global no_actions # pylint: disable=invalid-name\n async with locks():\n if is_silent:\n Logger.file(f\"Run: {log_command}\")\n else:\n Logger.info(f\"Run: {log_command}\")\n if no_actions.value:\n raise DryRunException()\n\n if no_actions.value:\n exit_status = 0\n else:\n sub_process = await self.done(runner(run_parts))\n\n read_stdout = self._read_pipe(sub_process.stdout, Logger.STDOUT)\n read_stderr = self._read_pipe(sub_process.stderr, Logger.STDERR)\n await self.done(asyncio.gather(read_stdout, read_stderr))\n\n exit_status = await self.done(sub_process.wait())\n\n if self.new_persistent_actions:\n persistent_action = self.new_persistent_actions[-1]\n persistent_action.done_action()\n self.new_persistent_actions.append(PersistentAction(persistent_action))\n\n if exit_status != 0:\n self.log_and_abort(f\"Failure: {log_command}\")\n return\n\n if not no_actions.value:\n Logger.trace(f\"Success: {log_command}\")\n finally:\n self._become_current()\n if resources:\n if Logger.isEnabledFor(logging.DEBUG):\n Logger.debug(\"Free resources: \" + _dict_to_str(resources))\n Resources.free(resources)\n if Logger.isEnabledFor(logging.DEBUG):\n Logger.debug(\"Available resources: \" + _dict_to_str(Resources.available))\n await self.done(Resources.condition.acquire())\n Resources.condition.notify_all()\n Resources.condition.release()", "def runner_setup():\n concurrent_sessions = 5\n runner = VisualGridRunner(concurrent_sessions)\n yield runner", "async def run_actions(self, event: EventType, **kwargs):\n\n # extract options to be passed to context setting\n c, m, p, b, af = kwargs.pop('client'), kwargs.pop('message', None), \\\n kwargs.pop('payload', None), kwargs.pop('before', None), kwargs.pop('after', None)\n\n for a in self.actions:\n if a.event_type() != event:\n continue\n\n action = copy.deepcopy(a)\n\n action.set_context(client=c, message=m, payload=p, before=b, after=af)\n if not action.match():\n continue\n\n logger.info(f'matched the {action} 
action for event {event}')\n await action.execute()\n\n if action.should_stop():\n logger.debug(f'should stop triggered for action {action}')\n break", "def _create_controller(main_controller, action_controller_list):\n controller = server.wsgi.Resource(main_controller())\n for ctl in action_controller_list:\n controller.register_actions(ctl())\n return controller", "def execute_action_sequence(actions):\n for action in actions:\n action.execute()\n rospy.logdebug( \"Action sequence finished\")\n return", "def actions():\n pass", "def run(self, action_name=None, reuse=False, parallel=False):\n os.environ['WORKSPACE'] = self.workspace\n\n self.download_actions()\n self.instantiate_runners()\n\n if action_name:\n self.wf['action'][action_name]['runner'].run(reuse)\n else:\n for s in self.get_stages():\n self.run_stage(s, reuse, parallel)", "def start_runners(self, runner_names=None):\n if not runner_names:\n runner_names = []\n for runner_config in config.runner_configs:\n # Strip off the 'runner.' prefix.\n assert runner_config.name.startswith('runner.'), (\n 'Unexpected runner configuration section name: {}'.format(\n runner_config.name))\n runner_names.append(runner_config.name[7:])\n # For each runner we want to start, find their config section, which\n # will tell us the name of the class to instantiate, along with the\n # number of hash space slices to manage.\n for name in runner_names:\n section_name = 'runner.' + name\n # Let AttributeError propagate.\n runner_config = getattr(config, section_name)\n if not as_boolean(runner_config.start):\n continue\n # Find out how many runners to instantiate. This must be a power\n # of 2.\n count = int(runner_config.instances)\n assert (count & (count - 1)) == 0, (\n 'Runner \"{0}\", not a power of 2: {1}'.format(name, count))\n for slice_number in range(count):\n # runner name, slice #, # of slices, restart count\n info = (name, slice_number, count, 0)\n spec = '{0}:{1:d}:{2:d}'.format(name, slice_number, count)\n pid = self._start_runner(spec)\n log = logging.getLogger('mailman.runner')\n log.debug('[{0:d}] {1}'.format(pid, spec))\n self._kids.add(pid, info)", "def _make_tasks(self) -> Sequence[AbstractExecutorTask]:\n return [AEAInstanceTask(agent) for agent in self._agents]" ]
[ "0.56596607", "0.56475294", "0.5616994", "0.5510894", "0.54630816", "0.5457942", "0.54218817", "0.5406142", "0.53818655", "0.5351243", "0.53502804", "0.5345953", "0.532772", "0.5279873", "0.527048", "0.523934", "0.521685", "0.51686555", "0.515931", "0.5137021", "0.5137021", "0.51179916", "0.5083241", "0.5066786", "0.5057596", "0.5048156", "0.5045751", "0.5041852", "0.5029337", "0.50277656" ]
0.69310266
0
Generator of stages. A stage is a list of actions that can be executed in parallel.
def get_stages(self):
    current_stage = self.wf['root']

    while current_stage:
        yield current_stage
        next_stage = set()
        for n in current_stage:
            next_stage.update(self.wf['action'][n].get('next', set()))
        current_stage = next_stage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stages(self):\n return StageManager(session=self._session)", "def stages(self):\r\n return pipelines.Stages(self)", "def run(stages, maxsize=0):\n\n if isinstance(stages, list) and len(stages) == 0:\n raise ValueError(\"Expected at least 1 stage to run\")\n\n elif isinstance(stages, list):\n stage = concat(stages, maxsize=maxsize)\n\n else:\n stage = stages\n\n stage = to_iterable(stage, maxsize=maxsize)\n\n for _ in stages:\n pass", "def gen_stage_loop(cls, _opts, tests, put_next_stage, _put_result_stage):\n for test in tests:\n put_next_stage(test)", "def list_stages():\n for name in Manager.STAGES:\n click.echo('{}'.format(name))", "def run(stages: typing.List[Stage], maxsize: int = 0) -> None:\n\n if isinstance(stages, list) and len(stages) == 0:\n raise ValueError(\"Expected at least 1 stage to run\")\n\n elif isinstance(stages, list):\n stage = concat(stages, maxsize=maxsize)\n\n else:\n stage = stages\n\n stage = to_iterable(stage, maxsize=maxsize)\n\n for _ in stages:\n pass", "def stages(self):\n return self._stages", "def stages(self):\n return self._stages", "def stage_list(args):\n\n for stage in args.stages:\n print stage", "def process_stages(self) -> List[str]:\n return self._process_stages", "def stages() -> List[Tuple[str, str]]:\n return [TestStatus.preparation, TestStatus.testing, TestStatus.completed]", "def expand_gbk(stages, pipeline_context):\n # type: (Iterable[Stage], TransformContext) -> Iterator[Stage]\n for stage in stages:\n transform = only_transform(stage.transforms)\n if transform.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:\n for pcoll_id in transform.inputs.values():\n pipeline_context.length_prefix_pcoll_coders(pcoll_id)\n for pcoll_id in transform.outputs.values():\n if pipeline_context.use_state_iterables:\n pipeline_context.components.pcollections[\n pcoll_id].coder_id = pipeline_context.with_state_iterables(\n pipeline_context.components.pcollections[pcoll_id].coder_id)\n pipeline_context.length_prefix_pcoll_coders(pcoll_id)\n\n # This is used later to correlate the read and write.\n transform_id = stage.name\n if transform != pipeline_context.components.transforms.get(transform_id):\n transform_id = unique_name(\n pipeline_context.components.transforms, stage.name)\n pipeline_context.components.transforms[transform_id].CopyFrom(transform)\n grouping_buffer = create_buffer_id(transform_id, kind='group')\n gbk_write = Stage(\n transform.unique_name + '/Write',\n [\n beam_runner_api_pb2.PTransform(\n unique_name=transform.unique_name + '/Write',\n inputs=transform.inputs,\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=bundle_processor.DATA_OUTPUT_URN,\n payload=grouping_buffer))\n ],\n downstream_side_inputs=frozenset(),\n must_follow=stage.must_follow)\n yield gbk_write\n\n yield Stage(\n transform.unique_name + '/Read',\n [\n beam_runner_api_pb2.PTransform(\n unique_name=transform.unique_name + '/Read',\n outputs=transform.outputs,\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=bundle_processor.DATA_INPUT_URN,\n payload=grouping_buffer))\n ],\n downstream_side_inputs=stage.downstream_side_inputs,\n must_follow=union(frozenset([gbk_write]), stage.must_follow))\n else:\n yield stage", "def job_stages(self) -> Sequence['outputs.JobStagesResponse']:\n return pulumi.get(self, \"job_stages\")", "def job_stages(self) -> Sequence['outputs.JobStagesResponse']:\n return pulumi.get(self, \"job_stages\")", "def job_stages(self) -> Sequence['outputs.JobStagesResponse']:\n return pulumi.get(self, \"job_stages\")", "def stages(self, stages):\n if 
stages is None:\n self._stages = None\n else:\n self._stages = stages if isinstance(stages, list) else [stages] * len(self.pidevice.allaxes)\n debug('ControllerStartup.stages = %s', itemstostr(self._stages))", "def set_stages(self, stages):\n\n if not isinstance(stages, list) or not all([issubclass(type(x), Stage) for x in stages]):\n raise ValueError(\"stages must be a list of Stages's only!\")\n\n self.stages = stages\n\n return self", "def extract_impulse_stages(stages, pipeline_context):\n # type: (Iterable[Stage], TransformContext) -> Iterator[Stage]\n for stage in stages:\n for transform in list(stage.transforms):\n if transform.spec.urn == common_urns.primitives.IMPULSE.urn:\n stage.transforms.remove(transform)\n yield Stage(\n transform.unique_name,\n transforms=[transform],\n downstream_side_inputs=stage.downstream_side_inputs,\n must_follow=stage.must_follow,\n parent=stage.parent)\n\n if stage.transforms:\n yield stage", "def sort(self):\n for _ in self.stage1():\n yield\n for _ in self.stage2():\n yield", "def sort_stages(stages, pipeline_context):\n # type: (Iterable[Stage], TransformContext) -> List[Stage]\n all_stages = set(stages)\n seen = set() # type: Set[Stage]\n ordered = []\n\n producers = {\n pcoll: stage\n for stage in all_stages for t in stage.transforms\n for pcoll in t.outputs.values()\n }\n\n def process(stage):\n if stage not in seen:\n seen.add(stage)\n if stage not in all_stages:\n return\n for prev in stage.must_follow:\n process(prev)\n stage_outputs = set(\n pcoll for transform in stage.transforms\n for pcoll in transform.outputs.values())\n for transform in stage.transforms:\n for pcoll in transform.inputs.values():\n if pcoll not in stage_outputs:\n process(producers[pcoll])\n ordered.append(stage)\n\n for stage in stages:\n process(stage)\n return ordered", "def expand_sdf(stages, context):\n # type: (Iterable[Stage], TransformContext) -> Iterator[Stage]\n for stage in stages:\n transform = only_transform(stage.transforms)\n if transform.spec.urn == common_urns.primitives.PAR_DO.urn:\n\n pardo_payload = proto_utils.parse_Bytes(\n transform.spec.payload, beam_runner_api_pb2.ParDoPayload)\n\n if pardo_payload.restriction_coder_id:\n\n def copy_like(protos, original, suffix='_copy', **kwargs):\n if isinstance(original, str):\n key = original\n original = protos[original]\n else:\n key = 'component'\n new_id = unique_name(protos, key + suffix)\n protos[new_id].CopyFrom(original)\n proto = protos[new_id]\n for name, value in kwargs.items():\n if isinstance(value, dict):\n getattr(proto, name).clear()\n getattr(proto, name).update(value)\n elif isinstance(value, list):\n del getattr(proto, name)[:]\n getattr(proto, name).extend(value)\n elif name == 'urn':\n proto.spec.urn = value\n elif name == 'payload':\n proto.spec.payload = value\n else:\n setattr(proto, name, value)\n if 'unique_name' not in kwargs and hasattr(proto, 'unique_name'):\n proto.unique_name = unique_name(\n {p.unique_name\n for p in protos.values()},\n original.unique_name + suffix)\n return new_id\n\n def make_stage(base_stage, transform_id, extra_must_follow=()):\n # type: (Stage, str, Iterable[Stage]) -> Stage\n transform = context.components.transforms[transform_id]\n return Stage(\n transform.unique_name, [transform],\n base_stage.downstream_side_inputs,\n union(base_stage.must_follow, frozenset(extra_must_follow)),\n parent=base_stage.name,\n environment=base_stage.environment)\n\n main_input_tag = only_element(\n tag for tag in transform.inputs.keys()\n if tag not in 
pardo_payload.side_inputs)\n main_input_id = transform.inputs[main_input_tag]\n element_coder_id = context.components.pcollections[\n main_input_id].coder_id\n # Tuple[element, restriction]\n paired_coder_id = context.add_or_get_coder_id(\n beam_runner_api_pb2.Coder(\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=common_urns.coders.KV.urn),\n component_coder_ids=[\n element_coder_id, pardo_payload.restriction_coder_id\n ]))\n # Tuple[Tuple[element, restriction], double]\n sized_coder_id = context.add_or_get_coder_id(\n beam_runner_api_pb2.Coder(\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=common_urns.coders.KV.urn),\n component_coder_ids=[\n paired_coder_id,\n context.add_or_get_coder_id(\n # context can be None here only because FloatCoder does\n # not have components\n coders.FloatCoder().to_runner_api(None), # type: ignore\n 'doubles_coder')\n ]))\n\n paired_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_paired',\n coder_id=paired_coder_id)\n pair_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/PairWithRestriction',\n urn=common_urns.sdf_components.PAIR_WITH_RESTRICTION.urn,\n outputs={'out': paired_pcoll_id})\n\n split_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_split',\n coder_id=sized_coder_id)\n split_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/SplitAndSizeRestriction',\n urn=common_urns.sdf_components.SPLIT_AND_SIZE_RESTRICTIONS.urn,\n inputs=dict(transform.inputs, **{main_input_tag: paired_pcoll_id}),\n outputs={'out': split_pcoll_id})\n\n reshuffle_stage = None\n if common_urns.composites.RESHUFFLE.urn in context.known_runner_urns:\n reshuffle_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_reshuffle',\n coder_id=sized_coder_id)\n reshuffle_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/Reshuffle',\n urn=common_urns.composites.RESHUFFLE.urn,\n payload=b'',\n inputs=dict(transform.inputs, **{main_input_tag: split_pcoll_id}),\n outputs={'out': reshuffle_pcoll_id})\n reshuffle_stage = make_stage(stage, reshuffle_transform_id)\n else:\n reshuffle_pcoll_id = split_pcoll_id\n reshuffle_transform_id = None\n\n if context.is_drain:\n truncate_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_truncate_restriction',\n coder_id=sized_coder_id)\n # Lengthprefix the truncate output.\n context.length_prefix_pcoll_coders(truncate_pcoll_id)\n truncate_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/TruncateAndSizeRestriction',\n urn=common_urns.sdf_components.TRUNCATE_SIZED_RESTRICTION.urn,\n inputs=dict(\n transform.inputs, **{main_input_tag: reshuffle_pcoll_id}),\n outputs={'out': truncate_pcoll_id})\n process_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/Process',\n urn=common_urns.sdf_components.\n PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,\n inputs=dict(\n transform.inputs, **{main_input_tag: truncate_pcoll_id}))\n else:\n process_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/Process',\n urn=common_urns.sdf_components.\n PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,\n inputs=dict(\n transform.inputs, **{main_input_tag: reshuffle_pcoll_id}))\n\n yield make_stage(stage, 
pair_transform_id)\n split_stage = make_stage(stage, split_transform_id)\n yield split_stage\n if reshuffle_stage:\n yield reshuffle_stage\n if context.is_drain:\n yield make_stage(\n stage, truncate_transform_id, extra_must_follow=[split_stage])\n yield make_stage(stage, process_transform_id)\n else:\n yield make_stage(\n stage, process_transform_id, extra_must_follow=[split_stage])\n\n else:\n yield stage\n\n else:\n yield stage", "def create_and_optimize_stages(\n pipeline_proto, # type: beam_runner_api_pb2.Pipeline\n phases,\n known_runner_urns, # type: FrozenSet[str]\n use_state_iterables=False,\n is_drain=False):\n # type: (...) -> Tuple[TransformContext, List[Stage]]\n pipeline_context = TransformContext(\n pipeline_proto.components,\n known_runner_urns,\n use_state_iterables=use_state_iterables,\n is_drain=is_drain)\n\n # Initial set of stages are singleton leaf transforms.\n stages = list(\n leaf_transform_stages(\n pipeline_proto.root_transform_ids,\n pipeline_proto.components,\n known_composites=union(known_runner_urns, KNOWN_COMPOSITES)))\n\n # Apply each phase in order.\n for phase in phases:\n _LOGGER.info('%s %s %s', '=' * 20, phase, '=' * 20)\n stages = list(phase(stages, pipeline_context))\n _LOGGER.debug('%s %s' % (len(stages), [len(s.transforms) for s in stages]))\n _LOGGER.debug('Stages: %s', [str(s) for s in stages])\n\n # Return the (possibly mutated) context and ordered set of stages.\n return pipeline_context, stages", "def make_pipeline(steps):\n def compose2(f, g):\n return lambda x: g(f(x))\n return functools.reduce(compose2, steps)", "def steps(self):\n for step in self._steps:\n yield step", "def stage_states(self) -> pulumi.Output[Sequence['outputs.ExecutionStageStateResponse']]:\n return pulumi.get(self, \"stage_states\")", "def _add_stage(self, name):\n def stage_func(self, *args, **kwargs):\n \"\"\" Stage function.\n\n :param args: Positional arguments.\n :param kwargs: Keyword arguments.\n :return: Pipeline (for method chaining).\n \"\"\"\n self._pipe.append(Stage(name, args, kwargs))\n return self\n\n setattr(Pipeline, name, stage_func)", "def sweep_stages(self) -> List[str]:\n return self._sweep_stages", "def load_stages():\n\n def load_stages_from_dir(mro_dir):\n \"\"\"Iterate over MRO file in a directory, parse them, and accumulate\n their stages.\n \"\"\"\n stages = {}\n for file_name in os.listdir(mro_dir):\n if file_name.endswith(\".mro\"):\n stages.update(mro_parser.get_stages(\n os.path.join(mro_dir, file_name)))\n return stages\n\n stages = {}\n for mro_dir in os.environ[\"MROPATH\"].split(':'):\n stages.update(load_stages_from_dir(mro_dir))\n return stages", "def sink_flattens(stages, pipeline_context):\n # type: (Iterable[Stage], TransformContext) -> Iterator[Stage]\n # TODO(robertwb): Actually attempt to sink rather than always materialize.\n # TODO(robertwb): Possibly fuse multi-input flattens into one of the stages.\n for stage in fix_flatten_coders(stages,\n pipeline_context,\n common_urns.primitives.FLATTEN.urn):\n transform = only_element(stage.transforms)\n if (transform.spec.urn == common_urns.primitives.FLATTEN.urn and\n len(transform.inputs) > 1):\n # This is used later to correlate the read and writes.\n buffer_id = create_buffer_id(transform.unique_name)\n flatten_writes = [] # type: List[Stage]\n for local_in, pcoll_in in transform.inputs.items():\n flatten_write = Stage(\n transform.unique_name + '/Write/' + local_in,\n [\n beam_runner_api_pb2.PTransform(\n unique_name=transform.unique_name + '/Write/' + local_in,\n 
inputs={local_in: pcoll_in},\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=bundle_processor.DATA_OUTPUT_URN,\n payload=buffer_id),\n environment_id=transform.environment_id)\n ],\n downstream_side_inputs=frozenset(),\n must_follow=stage.must_follow)\n flatten_writes.append(flatten_write)\n yield flatten_write\n\n yield Stage(\n transform.unique_name + '/Read',\n [\n beam_runner_api_pb2.PTransform(\n unique_name=transform.unique_name + '/Read',\n outputs=transform.outputs,\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=bundle_processor.DATA_INPUT_URN, payload=buffer_id),\n environment_id=transform.environment_id)\n ],\n downstream_side_inputs=stage.downstream_side_inputs,\n must_follow=union(frozenset(flatten_writes), stage.must_follow))\n\n else:\n yield stage", "def concat(stages, maxsize=0):\n\n stages = [_to_stage(s) for s in stages]\n\n return _Stage(\n worker_constructor=WORKER,\n workers=1,\n maxsize=maxsize,\n on_start=None,\n on_done=None,\n target=_concat,\n args=tuple(),\n dependencies=stages,\n )" ]
[ "0.6442683", "0.63786435", "0.6202054", "0.6113744", "0.6091911", "0.60464627", "0.60097075", "0.60097075", "0.5929696", "0.5866913", "0.5734935", "0.56417054", "0.56399095", "0.56399095", "0.56399095", "0.56128675", "0.55290216", "0.5513827", "0.54971427", "0.5468624", "0.5463165", "0.5462", "0.543548", "0.5422332", "0.5402847", "0.5354267", "0.5299588", "0.5281375", "0.5265026", "0.52311677" ]
0.71270746
0
Runs the singularity action
def run(self, reuse=False):
    build = True
    if 'shub://' in self.action['uses']:
        image = self.action['uses']
        build = False
    elif './' in self.action['uses']:
        image = 'action/' + os.path.basename(self.action['uses'])
        singularityfile_path = os.path.join(
            os.getcwd(), self.action['uses'])
    else:
        image = '/'.join(self.action['uses'].split('/')[:2])
        singularityfile_path = os.path.join(self.action['repo_dir'],
                                            self.action['action_dir'])

    if not reuse:
        if self.singularity_exists():
            self.singularity_rm()
        if build:
            self.singularity_build(singularityfile_path, image)
        else:
            self.singularity_pull(image)
    else:
        if not self.singularity_exists():
            if build:
                self.singularity_build(singularityfile_path, image)
            else:
                self.singularity_pull(image)

    e = self.singularity_start(image)

    if e != 0:
        pu.fail('Action {} failed!\n'.format(self.action['name']))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_smoke(self):\n\t\tinit_state = torch.tensor(0.0)\n\t\ttotal_time = torch.tensor(4.0)\n\t\tprint('Agent state trajectory and actions:')\n\t\tAgent().play(init_state, total_time)\n\t\tpyro.clear_param_store()", "def singularity_start(self, image):\n env_vars = self.action.get('env', {})\n\n for s in self.action.get('secrets', []):\n env_vars.update({s: os.environ[s]})\n\n for e, v in self.env.items():\n env_vars.update({e: v})\n\n env_vars.update({'HOME': os.environ['HOME']})\n\n # sets the env variables\n for k, v in env_vars.items():\n Client.setenv(k, v)\n\n e = Client.run(image=self.generate_image_name(image),\n args=' '.join(self.action.get('args', '')),\n return_result=True)\n return e['return_code']", "def handleSingularity(singularity_list, singularity_type, trajectory, robot):\n \n multiStepInter = False\n\n target_trajectory = np.copy(trajectory)\n \n if multiStepInter is False:\n for i in range(0,np.size(singularity_list)):\n \n index = singularity_list[i]\n \n if(singularity_type[i] == 0):\n \n target_trajectory[index][0] = (trajectory[index-1][0] + trajectory[index+1][0]) / 2\n target_trajectory[index][1] = (trajectory[index-1][1] + trajectory[index+1][1]) / 2\n target_trajectory[index][2] = (trajectory[index-1][2] + trajectory[index+1][2]) / 2\n target_trajectory[index][3] = (trajectory[index-1][3] + trajectory[index+1][3]) / 2\n target_trajectory[index][4] = (trajectory[index-1][4] + trajectory[index+1][4]) / 2\n target_trajectory[index][5] = (trajectory[index-1][5] + trajectory[index+1][5]) / 2\n \n for j in range(0,6):\n for k in range(0,2): \n \n delta_angle_joint = np.abs(target_trajectory[index-1+k][j] - trajectory[index+k][j])\n \n if delta_angle_joint > max_velocity[j]*time_step: \n print \"No possible path found because of overhead singularity.\"\n return np.array([robot.GetDOFValues()])\n \n if(singularity_type[i] == 1):\n \n target_trajectory[index][0] = (trajectory[index-1][0] + trajectory[index+1][0]) / 2\n target_trajectory[index][1] = (trajectory[index-1][1] + trajectory[index+1][1]) / 2\n target_trajectory[index][2] = (trajectory[index-1][2] + trajectory[index+1][2]) / 2\n target_trajectory[index][3] = (trajectory[index-1][3] + trajectory[index+1][3]) / 2\n target_trajectory[index][4] = (trajectory[index-1][4] + trajectory[index+1][4]) / 2\n target_trajectory[index][5] = (trajectory[index-1][5] + trajectory[index+1][5]) / 2\n \n for j in range(0,6):\n for k in range(0,2): \n \n delta_angle_joint = np.abs(target_trajectory[index-1+k][j] - trajectory[index+k][j])\n \n if delta_angle_joint > max_velocity[j]*time_step: \n print \"No possible path found because of wrist singularity.\"\n print \n return np.array([robot.GetDOFValues()]) \n else:\n # Multi step interpolation dont function yet\n interStep = 2\n InterSuccess = False\n \n while InterSuccess is False:\n \n for i in range(0,np.size(singularity_list)):\n \n index = singularity_list[i]\n \n #Todo: check if interpolation width hurts trajectory range or other singularity interpolations \n \n target_trajectory[index][0] = (target_trajectory[index-(interStep/2)][0] + target_trajectory[index+(interStep/2)][0]) / 2\n target_trajectory[index][1] = (target_trajectory[index-(interStep/2)][1] + target_trajectory[index+(interStep/2)][1]) / 2\n target_trajectory[index][2] = (target_trajectory[index-(interStep/2)][2] + target_trajectory[index+(interStep/2)][2]) / 2\n target_trajectory[index][3] = (target_trajectory[index-(interStep/2)][3] + target_trajectory[index+(interStep/2)][3]) / 2\n 
target_trajectory[index][4] = (target_trajectory[index-(interStep/2)][4] + target_trajectory[index+(interStep/2)][4]) / 2\n target_trajectory[index][5] = (target_trajectory[index-(interStep/2)][5] + target_trajectory[index+(interStep/2)][5]) / 2\n \n delta_traj = target_trajectory[index]\n delta_traj[0] = (target_trajectory[index+(interStep/2)][0] - target_trajectory[index-(interStep/2)][0]) / interStep\n delta_traj[1] = (target_trajectory[index+(interStep/2)][1] - target_trajectory[index-(interStep/2)][1]) / interStep\n delta_traj[2] = (target_trajectory[index+(interStep/2)][2] - target_trajectory[index-(interStep/2)][2]) / interStep\n delta_traj[3] = (target_trajectory[index+(interStep/2)][3] - target_trajectory[index-(interStep/2)][3]) / interStep\n delta_traj[4] = (target_trajectory[index+(interStep/2)][4] - target_trajectory[index-(interStep/2)][4]) / interStep\n delta_traj[5] = (target_trajectory[index+(interStep/2)][5] - target_trajectory[index-(interStep/2)][5]) / interStep\n \n for s in range(1,interStep/2): \n target_trajectory[index-s][0] = target_trajectory[index][0] - (s * delta_traj[0]) \n target_trajectory[index-s][1] = target_trajectory[index][1] - (s * delta_traj[1]) \n target_trajectory[index-s][2] = target_trajectory[index][2] - (s * delta_traj[2])\n target_trajectory[index-s][3] = target_trajectory[index][3] - (s * delta_traj[3])\n target_trajectory[index-s][4] = target_trajectory[index][4] - (s * delta_traj[4])\n target_trajectory[index-s][5] = target_trajectory[index][5] - (s * delta_traj[5])\n \n target_trajectory[index+s][0] = target_trajectory[index][0] + (s * delta_traj[0]) \n target_trajectory[index+s][1] = target_trajectory[index][1] + (s * delta_traj[1]) \n target_trajectory[index+s][2] = target_trajectory[index][2] + (s * delta_traj[2])\n target_trajectory[index+s][3] = target_trajectory[index][3] + (s * delta_traj[3])\n target_trajectory[index+s][4] = target_trajectory[index][4] + (s * delta_traj[4])\n target_trajectory[index+s][5] = target_trajectory[index][5] + (s * delta_traj[5])\n \n maxVelHurt = False\n \n for j in range(0,6):\n for k in range(0,interStep): \n \n delta_angle_joint = np.abs(target_trajectory[index-(interStep/2)+k][j] - target_trajectory[index-((interStep/2)-1)+k][j])\n \n if delta_angle_joint > max_velocity[j]*time_step and maxVelHurt is False:\n \n maxVelHurt = True \n print \"Interpolation step = \" \n print interStep \n print \"No possible path found because of singularity.\"\n print \"joint:\"\n print j\n print \"step:\"\n print k\n \n print delta_angle_joint\n print \">\"\n print (max_velocity[j]*time_step)\n print \"______________________________________________\"\n #return np.array([robot.GetDOFValues()])\n interStep = interStep + 2\n if interStep == 30:\n return np.array([robot.GetDOFValues()])\n \n \n \n if maxVelHurt is False:\n InterSuccess = True \n \n \n \n return target_trajectory", "def act(self, env: FakeEnv, s: ActorStrategy):\n action = env.action_space.sample()\n print(f\"Sampled action shape : {action.shape}\")\n env.step(action)", "def run_single(self):\n self.run_sim_time(1)", "def act(self, state, epsilon, env):\n if random.random() > epsilon:\n state = Variable(torch.FloatTensor(state)).unsqueeze(0) # adds extra dim when single input\n state = self.vari_gpu(state)\n _, u_opt = self.forward(state)\n action = (u_opt.cpu().detach().numpy()) # compute the u*[0] \n #print('act:q_value ',q_value)\n #print('act:model action ',action)\n else:\n rand = np.random.rand(int(np.array(env.action_space.shape)))\n high = 
env.action_space.high\n low = env.action_space.low\n action = low + rand*(high-low)\n #print('act: ',action)\n return action", "def Main():\n EnigmaSim = simulation() #Creates the simulation object\n EnigmaSim.Run() #Runs the simulation", "def execute(self, agent: Agent, state: SimState) -> None:\n if agent.state() == AgentState.SUSCEPTIBLE and self.days == state.vaccine_time() \\\n and np.random.random() < state.vaccine_share():\n agent.set_state(AgentState.IMMUNE)", "async def handle(self):\n local_controller = self.controller\n local_controller.add_action(local_controller.larvae.random.train(HYDRALISK))\n return True", "def _run(self):\n logging.warning('-> perform EMPTY experiment...')", "def test_sparsity(config):\n total_zeros = 0\n total_nonzeros = 0\n\n print ('<===sparsity type is {}'.format(config.sparsity_type))\n print ('<===layers to be pruned are {}'.format(config._prune_ratios))\n if config.masked_progressive and (config.sparsity_type == 'filter' or config.sparsity_type =='column'or config.sparsity_type == \"bn_filter\" ):\n ### test both column and row sparsity\n print (\"***********checking column sparsity*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print (\"***********checking filter sparsity*************\") \n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print (\"************checking overall sparsity in conv layers*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy() \n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n return\n \n if config.sparsity_type == \"irregular\":\n for name,W in config.model.named_parameters():\n if 'bias' in name:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n total_zeros+=zeros\n nonzeros = np.sum(W!=0)\n total_nonzeros+=nonzeros\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))\n total_weight_number = total_zeros+total_nonzeros\n print ('overal compression rate is {}'.format(total_weight_number/total_nonzeros))\n elif config.sparsity_type == \"column\":\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros)) \n elif config.sparsity_type == 
\"filter\":\n print ('inside if')\n print (config.prune_ratios)\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n elif config.sparsity_type == \"bn_filter\":\n print ('inside bn_filter')\n print (config.prune_ratios)\n for i,(name,W) in enumerate(config.model.named_parameters()):\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n nonzeros = np.sum(W!=0)\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))", "def sing(self):\n if self._energy < self._sing_cost:\n return\n\n self._energy = self._energy - self._sing_cost\n self._env.simulate()", "def execute(self, agent: Agent, state: SimState) -> None:\n if agent.state() is not AgentState.INFECTIVE:\n return\n\n if np.random.random() < state.remove_prob():\n if np.random.random() < state.lethality():\n agent.set_state(AgentState.DEAD)\n else:\n agent.set_state(AgentState.IMMUNE)\n else:\n agent.update_sick_days()", "async def handle(self):\n local_controller = self.controller\n local_controller.add_action(local_controller.larvae.random.train(DRONE))\n return True", "def solveOneStep(self):\n ### Student code goes here\n return True", "def RUN(self):", "def start(self, world):\n self.sense(world)", "def run(self):\n sites = [x for x in self.cell.all_sections['axonnode']]\n ptype = 'pulses'\n self.default_durs=[10., 100., 25.]\n stimdict = { # set up the stimulus parameters\n 'NP': 20,\n 'Sfreq': 500.0,\n 'delay': self.default_durs[0],\n 'dur': 0.5,\n 'amp': 2.0,\n 'PT': 0.0,\n 'dt': self.dt,\n }\n istim = h.iStim(0.5, sec=self.cell.soma)\n istim.delay = 0.\n istim.dur = 1e9 # these actually do not matter...\n istim.iMax = 0.0\n self.run_one(istim, stimdict, sites=sites) #do one simulation", "def act(self, s, exploration, game):\n agent_p = self.compute_marginal_pi(s, one_hot=False)\n if self.exploration and random.random() < self.episilon:\n agent_action = random.randint(0, self.action_num - 1)\n else:\n if self.verbose:\n for s in self.Q.keys():\n print('{}--------------'.format(self.id_))\n print('Q of agent {}: state {}: {}'.format(self.id_, s, str(self.Q[s])))\n # print('QAof agent {}: state {}: {}'.format(self.id_, s, str(self.Q_A[s])))\n # self.Q_A\n print('pi of agent {}: state {}: {}'.format(self.id_, s, self.pi[s]))\n # print('pi of opponent agent {}: state{}: {}'.format(self.id_, s, self.opponent_best_pi[s]))\n print('{}--------------'.format(self.id_))\n agent_action = np.argmax(agent_p)\n return agent_action", "def simulateOneTimeStep(self):\n\n self.susceptibleToInfected()\n self.infectedToRecovered()\n\n # add the new values of healthy/infected/recovered to the arrays keeping track\n SIR_t = np.array([self.getSusceptible(), self.getInfected(), self.getRecovered()])\n #update SIR time series\n self.SIR = np.concatenate([self.SIR, SIR_t[:,np.newaxis]], axis=1)\n\n # add the new snapshot of the simulation\n self.snapshots.append(self.getSpace().copy())", "def step(self, action):\n\n # handling error\n \n assert (action < self.NumofBandits)#, \"[ERROR] 
un-identified arm\"\n\n # return reward via sampling from normal distribution\n return np.random.normal(self.MeanList[action], self.sigma, 1)[0]", "def test_simulation(self):\n\t\tprint \"Simulation is being tested\"\n\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Debug Flag Set!\"\n\t\t\tprint self.getConfig()\n\n\t\tif toggles.PACKING:\n\t\t\ttoggles.OUTPUT_PATH = toggles.OUTPUT_PATH+toggles.RUN_NAME+'/'\n\t\t\tpackageMaker(toggles.OUTPUT_PATH,self.getConfig())\n\t\tif toggles.IDEAL_GRID:\n\t\t\tself.consensusGrid()\n\n\t\tif toggles.REAL_DATA:\n\t\t\tsampleData = self.load_data()\n\t\t\tif toggles.RUN_DATA_STATS:\n\t\t\t\tself.output_data_stats(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_AVERAGE_COST:\n\t\t\t\tself.sim_average_cost(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_SINGLE_PAIR:\n\t\t\t\tself.sim_single_pair_cost(sampleData, pending_eddy(self.pick_worker([0], [0])))\n\t\t\t\tself.reset_database()\n\t\telse:\n\t\t\tsampleData = {}\n\t\t\tsyn_load_data()\n\n\t\tif toggles.RUN_ITEM_ROUTING and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: item Routing\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif PRED_SCORE_COUNT and not (RUN_TASKS_COUNT or RUN_MULTI_ROUTING):\n\t\t\tif DEBUG_FLAG:\n\t\t\t\tprint \"Running: Pred Score count\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\n\n\t\tif toggles.COUNT_TICKETS and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: ticket counting\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif toggles.SELECTIVITY_GRAPH and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: selectivity amounts over time\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\tcorrectAnswers = self.get_correct_answers(toggles.INPUT_PATH + toggles.ITEM_TYPE + '_correct_answers.csv')\n\t\t\tpassedItems = self.get_passed_items(correctAnswers)\n\n\n\t\tif toggles.RUN_OPTIMAL_SIM:\n\t\t\tcountingArr=[]\n\t\t\tself.reset_database()\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running optimal_sim \" +str(i)\n\t\t\t\tself.num_tasks = self.optimal_sim(sampleData)\n\t\t\t\tcountingArr.append(self.num_tasks)\n\t\t\t\tself.reset_database()\n\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_optimal_tasks'\n\t\t\tgeneric_csv_write(dest+'.csv',[countingArr])\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote File: \" + dest+'.csv'\n\n\n\n\t\tif toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING or toggles.RUN_CONSENSUS_COUNT:\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\t#print \"Running: task_count\"\n\t\t\t\t#f = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv', 'a')\n\t\t\t\t#f1 = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_incorrect_count.csv', 'a')\n\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\toutputArray = []\n\n\t\t\trunTasksArray = []\n\t\t\tgoodArray, badArray = [], []\n\t\t\tgoodPoints, badPoints = [], []\n\t\t\taccCount = []\n\t\t\tlocArray = [[],[],[],[]]\n\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running simulation \" + str(i+1)\n\t\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\t\trunTasksArray.append(self.num_tasks)\n\n\t\t\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\t\t\tif 
toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\t\t\tnum_incorrect = self.final_item_mismatch(passedItems)\n\t\t\t\t\taccCount.append(num_incorrect)\n\t\t\t\tif toggles.RUN_CONSENSUS_COUNT or toggles.VOTE_GRID:\n\t\t\t\t\tdonePairs = IP_Pair.objects.filter(Q(num_no__gt=0)|Q(num_yes__gt=0))\n\t\t\t\t\tif toggles.TEST_ACCURACY:\n\t\t\t\t\t\tgoodPairs, badPairs = [], []\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tval = bool((pair.num_yes-pair.num_no)>0)\n\t\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\t\tcorrect = ((correctAnswers[(pair.item,pair.predicate)]) == val)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcorrect = (pair.true_answer == val)\n\t\t\t\t\t\t\tif correct:\n\t\t\t\t\t\t\t\tgoodArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tbadArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tbadPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tgoodArray.append(pair.num_no + pair.num_yes)\n\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\n\t\t\t\t\t#print \"This is number of incorrect items: \", num_incorrect\n\n\t\t\t\tself.reset_database()\n\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\tgeneric_csv_write(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_tasks_count.csv',[runTasksArray])\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv'\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\tif len(runTasksArray)>1:\n\t\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.png'\n\t\t\t\t\t\ttitle = toggles.RUN_NAME + ' Cost distribution'\n\t\t\t\t\t\thist_gen(runTasksArray, dest, labels = ('Cost','Frequency'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + dest\n\t\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"only ran one sim, not running hist_gen\"\n\n\t\t\tif toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_Eddy_sys_' + str(toggles.EDDY_SYS) + '_multi_routing.png'\n\t\t\t\t\ttitle = toggles.RUN_NAME + ' Average Predicate Routing'\n\t\t\t\t\tquestions = toggles.CHOSEN_PREDS\n\t\t\t\t\tarrayData = []\n\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\tarrayData.append([])\n\t\t\t\t\tfor routingL in ROUTING_ARRAY:\n\t\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\t\tarrayData[i].append(routingL[i])\n\t\t\t\t\tmrsavefile = open(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv','w')\n\t\t\t\t\tmrwriter = csv.writer(mrsavefile)\n\t\t\t\t\tmrwriter.writerow(questions)\n\t\t\t\t\tfor row in arrayData:\n\t\t\t\t\t\tmrwriter.writerow(row)\n\t\t\t\t\tmrsavefile.close()\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \"+toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\tstats_bar_graph_gen(arrayData, questions, dest, labels = ('Predicate','# of Items Routed'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.png'\n\t\t\tif toggles.ACCURACY_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_acc_count'\n\t\t\t\tgeneric_csv_write(dest+'.csv',[accCount])\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\thist_gen(accCount, dest+'.png')\n\n\t\t\tif toggles.RUN_CONSENSUS_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_consensus_count'\n\t\t\t\tif 
len(goodArray)>1:\n\t\t\t\t\tif len(badArray) == 0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray])\n\t\t\t\t\t\t#print goodArray\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray,badArray])\n\t\t\t\t\t\t#print goodArray,badArray\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \" + dest + '.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = 'Normalized Distribution of Tasks before Consensus'\n\t\t\t\t\t\tlabels = ('Number of Tasks', 'Frequency')\n\t\t\t\t\t\tif len(badArray) < 2:\n\t\t\t\t\t\t\thist_gen(goodArray, dest+'.png',labels=labels,title=title)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tleg = ('Correctly Evaluated IP pairs','Incorrectly Evaluated IP pairs')\n\t\t\t\t\t\t\tmulti_hist_gen([goodArray,badArray],leg,dest+'.png',labels=labels,title=title)\n\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"only ran one sim, ignoring results\"\n\t\t\tif toggles.VOTE_GRID:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_vote_grid'\n\t\t\t\tif len(goodPoints)>1:\n\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',goodPoints)\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'_good.csv',goodPoints)\n\t\t\t\t\t\tgeneric_csv_write(dest+'_bad.csv',badPoints)\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = \"Vote Grid Graph\"\n\t\t\t\t\t\tlabels = (\"Number of No Votes\",\"Number of Yes Votes\")\n\t\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\t\txL,yL=zip(*goodPoints)\n\t\t\t\t\t\t\tline_graph_gen(xL,yL,dest+'.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tgX,gY = zip(*goodPoints)\n\t\t\t\t\t\t\tbX,bY = zip(*badPoints)\n\t\t\t\t\t\t\tmulti_line_graph_gen((gX,bX),(gY,bY),('Correct','Incorrect'),dest+'_both.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(gX,gY,dest+'_good.png',title=title+\" goodPoints\",labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(bX,bY,dest+'_bad.png',title=title+\" badPoints\",labels=labels,scatter=True,square=True)\n\t\tif toggles.TIME_SIMS:\n\t\t\tself.timeRun(sampleData)\n\n\t\tif toggles.RUN_ABSTRACT_SIM:\n\t\t\tself.abstract_sim(sampleData, toggles.ABSTRACT_VARIABLE, toggles.ABSTRACT_VALUES)", "def _simulate(self, action=None):\n for k in range(int(self.SIMULATION_FREQUENCY // self.config[\"policy_frequency\"])):\n if action is not None and \\\n self.time % int(self.SIMULATION_FREQUENCY // self.config[\"policy_frequency\"]) == 0:\n # Forward action to the spacecraft\n self.spacecraft.act(self.ACTIONS[action])\n\n self.space.act()\n self.space.step(1 / self.SIMULATION_FREQUENCY)\n self.time += 1\n\n # Automatically render intermediate simulation steps if a viewer has been launched\n # Ignored if the rendering is done offscreen\n self._automatic_rendering()\n\n # Stop at terminal states\n if self.done or self._is_terminal():\n break\n self.enable_auto_render = False", "def run(self):\n self._display_sims(self._compute_sims())", "def run_game_logic(self):\n pass", "def test_singular_func(self):\n\n m = Mothur(**self.init_vars)\n m.help()\n\n return", "def run_sim(self):\n self.operator, var_form, opt = self.generate_VQE_args()\n\n backend = Aer.get_backend('statevector_simulator')\n quantum_instance = QuantumInstance(backend=backend)\n vqe = VQE(self.operator, var_form, opt) \n\n self.result = vqe.run(quantum_instance)\n solution = self.extract_solution(self.result, False)\n return solution", "def testSimCompletes(self):\n sim = Simulation()\n self.assertEqual(25, 
sim.run_simple(1, 11, \"output\", 0.1, 2, 10))", "def run(self) -> None:\n self._hass.turn_on('scene.{0}'.format(self._args['scene']))", "def action_run(self):\n pass" ]
[ "0.5975073", "0.5935403", "0.59130096", "0.57898796", "0.5710506", "0.56999266", "0.56843907", "0.5681977", "0.55953836", "0.55517167", "0.5515142", "0.55127794", "0.5501622", "0.5477187", "0.5396195", "0.53956157", "0.5391933", "0.53681904", "0.53640187", "0.53324854", "0.53042185", "0.52998745", "0.5297435", "0.52967316", "0.5284339", "0.5277941", "0.52753276", "0.52611053", "0.5259433", "0.5232182" ]
0.6079829
0
Generates the image name from the image url.
def generate_image_name(self, image):
    return image.replace('shub://', '').replace('/', '-') + '.simg'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_generated_image_name(full_image_url):\r\n\r\n logging.debug('get_generated_image_name({})'.format(full_image_url))\r\n\r\n image_name = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\r\n image_extension = full_image_url.split(\".\")[-1]\r\n image_name = image_name + \".\" + image_extension\r\n logging.debug('get_generated_image_name - image_name = {}'.format(image_name))\r\n return image_name", "def _ImageName(self, image):\n\n image_without_protocol = image.split('/')[-1]\n if '@' in image_without_protocol:\n return image_without_protocol.split('@')[0]\n elif ':' in image:\n return image_without_protocol.split(':')[0]\n else:\n return image_without_protocol", "def image_name(name):\n \n # Gets the '.' position\n dot = name.find('.')\n # Slice the name from beginning and before '.'\n img = name[:dot]\n # return string with jpg format\n return \"{}.jpg\".format(img)", "def GenerateImageName(cls, build_target=None, build_id=None):\n if not build_target and not build_id:\n return \"image-\" + uuid.uuid4().hex\n name = cls.IMAGE_NAME_FMT.format(\n build_target=build_target,\n build_id=build_id,\n uuid=uuid.uuid4().hex[:8])\n return cls._FormalizeName(name)", "def filename(self,imgurl):\n if imgurl.find('/'):\n return imgurl.rsplit('/', 1)[1]", "def _image_filename(image_name):\n return '{}.tar'.format(image_name.replace(':', '_').replace('/', '_'))", "def get_image_filename(self, filename):\n path = 'images/{folder}/{filename}'.format(\n folder=self.folder,\n filename=filename\n )\n return path", "def _get_image_name(image_meta, max_len=pvm_const.MaxLen.FILENAME_DEFAULT):\n return pvm_util.sanitize_file_name_for_api(\n image_meta.name, prefix=DiskType.IMAGE + '_',\n suffix='_' + image_meta.checksum, max_len=max_len)", "def build_image_name(self, tag):\n return self.repository_name + ':' + tag", "def namer(self, image_url, page_url):\n title = page_url.rsplit('/', 2)[1]\n image_ext = image_url.rsplit('.', 1)[1]\n return '%s.%s' % (title, image_ext)", "def image_name(self) -> str:\n return self._image_name", "def image_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"image_name\")", "def path_to_name(img):\n\n return os.path.dirname(img) + '_' + os.path.basename(img)", "def image_file_name(instance, filename):\n\text = filename[-4:]\n\tnew_filename = os.path.join('images',str(instance.image_folder),str(instance.user).replace(\" \",\"\").lower()+ext)\n\treturn new_filename", "def imageNamed(imageName):\n\t\treturn os.path.join(AppVars.imagePath(), imageName)", "def get_image_url():", "def get_image_name(resource_id):\n match_images = re.match(r\".*images/(?P<image_name>[^/]*).*\", resource_id, flags=re.IGNORECASE)\n if match_images:\n return match_images.group(\"image_name\")\n return \"\"", "def generate_image_filename():\n now = datetime.now().strftime('%a-%w-%b-%H:%M:%S')\n return 'CCTV_{0}.jpg'.format(now)", "def _get_image_name(self) -> str:\n dirname = os.path.basename(os.getcwd())\n default_image_name = f\"{dirname}_{self.config_name}\"\n image_name = self.config_options.get(\"image\", default_image_name)\n return image_name", "def generate_image_name(self, schedule, server_name):\n\n max_name_length = 255\n prefix = self._get_image_prefix(schedule)\n now = str(calendar.timegm(self._get_utcnow().utctimetuple()))\n\n # NOTE(ameade): Truncate the server name so the image name is within\n # 255 characters total\n server_name_len = max_name_length - len(now) - len(prefix) - len('--')\n server_name = server_name[:server_name_len]\n\n return (\"%s-%s-%s\" % 
(prefix, server_name, str(now)))", "def name(self, strippath=False):\n return _image.image_name(self, strippath)", "def _gen_image_filename(instance, filename):\n # First, store the original filename in the model\n instance.original_filename = filename\n\n return _unique_path(instance.owner.pk, filename)", "def imId2name(self, im_id):\n \n if isinstance(im_id, int):\n name = str(im_id).zfill(self.STR_ID_LEN) + '.jpg'\n elif isinstance(im_id, str):\n name = im_id + '.jpg'\n else:\n raise AssertionError('Image ID should be of type string or int')\n return name", "def _make_name(self, name=None):\n\n if name:\n new_name = name.split(\"/\")[-1].split(\".png\")[0]\n if new_name.startswith((\"AWS-\", \"Amazon-\")):\n new_name = new_name.split(\"-\", 1)[1]\n # Replace non-alphanumeric with underscores (1:1 mapping)\n new_name = re.sub(r'\\W+', '_', new_name)\n return new_name", "def image_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_name\")", "def image_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_name\")", "def image_url(self):\n context = aq_inner(self.context)\n obj_url = context.absolute_url()\n if hasattr(context, 'getField'):\n field = self.context.getField('image')\n if not field:\n field = context.getField(IMAGE_FIELD_NAME)\n\n if field and field.get_size(context) > 0:\n return u'%s/%s_%s' % (obj_url, field.getName(), 'thumb')\n\n return u\"%s/isaw_logo.png\" % self.portal.absolute_url()", "def l10n_img_file_name(ctx, url):\n url = url.lstrip('/')\n locale = ctx.get('LANG', None)\n if not locale:\n locale = settings.LANGUAGE_CODE\n\n # We use the same localized screenshots for all Spanishes\n if locale.startswith('es') and not _l10n_media_exists('img', locale, url):\n locale = 'es-ES'\n\n if locale != settings.LANGUAGE_CODE:\n if not _l10n_media_exists('img', locale, url):\n locale = settings.LANGUAGE_CODE\n\n return path.join('img', 'l10n', locale, url)", "def get_country_image_name(country):\n\n country = country.replace(\" \", \"-\").replace(\".\", \"\").lower()\n return \"%s.png\" % (country)", "def image_url(self, name):\r\n s3_key = self._generate_s3_key(name)\r\n return s3_key.generate_url(self.IMAGE_LINK_DURATION)" ]
[ "0.8253151", "0.7566465", "0.73151815", "0.7306501", "0.72865564", "0.72370976", "0.7237034", "0.7213149", "0.7112175", "0.71032536", "0.7042305", "0.70135736", "0.7007883", "0.6985449", "0.6945476", "0.6920966", "0.68803626", "0.68720126", "0.6842178", "0.68296844", "0.681122", "0.67095834", "0.66644984", "0.664952", "0.6646352", "0.6646352", "0.6579825", "0.6519138", "0.65161616", "0.65113056" ]
0.8002894
1
Check whether an instance exists or not.
def singularity_exists(self):
    instances = Client.instances(quiet=self.quiet)
    for instance in instances:
        if self.pid in instance.name:
            return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _Exists(self, instance_only: bool = False) -> bool:\n cmd = util.GcloudCommand(self, 'spanner', 'instances', 'describe',\n self.name)\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner instance %s.', self.name)\n return False\n\n if instance_only:\n return True\n\n cmd = util.GcloudCommand(self, 'spanner', 'databases', 'describe',\n self.database)\n cmd.flags['instance'] = self.name\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner database %s.', self.database)\n return False\n\n return True", "def exists(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n in_states=None,\n filters=None,\n):\n instances = find_instances(\n instance_id=instance_id,\n name=name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n in_states=in_states,\n filters=filters,\n )\n if instances:\n log.info(\"Instance exists.\")\n return True\n else:\n log.warning(\"Instance does not exist.\")\n return False", "def exists(self):\n\n if self:\n pass", "def have_this_instance(self, instance):\n for i in self.all_instances:\n if i == instance:\n print(\"YES ITS ME!\")\n return True\n print(\"NO S.B. ELSE\")\n return False", "def exists(self):\n return True", "def exists(self):\n return True", "def _Exists(self):\n cmd = util.GcloudCommand(self, 'beta', 'bigtable', 'instances', 'list')\n cmd.flags['format'] = 'json'\n cmd.flags['project'] = self.project\n # The zone flag makes this command fail.\n cmd.flags['zone'] = []\n stdout, stderr, retcode = cmd.Issue(\n suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n # This is not ideal, as we're returning false not because we know\n # the table isn't there, but because we can't figure out whether\n # it is there. This behavior is consistent without other\n # _Exists methods.\n logging.error('Unable to list GCP Bigtable instances. Return code %s '\n 'STDOUT: %s\\nSTDERR: %s', retcode, stdout, stderr)\n return False\n result = json.loads(stdout)\n instances = {instance['name'] for instance in result}\n full_name = 'projects/{}/instances/{}'.format(self.project, self.name)\n return full_name in instances", "def is_instance_running(self):\n try:\n self.instance.wait(timeout=1)\n except psutil.TimeoutExpired:\n pass\n return self.instance.is_running()", "def exist(self):", "def checkDBImportInstance(self, instance):\n\n\t\tsession = self.configDBSession()\n\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\n\t\tresult = (session.query(\n\t\t\t\tdbimportInstances.name\n\t\t\t)\n\t\t\t.select_from(dbimportInstances)\n\t\t\t.filter(dbimportInstances.name == instance)\n\t\t\t.count())\n\n\t\tif result == 0:\n\t\t\tlogging.error(\"No DBImport Instance with that name can be found in table 'dbimport_instances'\")\n\t\t\tself.remove_temporary_files()\n\t\t\tsys.exit(1)", "def exists(self) -> bool:\n try:\n result = self.get()\n except KeyError:\n return False\n return True", "def check_that_instance_is_alive(self):\n if not self.instance.is_running():\n raise Exception(f\"Starter instance is not running. Base directory: {str(self.basedir)}\")\n if self.instance.status() == psutil.STATUS_ZOMBIE:\n raise Exception(f\"Starter instance is a zombie. 
Base directory: {str(self.basedir)}\")", "def exists(path):\n return get_instance(path).exists(path)", "def objExists(*args, **kwargs)->bool:\n pass", "def exists(self):\n return self.obj is not None", "def object_exists(self, fname):\n return True", "def exists(self):\r\n try:\r\n self.refresh()\r\n except:\r\n return False\r\n return True", "def object_exists(self, fname):\n return False", "def object_exists(self, fname):\n return self.object_exists", "def exists(self, name):\n try:\n self.container.get_object(name)\n return True\n except NoSuchObject:\n return False", "def exists(self, obj):\n return False", "def _verify_unique_instance_name(self, name):\n existing = self.instances.find_one({'name': name, 'deleted': False})\n if existing:\n raise AXApiInvalidParam(\"Fixture instance with name '{}' already exists\".format(name))", "def is_existing(self):\n return self.backend.is_existing", "def __contains__(self, instance: object) -> bool:\n try:\n state = attributes.instance_state(instance)\n except exc.NO_STATE as err:\n raise exc.UnmappedInstanceError(instance) from err\n return self._contains_state(state)", "def _do_check(self):\n try:\n #breakpoint()\n ApplicationsItem.objects.exists()\n #print (\"Checking\")\n return True\n\n except Exception:\n client.captureException()\n return False", "def check(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if not instance.active_metadata_update.url:\n logging.warning(\n 'Instance active metadata operation URL unspecified: %s', key)\n return\n\n now = utils.utcnow()\n result = net.json_request(\n instance.active_metadata_update.url, scopes=gce.AUTH_SCOPES)\n if result['status'] != 'DONE':\n return\n\n if result.get('error'):\n logging.warning(\n 'Instance metadata operation failed: %s\\n%s',\n key,\n json.dumps(result, indent=2),\n )\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': False,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_FAILED', instance.hostname)\n reschedule_active_metadata_update(key, instance.active_metadata_update.url)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)\n else:\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': True,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', instance.hostname)\n clear_active_metadata_update(key, instance.active_metadata_update.url)", "def is_exist(self, class_reference, **key_value):\n return True if service.select_from(class_reference, **key_value).get() else False", "def check_instance(self, class_name, inst_id, stored_objects):\n '''get '<class_name>.id' to FileStorage.__objects key format'''\n instance = \"{}.{}\".format(class_name, inst_id)\n if instance not in stored_objects:\n \"\"\"given id does not exist\"\"\"\n print(\"** no instance found **\")\n instance = False\n return instance", "def exists(self):\n return _os.path.exists(self.__str__())", "def exists(self):\n return bool(self.get())" ]
[ "0.7803968", "0.76079255", "0.70579004", "0.7034308", "0.7027742", "0.7027742", "0.6946009", "0.689386", "0.67451686", "0.6722067", "0.6699531", "0.66554385", "0.6641805", "0.66386664", "0.66077375", "0.6606492", "0.6554289", "0.65404296", "0.6523959", "0.651226", "0.6500812", "0.64558464", "0.64506847", "0.6441972", "0.6436156", "0.6418387", "0.6367281", "0.6344695", "0.62431663", "0.62420857" ]
0.77773875
1
Starts a singularity instance based on the image.
def singularity_start(self, image):
    env_vars = self.action.get('env', {})

    for s in self.action.get('secrets', []):
        env_vars.update({s: os.environ[s]})

    for e, v in self.env.items():
        env_vars.update({e: v})

    env_vars.update({'HOME': os.environ['HOME']})

    # sets the env variables
    for k, v in env_vars.items():
        Client.setenv(k, v)

    e = Client.run(image=self.generate_image_name(image),
                   args=' '.join(self.action.get('args', '')),
                   return_result=True)

    return e['return_code']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def singularity_build(self, path, image):\n Client.build(os.path.join(\n path, 'singularity.def'\n ), self.generate_image_name(image))", "def start_ssm(self, ssm_image):\n pass", "def create_instance_by_image(self):\n print '# Start a new instance based on an existing AMI'\n ami = raw_input('Enter AMI (empty to cancel): ')\n\n # Cancel\n if not ami:\n print 'Operation cancelled'\n return\n\n # Start the instance\n if self.compute.create_instance_by_image(ami):\n print 'Instance started!'\n else:\n print 'It was not possible to create an instance with the given AMI'", "def start_instance(InstanceId=None):\n pass", "def startami(image, instancetype, accesskey, secretkey, pkname):\n if not is_valid_instance_type(image, instancetype):\n raise ValueError(\"Invalid instance type: '%s'\" % instancetype)\n\n conn = EC2Connection(accesskey, secretkey)\n image = conn.get_image(get_image_id(image))\n reservation = image.run(instance_type=instancetype, key_name=pkname)\n instance = reservation.instances[0]\n\n waitForInstanceToRun(instance)\n\n # [AN] call script instanceStartup.py\n return str(instance.dns_name)", "def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'", "def __init__(self, sl):\n threading.Thread.__init__(self)\n self.alpha = 0.5\n self.running = True\n self.slam = sl\n self.image = None\n self.scale = 5.0\n self.centre = np.zeros(2)\n self.mouse = np.zeros(2)\n self.dimensions = np.array([1920, 1080])\n self.tracking = TrackingMode.FREE\n self.map_mode = MapMode.DIST", "def run(self, reuse=False):\n build = True\n if 'shub://' in self.action['uses']:\n image = self.action['uses']\n build = False\n elif './' in self.action['uses']:\n image = 'action/' + os.path.basename(self.action['uses'])\n singularityfile_path = os.path.join(\n os.getcwd(), self.action['uses'])\n else:\n image = '/'.join(self.action['uses'].split('/')[:2])\n singularityfile_path = os.path.join(self.action['repo_dir'],\n self.action['action_dir'])\n\n if not reuse:\n if self.singularity_exists():\n self.singularity_rm()\n if build:\n self.singularity_build(singularityfile_path, image)\n else:\n self.singularity_pull(image)\n else:\n if not self.singularity_exists():\n if build:\n self.singularity_build(singularityfile_path, image)\n else:\n self.singularity_pull(image)\n\n e = self.singularity_start(image)\n\n if e != 0:\n pu.fail('Action {} failed!\\n'.format(self.action['name']))", "def start_sml():\n launchfile = basepath + '/launch/teststarter.launch'\n\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n #print roslaunch.rlutil.check_roslaunch(launchfile)\n #roslaunch.configure_logging(uuid)\n launch = roslaunch.parent.ROSLaunchParent(uuid, [launchfile])\n launch.start()", "def makeInstanceFromImage(self , imageid, initialconfig, instancename):\n self.initCreate(initialconfig)\n ip = self.launchMinipadServer()\n if ip:\n self.__server_ip = ip\n disk = self.createDisk(instancename)\n self.attachDiskToMinipad(disk )\n \n if self.startConversion(imageid , self.__server_ip) == False:\n return None\n\n self.detachDiskFromMinipad(disk)\n vm = self.createVM(disk , instancename)\n return vm", "def start(self, state):\n return self.brain.start(state)", "def do_start(self, arg):\n args = arg.split(\" \")\n 
self.model.initialise(args[0])\n self.model.run()", "def FullSingleSceneInference(self, image_file, USE_MODEL = 'DEFAULT'): \n if USE_MODEL != 'DEFAULT':\n (detector_file, classes_file, map_file, \n model_name, model_type) = self.DownloadModel(USE_MODEL)\n self.SetActiveGraph(detector_graph_file = detector_file,\n model_name = model_name,\n classes_file = classes_file,\n map_file = map_file,\n model_type = model_type\n )\n \n b,s,c = self.AnalyzeSingleImage(image_file = image_file)\n self.DrawImage(image_file = image_file, boxes = b,\n scores = s, classes = c\n )\n return", "def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' # Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i", "def __init__(self, experiment) -> None:\n self.logger = get_logger()\n np.random.seed(experiment.seed)\n self.gpu_imported = False\n\n from sktime.forecasting.naive import NaiveForecaster # type: ignore\n\n # Disable container if certain features are not supported but enforced ----\n dummy = NaiveForecaster()\n self.active = _check_enforcements(forecaster=dummy, experiment=experiment)\n if not self.active:\n return\n\n self.seasonality_present = experiment.seasonality_present\n self.sp = experiment.primary_sp_to_use\n\n if self.sp == 1:\n self.active = False\n return\n\n args = self._set_args\n tune_args = self._set_tune_args\n tune_grid = self._set_tune_grid\n tune_distributions = self._set_tune_distributions\n leftover_parameters_to_categorical_distributions(tune_grid, tune_distributions)\n\n eq_function = lambda x: type(x) is NaiveForecaster and x.sp != 1\n\n super().__init__(\n id=\"snaive\",\n name=\"Seasonal Naive Forecaster\",\n class_def=NaiveForecaster,\n args=args,\n tune_grid=tune_grid,\n tune_distribution=tune_distributions,\n tune_args=tune_args,\n is_gpu_enabled=self.gpu_imported,\n eq_function=eq_function,\n )", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. 
This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True", "def __init__(self, image_size):\n super(SiameseDiscriminator, self).__init__()\n self.cnn1 = nn.Sequential(\n nn.ReflectionPad2d(1),\n nn.Conv2d(3, 4, kernel_size=3),\n nn.LeakyReLU(0.1, inplace=True),\n nn.BatchNorm2d(4),\n nn.Dropout2d(p=.2),\n\n nn.ReflectionPad2d(1),\n nn.Conv2d(4, 8, kernel_size=3),\n nn.LeakyReLU(0.1, inplace=True),\n nn.BatchNorm2d(8),\n nn.Dropout2d(p=.2),\n\n nn.ReflectionPad2d(1),\n nn.Conv2d(8, 8, kernel_size=3),\n nn.LeakyReLU(0.1, inplace=True),\n nn.BatchNorm2d(8),\n nn.Dropout2d(p=.2))\n\n self.fc1 = nn.Sequential(\n nn.Linear(8 * image_size * image_size, 500),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Linear(500, 500),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Linear(500, 15))", "def _start(self):\n if self._classifier is None:\n self._classifier = TFSlimClassifier(self.config)\n self._classifier.__enter__()", "def start_image_builder(Name=None, AppstreamAgentVersion=None):\n pass", "def create_instance(self, image='ami-660c3023', key_name='linuxonEC2', instance_type='t1.micro', security_groups=['default']):\n return self.conn.run_instances(image,\n key_name=key_name,\n instance_type=instance_type,\n security_groups=security_groups).instances[0]", "def __init__(self):\n self.svclassifier = SVC(kernel='linear')", "def start(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Hi send an image to classify!')", "def start_SLP_server(self):\n import slp\n \n s = self.op.get_value('mode_params')\n if len(s) == 0:\n print 'No parameter received'\n params = {}\n else:\n l_p = self.parse_params(s)\n params = {'action':[np.int(l_p[0])]}\n if len(l_p) == 2:\n params['image_feature_layer_name'] = l_p[1]\n slpserver = slp.SLPServerTrial(self, params)\n slpserver.start()", "def start(self: AutoScaler) -> AutoScalerState:\n log.info(f'Autoscale start (policy: {self.policy.name.lower()}, init-size: {self.init_size})')\n log.debug(f'Autoscale launcher: {self.launcher}')\n return AutoScalerState.INIT", "def start(self):\n with self._lock:\n if not self.started():\n self._started = None\n getattr(self.factory, 'start_' + self.class_name())(self)", "def sex(image):\n \n # run sextractor with different default parameters\n print('running SExtractor to {}...'.format(image))\n P = Popen('sextractor -c goto.sex '+image+' -CATALOG_NAME '+image[:-5]+'.cat', shell=True)\n P.wait()", "def init_with_display_image(cls, d, image, is_sub=False):\n # init basic display parameters\n scene = cls()\n scene.illuminant = Illuminant(wave=d.wave)\n scene.dist = d.dist # viewing distance\n\n # compute horizontal field of view\n scene.fov = 2 * rad_to_deg(atan2(image.shape[1] * d.meters_per_dot / 2, d.dist))\n\n # set illuminant as spd of display\n scene.illuminant.photons = energy_to_quanta(d.white_spd, d.wave)\n\n # gamma distortion for the input image\n image = d.lookup_digital((image * (d.n_levels - 1)).astype(int))\n\n # sub-pixel rendering if required\n if is_sub:\n image = d.compute(image)\n\n # compute radiance from image\n out_sz = np.concatenate((np.array(image.shape[0:2]), [d.wave.size]))\n image = rgb_to_xw_format(image)\n scene.photons = 
energy_to_quanta(np.dot(image, d.spd.T), d.wave)\n\n # add ambient quanta to scene photons\n scene.photons += d.ambient\n\n # reshape photons\n scene.photons = scene.photons.reshape(out_sz, order=\"F\")\n return scene", "def create_scene(self, ):\n self.scene = create_scene(\n self.opt.splats_img_size, self.opt.splats_img_size, self.opt.fovy,\n self.opt.focal_length, self.opt.n_splats)", "def process_image(self):\n\n detect.main(self.nn_args)" ]
[ "0.666752", "0.6325278", "0.6263676", "0.614632", "0.5845064", "0.5794984", "0.5747361", "0.5733841", "0.5721612", "0.56165344", "0.5603583", "0.5509438", "0.54767895", "0.5428946", "0.53604615", "0.53470963", "0.5332762", "0.53182864", "0.53123957", "0.5296787", "0.52888715", "0.52811354", "0.52584165", "0.52493566", "0.52435315", "0.5218621", "0.521427", "0.5212473", "0.51945853", "0.5174413" ]
0.7294491
0
Load foia sba datasets
def load_sba_datasets(dbm, direc):
    foia_504_1991_present = pd.read_excel(direc + 'FOIA - 504 (FY1991-Present).xlsx')
    foia_7a_1991_1999 = pd.read_excel(direc + 'FOIA - 7(a) (FY1991-FY1999).xlsx', skiprows=1)
    foia_7a_2000_2009 = pd.read_excel(direc + 'FOIA - 7(a)(FY2000-FY2009).xlsx', skiprows=1)
    foia_7a_2010_present = pd.read_excel(direc + 'FOIA - 7(a) (FY2010-Present).xlsx')

    dbm.write_df_table(
        foia_504_1991_present, table_name='sba__foia_504_1991_present', schema='data_ingest')
    dbm.write_df_table(
        foia_7a_1991_1999, table_name='sba__foia_7a_1991_1999', schema='data_ingest')
    dbm.write_df_table(
        foia_7a_2000_2009, table_name='sba__foia_7a_2000_2009', schema='data_ingest')
    dbm.write_df_table(
        foia_7a_2010_present, table_name='sba__foia_7a_2010_present', schema='data_ingest')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_kiba_dataset():\n trainn_fold = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'folds', 'train_fold_setting1.txt')))\n train_fold = []\n for e in zip(*trainn_fold):\n for ee in e:\n train_fold.extend(ee)\n #train_fold = [ee for e in trainn_fold for ee in e]\n test_fold = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'folds', 'test_fold_setting1.txt')))\n ligands = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'ligands_can.txt')),\n object_pairs_hook=OrderedDict)\n proteins = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'proteins.txt')),\n object_pairs_hook=OrderedDict)\n \n affinity = pickle.load(open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'Y'), \n 'rb'), encoding='latin1')\n smiles_lst, protein_lst = [], []\n\n for k in ligands.keys():\n smiles = ligands[k]\n smiles_lst.append(smiles)\n for k in proteins.keys():\n protein_lst.append(proteins[k])\n\n affinity = np.asarray(affinity)\n \n os.makedirs(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'processed'), exist_ok=True)\n train_test_dataset = []\n for split in ['train', 'test']:\n split_dir = os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'processed', split)\n os.makedirs(split_dir, exist_ok=True)\n fold = train_fold if split == 'train' else test_fold\n rows, cols = np.where(np.isnan(affinity) == False)\n rows, cols = rows[fold], cols[fold]\n \n data_lst = [[] for _ in range(1)]\n for idx in range(len(rows)):\n data = {}\n data['smiles'] = smiles_lst[rows[idx]]\n data['protein'] = protein_lst[cols[idx]]\n af = affinity[rows[idx], cols[idx]]\n data['aff'] = af\n\n data_lst[idx % 1].append(data)\n random.shuffle(data_lst)\n train_test_dataset.append(data_lst[0])\n return train_test_dataset", "def LoadTroikaDataset():\n data_dir = \"./datasets/troika/training_data\"\n data_fls = sorted(glob.glob(data_dir + \"/DATA_*.mat\"))\n ref_fls = sorted(glob.glob(data_dir + \"/REF_*.mat\"))\n return data_fls, ref_fls", "def load_data(self):", "def load_data(self) -> None:", "def load_database(self, fsp='Species'):\n self.df_species = pd.read_csv(fsp + '.csv', header=0,\n index_col=0)", "def import_and_save(ADCthres=0, s=False):\n df = import_data(ADCthres, s)\n bus_vec = np.array(range(0,3))\n for bus in bus_vec:\n df_clu = cluster_data(df, bus) \n save_clusters(df_clu, bus)", "def load_davis_dataset():\n trainn_fold = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'folds', 'train_fold_setting1.txt')))\n train_fold = []\n for e in zip(*trainn_fold):\n for ee in e:\n train_fold.append(ee)\n #train_fold = [ee for e in trainn_fold for ee in e]\n test_fold = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'folds', 'test_fold_setting1.txt')))\n ligands = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'ligands_can.txt')),\n object_pairs_hook=OrderedDict)\n proteins = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'proteins.txt')),\n object_pairs_hook=OrderedDict)\n \n affinity = pickle.load(open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'Y'), \n 'rb'), encoding='latin1')\n smiles_lst, protein_lst = [], []\n\n for k in ligands.keys():\n smiles = ligands[k]\n smiles_lst.append(smiles)\n for k in proteins.keys():\n protein_lst.append(proteins[k])\n\n affinity = [-np.log10(y / 1e9) for y in 
affinity]\n affinity = np.asarray(affinity)\n \n os.makedirs(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'processed'), exist_ok=True)\n train_test_dataset = []\n for split in ['train', 'test']:\n split_dir = os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'processed', split)\n os.makedirs(split_dir, exist_ok=True)\n fold = train_fold if split == 'train' else test_fold\n rows, cols = np.where(np.isnan(affinity) == False)\n rows, cols = rows[fold], cols[fold]\n \n data_lst = [[] for _ in range(1)]\n for idx in range(len(rows)):\n data = {}\n data['smiles'] = smiles_lst[rows[idx]]\n data['protein'] = protein_lst[cols[idx]]\n af = affinity[rows[idx], cols[idx]]\n data['aff'] = af\n\n data_lst[idx % 1].append(data)\n random.shuffle(data_lst)\n train_test_dataset.append(data_lst[0])\n return train_test_dataset", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_data():\n\n base = 'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/'\n fname = 'BSR_bsds500.tgz'\n\n path = get_file(fname,\n origin = base + fname,\n cache_dir = DEFAULT_CACHE_DIR,\n dset_name = 'bsds500')\n\n f = tarfile.open(path)\n\n train_data = []\n test_data = []\n for name in f.getnames():\n if name.startswith('BSR/BSDS500/data/images/train/'):\n try:\n fp = f.extractfile(name)\n img = imageio.imread(fp)\n train_data.append(img)\n except:\n continue\n elif name.startswith('BSR/BSDS500/data/images/test/'):\n try:\n fp = f.extractfile(name)\n img = skimage.io.imread(fp)\n test_data.append(img)\n except:\n continue\n\n\n return (train_data, test_data)", "def load_dsb(self, dataset_dir, subset):\n # Add classes. 
We have only one class to add.\n self.add_class(\"dsb\", 1, \"nucleo\")\n\n # Train or validation dataset?\n if subset == \"train\" or subset == \"val\":\n dataset_dir = os.path.join(dataset_dir, \"TRAINCLAHE/\")\n elif subset == 'test':\n dataset_dir = os.path.join(dataset_dir, \"TEST/\")\n else:\n print(\"Invalid Subset\",subset)\n #Listar quais exames tem\n exames = next(os.walk(dataset_dir))[1]\n\n if subset==\"train\":\n exames = exames[:600]\n elif subset == \"val\":\n exames = exames[600:]\n else:\n # exames = exames\n pass\n\n\n #Acessar a pasta exame/image\n for n, id_ in tqdm(enumerate(exames), total=len(exames)):\n path = dataset_dir + id_\n self.add_image(\n \"dsb\",\n image_id=id_, # use file name as a unique image id\n path=path + '/images/' + id_ + '.png', dir=path,\n )", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def datasets(self):\n pass", "def dataset(options):\n pass", "def datasets(self):\n return [Dataset.GWAS_CATALOG, Dataset.CLINVAR, Dataset.EFO]", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def test_i_large_datasets(self):\n\n # If foma is not installed, exit.\n if not h.foma_installed(force_check=True):\n return\n\n # Configuration\n\n # The ``old_dump_file`` variable holds the name of a MySQL dump file in /tests/data/datasets\n # that will be used to populate the database.\n old_dump_file = 'blaold.sql'\n backup_dump_file = 'old_test_dump.sql'\n\n # The ``precompiled_morphophonology`` variable holds the name of a compiled foma FST that\n # maps surface representations to sequences of morphemes. A file with this name should be\n # present in /tests/data/morphophonologies or else the variable should be set to None.\n pregenerated_morphophonology = None # 'blaold_morphophonology.script'\n precompiled_morphophonology = None # 'blaold_morphophonology.foma'\n\n # Here we load a whole database from the mysqpl dump file specified in ``tests/data/datasets/<old_dump_file>``.\n old_dump_file_path = os.path.join(self.test_datasets_path, old_dump_file)\n backup_dump_file_path = os.path.join(self.test_datasets_path, backup_dump_file)\n tmp_script_path = os.path.join(self.test_datasets_path, 'tmp.sh')\n if not os.path.isfile(old_dump_file_path):\n return\n config = h.get_config(config_filename='test.ini')\n SQLAlchemyURL = config['sqlalchemy.url']\n if not SQLAlchemyURL.split(':')[0] == 'mysql':\n return\n rdbms, username, password, db_name = SQLAlchemyURL.split(':')\n username = username[2:]\n password = password.split('@')[0]\n db_name = db_name.split('/')[-1]\n # First dump the existing database so we can load it later.\n # Note: the --single-transaction option seems to be required (on Mac MySQL 5.6 using InnoDB tables ...)\n # see http://forums.mysql.com/read.php?10,108835,112951#msg-112951\n with open(tmp_script_path, 'w') as tmpscript:\n tmpscript.write('#!/bin/sh\\nmysqldump -u %s -p%s --single-transaction --no-create-info --result-file=%s %s' % (\n username, password, backup_dump_file_path, db_name))\n os.chmod(tmp_script_path, 0744)\n with open(os.devnull, \"w\") as fnull:\n call([tmp_script_path], stdout=fnull, stderr=fnull)\n # Now load the dump file of the large database (from old_dump_file)\n with open(tmp_script_path, 'w') as tmpscript:\n tmpscript.write('#!/bin/sh\\nmysql -u %s -p%s %s < %s' % (username, password, db_name, 
old_dump_file_path))\n with open(os.devnull, \"w\") as fnull:\n call([tmp_script_path], stdout=fnull, stderr=fnull)\n\n # Recreate the default users that the loaded dump file deleted\n administrator = h.generate_default_administrator()\n contributor = h.generate_default_contributor()\n viewer = h.generate_default_viewer()\n Session.add_all([administrator, contributor, viewer])\n Session.commit()\n\n\n ################################################################################\n # PHONOLOGY\n ################################################################################\n\n # Create a Blackfoot phonology with the test phonology script\n params = self.phonology_create_params.copy()\n params.update({\n 'name': u'Blackfoot Phonology',\n 'description': u'The phonological rules of Frantz (1997) as FSTs',\n 'script': self.blackfoot_phonology_script\n })\n params = json.dumps(params)\n response = self.app.post(url('phonologies'), params, self.json_headers,\n self.extra_environ_admin)\n resp = json.loads(response.body)\n phonology_id = resp['id']\n\n \"\"\"\n\n\n ################################################################################\n # MORPHOLOGY\n ################################################################################\n\n # Create a lexicon form search and corpus\n # The code below constructs a query that finds a (large) subset of the Blackfoot morphemes.\n # Notes for future morphology creators:\n # 1. the \"oth\" category is a mess: detangle the nominalizer, inchoative, transitive suffixes, etc. from\n # one another and from the numerals and temporal modifiers -- ugh!\n # 2. the \"pro\" category\" is also a mess: clearly pronoun-forming iisto does not have the same distribution \n # as the verbal suffixes aiksi and aistsi! And oht, the LING/means thing, is different again...\n # 3. hkayi, that thing at the end of demonstratives, is not agra, what is it? ...\n # 4. 
the dim category contains only 'sst' 'DIM' and is not used in any forms ...\n lexical_category_names = ['nan', 'nin', 'nar', 'nir', 'vai', 'vii', 'vta', 'vti', 'vrt', 'adt',\n 'drt', 'prev', 'med', 'fin', 'oth', 'o', 'und', 'pro', 'asp', 'ten', 'mod', 'agra', 'agrb', 'thm', 'whq',\n 'num', 'stp', 'PN']\n durative_morpheme = 15717\n hkayi_morpheme = 23429\n query = {'filter': ['and', [['Form', 'syntactic_category', 'name', 'in', lexical_category_names],\n ['not', ['Form', 'morpheme_break', 'regex', '[ -]']],\n ['not', ['Form', 'id', 'in', [durative_morpheme, hkayi_morpheme]]],\n ['not', ['Form', 'grammaticality', '=', '*']]\n ]]}\n smaller_query_for_rapid_testing = {'filter': ['and', [['Form', 'id', '<', 1000],\n ['Form', 'syntactic_category', 'name', 'in', lexical_category_names]]]}\n params = self.form_search_create_params.copy()\n params.update({\n 'name': u'Blackfoot morphemes',\n 'search': query\n })\n params = json.dumps(params)\n response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)\n lexicon_form_search_id = json.loads(response.body)['id']\n params = self.corpus_create_params.copy()\n params.update({\n 'name': u'Corpus of Blackfoot morphemes',\n 'form_search': lexicon_form_search_id\n })\n params = json.dumps(params)\n response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)\n lexicon_corpus_id = json.loads(response.body)['id']\n\n # Create a rules corpus\n\n # Create a corpus of forms containing words -- to be used to estimate ngram probabilities\n # The goal here is to exclude things that look like words but are not really words, i.e., \n # morphemes; as a heuristic we search for grammatical forms categorized as 'sent' or whose\n # transcription value contains a space or a dash.\n query = {'filter': ['and', [['or', [['Form', 'syntactic_category', 'name', '=', u'sent'],\n ['Form', 'morpheme_break', 'like', '% %'],\n ['Form', 'morpheme_break', 'like', '%-%']]],\n ['Form', 'syntactic_category_string', '!=', None],\n ['Form', 'grammaticality', '=', '']]]}\n params = self.form_search_create_params.copy()\n params.update({\n 'name': u'Find Blackfoot sentences',\n 'description': u'Returns all forms containing words',\n 'search': query\n })\n params = json.dumps(params)\n response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)\n rules_form_search_id = json.loads(response.body)['id']\n params = self.corpus_create_params.copy()\n params.update({\n 'name': u'Corpus of Blackfoot sentences',\n 'form_search': rules_form_search_id\n })\n params = json.dumps(params)\n response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)\n rules_corpus_id = json.loads(response.body)['id']\n\n # Now we reduce the number of category-based word-formation rules by removing all such\n # rules implicit in the rules corpus that have fewer than two exemplar tokens.\n\n # Get the category sequence types of all of the words in the rules corpus ordered by their counts, minus\n # those with fewer than ``minimum_token_count`` counts.\n minimum_token_count = 2\n params = {'minimum_token_count': minimum_token_count}\n response = self.app.get(url(controller='corpora', action='get_word_category_sequences', id=rules_corpus_id),\n params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n\n word_category_sequences = u' '.join([word_category_sequence for word_category_sequence, ids in resp])\n #word_category_sequences = u'agra-vai 
vai-agrb'\n\n # Now create a morphology using the lexicon and rules defined by word_category_sequences\n rich_upper = False\n name = u'Morphology of Blackfoot'\n params = self.morphology_create_params.copy()\n params.update({\n 'name': name,\n 'lexicon_corpus': lexicon_corpus_id,\n 'rules': word_category_sequences,\n 'script_type': u'lexc',\n 'extract_morphemes_from_rules_corpus': False,\n 'rich_upper': rich_upper\n })\n params = json.dumps(params)\n response = self.app.post(url('morphologies'), params, self.json_headers, self.extra_environ_admin_appset)\n resp = json.loads(response.body)\n morphology_id = resp['id']\n assert resp['name'] == name\n assert resp['script_type'] == u'lexc'\n\n # Generate the morphology's script without compiling it.\n response = self.app.put(url(controller='morphologies', action='generate',\n id=morphology_id), headers=self.json_headers,\n extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n generate_attempt = resp['generate_attempt']\n\n # Poll ``GET /morphologies/morphology_id`` until ``generate_attempt`` has changed.\n seconds_elapsed = 0\n wait = 2\n while True:\n response = self.app.get(url('morphology', id=morphology_id),\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n if generate_attempt != resp['generate_attempt']:\n log.debug('Generate attempt for morphology %d has terminated.' % morphology_id)\n break\n else:\n log.debug('Waiting for morphology %d\\'s script to generate: %s' % (\n morphology_id, self.human_readable_seconds(seconds_elapsed)))\n sleep(wait)\n seconds_elapsed = seconds_elapsed + wait\n\n ################################################################################\n # MORPHEME LANGUAGE MODEL\n ################################################################################\n\n # Create a morpheme language model\n name = u'Blackfoot morpheme language model'\n params = self.morpheme_language_model_create_params.copy()\n params.update({\n 'name': name,\n 'corpus': rules_corpus_id,\n 'toolkit': 'mitlm'\n })\n params = json.dumps(params)\n response = self.app.post(url('morphemelanguagemodels'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n morpheme_language_model_id = resp['id']\n assert resp['name'] == name\n assert resp['toolkit'] == u'mitlm'\n assert resp['order'] == 3\n assert resp['smoothing'] == u'' # The ModKN smoothing algorithm is the implicit default with MITLM\n\n # Generate the files of the language model\n response = self.app.put(url(controller='morphemelanguagemodels', action='generate', id=morpheme_language_model_id),\n {}, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n lm_generate_attempt = resp['generate_attempt']\n\n # Poll GET /morphemelanguagemodels/id until generate_attempt changes.\n requester = lambda: self.app.get(url('morphemelanguagemodel', id=morpheme_language_model_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)\n assert resp['generate_message'] == u'Language model successfully generated.'\n\n ################################################################################\n # MORPHOLOGICAL PARSER\n ################################################################################\n\n # Create a morphological parser for Blackfoot\n params = self.morphological_parser_create_params.copy()\n params.update({\n 'name': u'Morphological 
parser for Blackfoot',\n 'phonology': phonology_id,\n 'morphology': morphology_id,\n 'language_model': morpheme_language_model_id\n })\n params = json.dumps(params)\n response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_id = resp['id']\n\n # Compile the morphological parser's morphophonology script if necessary, cf. precompiled_morphophonology and pregenerated_morphophonology.\n morphological_parser_directory = os.path.join(self.morphological_parsers_path, 'morphological_parser_%d' % morphological_parser_id)\n morphophonology_binary_filename = 'morphophonology.foma'\n morphophonology_script_filename = 'morphological_parser.script'\n morphophonology_binary_path = os.path.join(morphological_parser_directory, morphophonology_binary_filename )\n morphophonology_script_path = os.path.join(morphological_parser_directory, morphophonology_script_filename )\n try:\n precompiled_morphophonology_path = os.path.join(self.test_morphophonologies_path, precompiled_morphophonology)\n pregenerated_morphophonology_path = os.path.join(self.test_morphophonologies_path, pregenerated_morphophonology)\n except Exception:\n precompiled_morphophonology_path = None\n pregenerated_morphophonology_path = None\n if (precompiled_morphophonology_path and pregenerated_morphophonology_path and \n os.path.exists(precompiled_morphophonology_path) and os.path.exists(pregenerated_morphophonology_path)):\n # Use the precompiled morphophonology script if it's available,\n copyfileobj(open(precompiled_morphophonology_path, 'rb'), open(morphophonology_binary_path, 'wb'))\n copyfileobj(open(pregenerated_morphophonology_path, 'rb'), open(morphophonology_script_path, 'wb'))\n else:\n # Generate the parser's morphophonology FST, compile it and generate the morphemic language model\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_compile_attempt = resp['compile_attempt']\n\n # Generate the parser's morphophonology FST, compile it and generate the morphemic language model\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n\n # Poll ``GET /morphologicalparsers/mophological_parser_id`` until ``compile_attempt`` has changed.\n requester = lambda: self.app.get(url('morphologicalparser', id=morphological_parser_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt, log,\n wait=10, vocal=True, task_descr='compile morphological parser %s' % morphological_parser_id)\n assert resp['compile_message'] == \\\n u'Compilation process terminated successfully and new binary file was written.'\n\n # Poll ``GET /morphologicalparsers/mophological_parser_id`` until ``compile_attempt`` has changed.\n requester = lambda: self.app.get(url('morphologicalparser', id=morphological_parser_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt, log,\n wait=10, vocal=True, task_descr='compile morphological parser %s' % morphological_parser_id)\n assert resp['compile_message'] == \\\n u'Compilation process 
terminated successfully and new binary file was written.'\n\n # Some reusable transcriptions and their parses\n transcription1 = u'nitsspiyi'\n transcription1_correct_parse = u'%s-%s' % (\n h.rare_delimiter.join([u'nit', u'1', u'agra']),\n h.rare_delimiter.join([u'ihpiyi', u'dance', u'vai']))\n transcription1_impoverished_parse = u'nit-ihpiyi'\n transcription2 = u'aaniit'\n transcription2_correct_parse = u'%s-%s' % (\n h.rare_delimiter.join([u'waanii', u'say', u'vai']),\n h.rare_delimiter.join([u't', u'IMP', u'agrb']))\n transcription2_impoverished_parse = u'waanii-t'\n\n # Test applyup on the mophological parser's morphophonology FST\n params = json.dumps({'transcriptions': [transcription1, transcription2]})\n response = self.app.put(url(controller='morphologicalparsers', action='applyup',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n if rich_upper:\n assert transcription1_correct_parse in resp[transcription1]\n assert transcription2_correct_parse in resp[transcription2]\n else:\n assert transcription1_impoverished_parse in resp[transcription1]\n assert transcription2_impoverished_parse in resp[transcription2]\n\n # Test how well the morphological parser parses some test words.\n params = json.dumps({'transcriptions': [transcription1, transcription2]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp[transcription1] == transcription1_correct_parse\n # aaniit will have waaniit 'scatter' as its most likely parse and the correct parse waanii-t 'say-IMP'\n # as its second most likely...\n assert resp[transcription2] != transcription2_correct_parse\n\n\n\n \"\"\"\n\n\n ################################################################################\n # LOUIE MORPHOLOGY\n ################################################################################\n\n # Create a form search that returns forms containing analyzed words elicited by Louie.\n\n conjuncts = [['or', [['Form', 'syntactic_category', 'name', '=', u'sent'],\n ['Form', 'morpheme_break', 'like', '% %'],\n ['Form', 'morpheme_break', 'like', '%-%']]],\n ['Form', 'syntactic_category_string', '!=', None],\n ['Form', 'grammaticality', '=', ''],\n ['Form', 'elicitor', 'last_name', '=', 'Louie']]\n query = {'filter': ['and', conjuncts]}\n\n params = self.form_search_create_params.copy()\n params.update({\n 'name': u'Forms containing analyzed words elicited by Louie',\n 'search': query\n })\n params = json.dumps(params)\n response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)\n louie_form_search_id = json.loads(response.body)['id']\n\n params = self.corpus_create_params.copy()\n params.update({\n 'name': u'Corpus of forms containing analyzed words elicited by Louie',\n 'form_search': louie_form_search_id\n })\n params = json.dumps(params)\n response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)\n louie_corpus_id = json.loads(response.body)['id']\n\n # Now create a morphology using the Louie corpus as both the lexicon and rules corpora.\n rich_upper = False\n name = u'Morphology of Blackfoot based on words elicited by Louie'\n params = self.morphology_create_params.copy()\n params.update({\n 'name': name,\n 'lexicon_corpus': louie_corpus_id,\n 'rules_corpus': louie_corpus_id,\n 'script_type': u'regex',\n 
'extract_morphemes_from_rules_corpus': True,\n 'rich_upper': rich_upper\n })\n params = json.dumps(params)\n response = self.app.post(url('morphologies'), params, self.json_headers, self.extra_environ_admin_appset)\n resp = json.loads(response.body)\n louie_morphology_id = resp['id']\n assert resp['name'] == name\n assert resp['script_type'] == u'regex'\n\n # Generate the morphology's script without compiling it.\n response = self.app.put(url(controller='morphologies', action='generate',\n id=louie_morphology_id), headers=self.json_headers,\n extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n generate_attempt = resp['generate_attempt']\n\n # Poll ``GET /morphologies/morphology_id`` until ``generate_attempt`` has changed.\n seconds_elapsed = 0\n wait = 2\n while True:\n response = self.app.get(url('morphology', id=louie_morphology_id),\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n if generate_attempt != resp['generate_attempt']:\n log.debug('Generate attempt for morphology %d has terminated.' % louie_morphology_id)\n break\n else:\n log.debug('Waiting for morphology %d\\'s script to generate: %s' % (\n louie_morphology_id, self.human_readable_seconds(seconds_elapsed)))\n sleep(wait)\n seconds_elapsed = seconds_elapsed + wait\n\n\n ################################################################################\n # MORPHEME LANGUAGE MODEL -- LOUIE\n ################################################################################\n\n # Create a morpheme language model based on the data elicited by Louie\n name = u'Blackfoot morpheme language model based on data elicited by Louie'\n params = self.morpheme_language_model_create_params.copy()\n params.update({\n 'name': name,\n 'corpus': louie_corpus_id,\n 'toolkit': 'mitlm'\n })\n params = json.dumps(params)\n response = self.app.post(url('morphemelanguagemodels'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n louie_language_model_id = resp['id']\n assert resp['name'] == name\n assert resp['toolkit'] == u'mitlm'\n assert resp['order'] == 3\n assert resp['smoothing'] == u'' # The ModKN smoothing algorithm is the implicit default with MITLM\n\n # Generate the files of the language model\n response = self.app.put(url(controller='morphemelanguagemodels', action='generate',\n id=louie_language_model_id),\n {}, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n lm_generate_attempt = resp['generate_attempt']\n\n # Poll GET /morphemelanguagemodels/id until generate_attempt changes.\n requester = lambda: self.app.get(url('morphemelanguagemodel', id=louie_language_model_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)\n assert resp['generate_message'] == u'Language model successfully generated.'\n\n ################################################################################\n # MORPHOLOGICAL PARSER -- LOUIE\n ################################################################################\n\n # Create a morphological parser for Blackfoot based on data elicited by Louie\n params = self.morphological_parser_create_params.copy()\n params.update({\n 'name': u'Morphological parser for Blackfoot based on data elicited by Louie',\n 'phonology': phonology_id,\n 'morphology': louie_morphology_id,\n 'language_model': louie_language_model_id\n })\n params = json.dumps(params)\n 
response = self.app.post(url('morphologicalparsers'), params, self.json_headers,\n self.extra_environ_admin)\n resp = json.loads(response.body)\n louie_parser_id = resp['id']\n\n # Compile the morphological parser's morphophonology script if necessary, cf.\n # precompiled_morphophonology and pregenerated_morphophonology.\n morphological_parser_directory = os.path.join(self.morphological_parsers_path,\n 'morphological_parser_%d' % louie_parser_id)\n morphophonology_binary_filename = 'morphophonology.foma'\n morphophonology_script_filename = 'morphological_parser.script'\n morphophonology_binary_path = os.path.join(morphological_parser_directory,\n morphophonology_binary_filename )\n morphophonology_script_path = os.path.join(morphological_parser_directory,\n morphophonology_script_filename )\n try:\n precompiled_morphophonology_path = os.path.join(self.test_morphophonologies_path,\n precompiled_morphophonology)\n pregenerated_morphophonology_path = os.path.join(self.test_morphophonologies_path,\n pregenerated_morphophonology)\n except Exception:\n precompiled_morphophonology_path = None\n pregenerated_morphophonology_path = None\n if (precompiled_morphophonology_path and pregenerated_morphophonology_path and \n os.path.exists(precompiled_morphophonology_path) and os.path.exists(pregenerated_morphophonology_path)):\n # Use the precompiled morphophonology script if it's available,\n copyfileobj(open(precompiled_morphophonology_path, 'rb'), open(morphophonology_binary_path, 'wb'))\n copyfileobj(open(pregenerated_morphophonology_path, 'rb'), open(morphophonology_script_path, 'wb'))\n else:\n # Generate the parser's morphophonology FST, compile it and generate the morphemic language model\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=louie_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_compile_attempt = resp['compile_attempt']\n\n # Generate the parser's morphophonology FST, compile it and generate the morphemic language model\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=louie_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n\n # Poll ``GET /morphologicalparsers/mophological_parser_id`` until ``compile_attempt`` has changed.\n requester = lambda: self.app.get(url('morphologicalparser', id=louie_parser_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt, log,\n wait=10, vocal=True, task_descr='compile morphological parser %s' % louie_parser_id)\n assert resp['compile_message'] == \\\n u'Compilation process terminated successfully and new binary file was written.'\n\n # Poll ``GET /morphologicalparsers/mophological_parser_id`` until ``compile_attempt`` has changed.\n requester = lambda: self.app.get(url('morphologicalparser', id=louie_parser_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt, log,\n wait=10, vocal=True, task_descr='compile morphological parser %s' % louie_parser_id)\n assert resp['compile_message'] == \\\n u'Compilation process terminated successfully and new binary file was written.'\n\n # Some reusable transcriptions and their parses\n transcription1 = u'nitsspiyi'\n transcription1_correct_parse = u'%s-%s' % (\n 
h.rare_delimiter.join([u'nit', u'1', u'agra']),\n h.rare_delimiter.join([u'ihpiyi', u'dance', u'vai']))\n transcription1_impoverished_parse = u'nit-ihpiyi'\n transcription2 = u'aaniit'\n transcription2_correct_parse = u'%s-%s' % (\n h.rare_delimiter.join([u'waanii', u'say', u'vai']),\n h.rare_delimiter.join([u't', u'IMP', u'agrb']))\n transcription2_impoverished_parse = u'waanii-t'\n\n # Test applyup on the mophological parser's morphophonology FST\n params = json.dumps({'transcriptions': [transcription1, transcription2]})\n response = self.app.put(url(controller='morphologicalparsers', action='applyup',\n id=louie_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n if rich_upper:\n assert transcription1_correct_parse in resp[transcription1]\n assert transcription2_correct_parse not in resp[transcription2]\n else:\n assert transcription1_impoverished_parse in resp[transcription1]\n assert transcription2_impoverished_parse not in resp[transcription2]\n\n # Test how well the morphological parser parses some test words.\n params = json.dumps({'transcriptions': [transcription1, transcription2]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=louie_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp[transcription1] == transcription1_correct_parse\n # aaniit will have waaniit 'scatter' as its most likely parse and the correct parse waanii-t 'say-IMP'\n # as its second most likely...\n assert resp[transcription2] != transcription2_correct_parse\n\n\n # Finally, load the original database back in so that subsequent tests can work.\n with open(tmp_script_path, 'w') as tmpscript:\n tmpscript.write('#!/bin/sh\\nmysql -u %s -p%s %s < %s' % (username, password, db_name, backup_dump_file_path))\n with open(os.devnull, \"w\") as fnull:\n call([tmp_script_path], stdout=fnull, stderr=fnull)\n os.remove(tmp_script_path)\n os.remove(backup_dump_file_path)\n\n # Implement category-based class LMs and test them against morpheme-based ones.\n # Build multiple Bf morphological parsers and test them out, find the best one, write a paper on it!", "def load_yaafedata(params, \n n_learn_frames=2000,\n use_custom_stft=False):\n\n audio_file_path = getoptions(params, 'location', '/sons/voxforge/data/Learn/')\n # if no number specified, use n_learn_frames\n n_frames = getoptions(params, 'n_frames', n_learn_frames)\n sr = getoptions(params, 'sr', 16000)\n sigma_noise = getoptions(params, 'sigma', 0.0)\n random_seed = getoptions(params, 'shuffle', 1001)\n features = getoptions(params, 'features', [])\n wintime = getoptions(params, 'wintime', 0.032)\n steptime = getoptions(params, 'steptime', 0.008)\n startpoint = getoptions(params, 'startpoint', 0)\n forbid_list = getoptions(params, 'forbidden_names', [])\n mfnpf = getoptions(params, 'frame_num_per_file', 3000)\n# wintime = float(win_size)/float(sr)\n# steptime = float(step_size)/float(sr)\n \n win_size = int(wintime*sr)\n step_size = int(steptime*sr)\n# print wintime, steptime, win_size, step_size\n # apply sub_routine to all the files until a condition is met\n n_frames_reached = 0\n\n all_file_paths = get_filepaths(audio_file_path,\n random_seed,\n forbid_list = forbid_list)\n file_index = 0\n\n specseq = []\n featseq = []\n dataseq = []\n n_files_used = 0\n\n while (n_frames_reached < n_frames):\n file_index = file_index + 1\n filepath = all_file_paths[file_index]\n n_files_used = n_files_used + 1\n\n [loc_magSTFT, 
loc_Feats, locDatas] = load_data_one_audio_file(\n filepath, sr,\n wintime=wintime,\n steptime=steptime,\n max_frame_num_per_file=mfnpf,\n sigma_noise=sigma_noise,\n startpoint = startpoint,\n features=features)\n# if get_data:\n# [loc_magSTFT, loc_Feats, locDatas] = load_data_one_file_melspec(filepath, sr, sigma_noise, params);\n# Data = [Data , locDatas'];\n# else\n# [loc_magSTFT, loc_Feats, ~] = load_data_one_file_melspec(filepath, sr, sigma_noise, params);\n# end\n if not use_custom_stft:\n specseq.append(loc_magSTFT)\n else:\n specseq.append(np.abs(get_stft(locDatas,\n wsize=win_size,\n tstep=step_size,\n sigma = sigma_noise)).T)\n# print wintime, steptime, win_size, step_size\n# print loc_magSTFT.shape\n# print specseq[-1].shape\n# print locDatas.shape\n featseq.append(loc_Feats)\n dataseq.append(locDatas)\n \n n_frames_reached += min(loc_magSTFT.shape[0], loc_Feats.shape[0])\n print n_frames_reached\n \n Spectrums = np.vstack(specseq)\n Features = np.vstack(featseq)\n Data = np.hstack(dataseq)\n\n n_frames_reached = min(n_frames_reached, n_frames)\n Spectrums = Spectrums[0:n_frames_reached,:]\n Features = Features[0:n_frames_reached,:]\n used_files = all_file_paths[0:n_files_used]\n\n return Features, Spectrums, n_frames_reached, Data, used_files", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def dataload():\n\t\n\tglobal A, B, fnA, fnB, lPcnA, lPcnB\n\t\n\tdwd = os.getcwd() # Data WD\n\t\t\n\t# First sample A is loaded. This is the \"calibrating\" sample.\n\t# In this case it is the OGLE III LMC small amplitude RGs.\n\t\n\tfnA = '/LMC-CalSample-cleaned_2.fits'\n\tA = Table.read(dwd+fnA)\n\n\t# Then sample B is loaded. For comparison/testing purposes, this is\n\t# again the OGLE III LMC SARGs.\n\t\n\tfnB = '/LMC-CalSample-cleaned_2.fits'\n\tB = Table.read(dwd+fnB)\n\t\n\t\"\"\" Fix tables so only the stars with all three good periods are \n\tconsidered. 
\"\"\"\n\t\n\tlPcnA = get_logPcn(A)\n\tlPcnB = get_logPcn(B)\n\t\n\tfor cn in lPcnA:\n\t\tA = A[A[cn]>0]\n\tfor cn in lPcnB:\n\t\tB = B[B[cn]>0]", "def load_data(dataset_path: str):\n data = arff.loadarff(dataset_path)\n data_frame = pd.DataFrame(data[0])\n return data_frame", "def _load20news_miao():\n DIR = os.path.dirname(os.path.realpath(__file__)).split('vae_sparse')[0]+'vae_sparse/optvaedatasets'\n DIR += '/20news_miao'\n h5file = DIR+'/miao.h5'\n if not os.path.exists(h5file):\n flen = len(open(DIR+'/vocab').readlines())\n print 'DIM: ',flen\n np.random.seed(1)\n TRAIN_VALID_MAT = readSparseFile(DIR+'/train.feat', flen, zeroIndexed=False)\n idx = np.random.permutation(TRAIN_VALID_MAT.shape[0])\n VALIDMAT = TRAIN_VALID_MAT[idx[:500]]\n TRAINMAT = TRAIN_VALID_MAT[idx[500:]]\n TESTMAT = readSparseFile(DIR+'/test.feat', flen, zeroIndexed=False) \n saveSparseHDF5(TRAINMAT,'train', h5file)\n saveSparseHDF5(VALIDMAT,'valid', h5file)\n saveSparseHDF5(TESTMAT, 'test' , h5file)\n dset = {}\n dset['vocabulary']= [k.strip().split(' ')[0] for k in open(DIR+'/vocab').readlines()]\n dset['train'] = loadSparseHDF5('train',h5file)\n dset['valid'] = loadSparseHDF5('valid',h5file)\n dset['test'] = loadSparseHDF5('test',h5file)\n dset['dim_observations'] = dset['train'].shape[1]\n dset['data_type'] = 'bow'\n return dset", "def load_datasets():\n idx, data_paths, data_names, desc_paths, descrips, sql_paths, \\\n sql_names, loaded, table_size, \\\n loaded_names = mgr.build_datasets_table()\n return render_template('load_datasets.html',\n zip=zip(idx, data_paths, data_names, desc_paths,\n descrips, sql_paths, sql_names, loaded,\n table_size),\n data_names=loaded_names)", "def __loadDataset(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'dataset':\n if pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n\n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"undefined:/%s\" % doc.getRaw()\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n \n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"local-tests:/%s\" % doc.getRaw()\n else:\n pass", "def load_all_data_from_file(self) -> None:\n self.load_gene_data_from_file()\n self.load_ontology_from_file(ontology_type=DataType.GO, ontology_url=self.go_ontology_url,\n ontology_cache_path=self.go_ontology_cache_path,\n config=self.config)\n self.load_associations_from_file(associations_type=DataType.GO, associations_url=self.go_associations_url,\n associations_cache_path=self.go_associations_cache_path, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.DO, ontology_url=self.do_ontology_url,\n ontology_cache_path=self.do_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.DO, associations_url=self.do_associations_url,\n associations_cache_path=self.do_associations_cache_path,\n association_additional_cache_path=self.do_associations_new_cache_path,\n association_additional_url=self.do_associations_new_url, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.EXPR, 
ontology_url=self.expression_ontology_url,\n ontology_cache_path=self.expression_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.EXPR,\n associations_url=self.expression_associations_url,\n associations_cache_path=self.expression_associations_cache_path,\n config=self.config)\n self.load_orthology_from_file()\n self.load_expression_cluster_data()\n self.load_protein_domain_information()", "def test_load_dataset():\n\n # Given\n dataset_file_name = core.config.app_config.TESTING_DATA_FILE\n\n # When\n subject = utils.load_dataset(filename=dataset_file_name)\n\n # Then\n assert isinstance(subject, pd.DataFrame)\n assert subject.shape == (5940, 41)", "def load(as_pandas=None):\n return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas)", "def load(as_pandas=None):\n return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas)" ]
[ "0.629331", "0.62574154", "0.61739457", "0.61300355", "0.6085598", "0.6068422", "0.6059043", "0.599034", "0.5854947", "0.58494705", "0.58464795", "0.5826532", "0.58246195", "0.58217824", "0.5821119", "0.57790554", "0.5750343", "0.5750343", "0.5742792", "0.57419103", "0.5741909", "0.57248306", "0.57135147", "0.5704186", "0.56925964", "0.567108", "0.5655235", "0.56461793", "0.5633125", "0.5633125" ]
0.76649237
0
We use the FMI standard to extract the correct set of config_params, inputs, and outputs. We look into the "causality" attribute for each variable in the model description.
def _extract_sim_config_from_fmi_std(self):
    print("\n---- Looking to see if FMU model description contains required 'causality' type definitions ----")

    sim_config_params = []
    sim_inputs = []
    sim_outputs = []
    sim_other_vars = []

    for variable in self.model_description.modelVariables:
        # extract causality and append value
        causality = variable.causality
        if causality == "parameter":
            sim_config_params.append(variable.name)
        elif causality == "input":
            sim_inputs.append(variable.name)
        elif causality == "output":
            sim_outputs.append(variable.name)
        else:
            sim_other_vars.append(variable.name)

    # Validate values extracted
    if len(sim_inputs) == 0:
        print("\n[FMU Validator] Sim FMU description file has no sim-input states, and thus cannot be used.")
    elif len(sim_outputs) == 0:
        print("\n[FMU Validator] Sim FMU description file has no sim-output states, and thus cannot be used.")
    else:
        # Store data extracted as attributes
        self.sim_config_params = sim_config_params
        self.sim_inputs = sim_inputs
        self.sim_outputs = sim_outputs
        self.sim_other_vars = sim_other_vars
        return True

    # Dump auxiliary YAML file for user to review/edit
    self._dump_config_to_yaml_file(sim_config_params, sim_inputs, sim_outputs, sim_other_vars, is_aux_yaml=True)
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n 
self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n 
self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def setup(self):\n declared = []\n spec = get_specs_for_module(self.module_name)\n\n # Inputs\n for entry in spec.cpacs_inout.inputs:\n if entry.var_name in declared:\n log.info(\"Already declared\")\n elif entry.var_name in Rt.optim_var_dict:\n var = Rt.optim_var_dict[entry.var_name]\n if entry.var_name in Rt.optim_var_dict:\n self.add_input(entry.var_name, val=var[1][0])\n declared.append(entry.var_name)\n\n if declared == []:\n self.add_input(self.module_name + \"_in\")\n declared = []\n\n for entry in spec.cpacs_inout.outputs:\n # Replace special characters from the name of the entry and checks for accronyms\n entry.var_name = change_var_name(entry.var_name)\n\n if entry.var_name in declared:\n log.info(\"Already declared\")\n elif entry.var_name in Rt.optim_var_dict:\n var = Rt.optim_var_dict[entry.var_name]\n self.add_output(entry.var_name, val=var[1][0])\n declared.append(entry.var_name)\n elif (\n \"aeromap\" in entry.var_name and self.module_name == Rt.last_am_module\n ): # == 'PyTornado': #not skf^is_skf:\n # Condition to avoid any conflict with skinfriction\n for name in PARAMS:\n if name in Rt.optim_var_dict:\n var = Rt.optim_var_dict[name]\n self.add_input(name, val=var[1][0])\n declared.append(entry.var_name)\n for name in COEFS:\n if name in Rt.optim_var_dict:\n var = Rt.optim_var_dict[name]\n if is_digit(var[1][0]):\n self.add_output(name, val=var[1][0])\n else:\n self.add_output(name)\n 
declared.append(entry.var_name)\n\n if declared == []:\n self.add_output(self.module_name + \"_out\")", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def get_input(self):\n system, configuration = self.get_system_configuration(None)\n references = self.parent.references\n\n P = self.parameters.current_values_to_dict(\n context=seamm.flowchart_variables._data\n )\n\n # The model chemistry, for labeling properties.\n self.model = P[\"hamiltonian\"]\n\n # Have to fix formatting for printing...\n PP = dict(P)\n for key in PP:\n if isinstance(PP[key], units_class):\n PP[key] = \"{:~P}\".format(PP[key])\n\n # Save the description for later printing\n self.description = []\n self.description.append(__(self.description_text(PP), **PP, indent=self.indent))\n\n # Start gathering the keywords\n keywords = copy.deepcopy(P[\"extra keywords\"])\n keywords.append(\"1SCF\")\n keywords.append(P[\"hamiltonian\"])\n\n if P[\"hamiltonian\"] == \"AM1\":\n elements = configuration.atoms.symbols\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1985c\"],\n alias=\"Dewar_1985c\",\n module=\"mopac_step\",\n level=1,\n note=\"Main reference for AM1 + C, H, N, O.\",\n )\n for element in (\"F\", \"Cl\", \"Br\", \"I\"):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1988\"],\n alias=\"Dewar_1988\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for F, Cl, Br, I.\",\n )\n break\n if \"Al\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1990\"],\n alias=\"Dewar_1990\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Al.\",\n )\n if \"Si\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1987b\"],\n alias=\"Dewar_1987b\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Si.\",\n )\n if \"P\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1989\"],\n alias=\"Dewar_1989\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for P.\",\n )\n if \"S\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1990b\"],\n alias=\"Dewar_1990b\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for S.\",\n )\n if \"Zn\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1988b\"],\n alias=\"Dewar_1988b\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Zn.\",\n )\n if \"Ge\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1989b\"],\n alias=\"Dewar_1989b\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Ge.\",\n )\n if \"Mo\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Voityuk_2000\"],\n alias=\"Voityuk_2000\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Mo.\",\n )\n if \"Hg\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1989c\"],\n alias=\"Dewar_1989c\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Hg.\",\n )\n for element in (\n \"Li\",\n \"Be\",\n \"Na\",\n \"Mg\",\n \"K\",\n \"Ca\",\n \"Ga\",\n 
\"As\",\n \"Se\",\n \"Rb\",\n \"Sr\",\n \"In\",\n \"Sn\",\n \"Sb\",\n \"Te\",\n \"Cs\",\n \"Ba\",\n \"Pb\",\n \"Bi\",\n ):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2004\"],\n alias=\"Stewart_2004\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameterization for main-group elements.\",\n )\n break\n elif P[\"hamiltonian\"] == \"MNDO\" or P[\"hamiltonian\"] == \"MNDOD\":\n elements = configuration.atoms.symbols\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1977\"],\n alias=\"Dewar_1977\",\n module=\"mopac_step\",\n level=1,\n note=\"Main reference for MNDO + C, H, N, O.\",\n )\n if \"Be\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1978\"],\n alias=\"Dewar_1978\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Be.\",\n )\n if \"B\" in elements or \"Al\" in elements:\n if \"B\" in elements or P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Davis_1981\"],\n alias=\"Davis_1981\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for B and Al.\",\n )\n if \"F\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1978b\"],\n alias=\"Dewar_1978b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for F.\",\n )\n if \"Si\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1986\"],\n alias=\"Dewar_1986\",\n module=\"mopac_step\",\n level=1,\n note=\"Revised MNDO parameters for Si.\",\n )\n if \"P\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1978b\"],\n alias=\"Dewar_1978b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for P.\",\n )\n if \"S\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1986b\"],\n alias=\"Dewar_1986b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for S.\",\n )\n if \"Cl\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1983\"],\n alias=\"Dewar_1983\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Cl.\",\n )\n if \"Zn\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1986c\"],\n alias=\"Dewar_1986c\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Zn.\",\n )\n if \"Ge\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1987\"],\n alias=\"Dewar_1987\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Ge.\",\n )\n if \"Br\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1983b\"],\n alias=\"Dewar_1983b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Br.\",\n )\n if \"Sn\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1984\"],\n alias=\"Dewar_1984\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Sn.\",\n )\n if \"I\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1984b\"],\n alias=\"Dewar_1984b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for I.\",\n )\n if \"Hg\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1985\"],\n alias=\"Dewar_1985\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Hg.\",\n )\n if \"Pb\" in 
elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1985b\"],\n alias=\"Dewar_1985b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Pb.\",\n )\n for element in (\n \"Na\",\n \"Mg\",\n \"K\",\n \"Ca\",\n \"Ga\",\n \"As\",\n \"Se\",\n \"Rb\",\n \"Sr\",\n \"In\",\n \"Sb\",\n \"Te\",\n \"Cs\",\n \"Ba\",\n \"Tl\",\n \"Bi\",\n ):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2004\"],\n alias=\"Stewart_2004\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameterization for main-group elements.\",\n )\n break\n if P[\"hamiltonian\"] == \"MNDOD\":\n for element in (\n \"Al\",\n \"Si\",\n \"P\",\n \"S\",\n \"Cl\",\n \"Br\",\n \"I\",\n \"Zn\",\n \"Cd\",\n \"Hg\",\n ):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Thiel_1992\"],\n alias=\"Thiel_1992\",\n module=\"mopac_step\",\n level=1,\n note=(\"MNDO-D formalism for d-orbitals.\"),\n )\n references.cite(\n raw=self.parent._bibliography[\"Thiel_1996\"],\n alias=\"Thiel_1996\",\n module=\"mopac_step\",\n level=1,\n note=(\n \"MNDO-D, parameters for Al, Si, P, S, Cl, Br, \"\n \"I, Zn, Cd, and Hg.\"\n ),\n )\n break\n elif P[\"hamiltonian\"] == \"PM3\":\n elements = configuration.atoms.symbols\n references.cite(\n raw=self.parent._bibliography[\"Stewart_1989\"],\n alias=\"Stewart_1989\",\n module=\"mopac_step\",\n level=1,\n note=\"The citation for the MOPAC parameterization.\",\n )\n for element in (\n \"Be\",\n \"Mg\",\n \"Zn\",\n \"Ga\",\n \"Ge\",\n \"As\",\n \"Se\",\n \"Cd\",\n \"In\",\n \"Sn\",\n \"Sb\",\n \"Te\",\n \"Hg\",\n \"Tl\",\n \"Pb\",\n \"Bi\",\n ):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_1991\"],\n alias=\"Stewart_1991\",\n module=\"mopac_step\",\n level=1,\n note=\"The citation for the MOPAC parameterization.\",\n )\n break\n if \"Li\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Anders_1993\"],\n alias=\"Anders_1993\",\n module=\"mopac_step\",\n level=1,\n note=\"The citation for the MOPAC parameterization.\",\n )\n for element in (\"B\", \"Na\", \"K\", \"Ca\", \"Rb\", \"Sr\", \"Cs\", \"Ba\"):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2004\"],\n alias=\"Stewart_2004\",\n module=\"mopac_step\",\n level=1,\n note=\"The citation for the MOPAC parameterization.\",\n )\n break\n elif \"PM6\" in P[\"hamiltonian\"]:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2007\"],\n alias=\"Stewart_2007\",\n module=\"mopac_step\",\n level=1,\n note=\"The PM6 parameterization in MOPAC.\",\n )\n if P[\"hamiltonian\"] == \"PM6-D3\":\n references.cite(\n raw=self.parent._bibliography[\"Grimme_2010\"],\n alias=\"Grimme_2010\",\n module=\"mopac_step\",\n level=1,\n note=\"Dispersion correction by Grimme, et al.\",\n )\n if P[\"hamiltonian\"] == \"PM6-DH+\":\n references.cite(\n raw=self.parent._bibliography[\"Korth_2010\"],\n alias=\"Korth_2010\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-bonding correction by Korth.\",\n )\n if \"PM6-DH2\" in P[\"hamiltonian\"]:\n references.cite(\n raw=self.parent._bibliography[\"Korth_2009\"],\n alias=\"Korth_2009\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-bonding and dispersion correction.\",\n )\n references.cite(\n raw=self.parent._bibliography[\"Rezac_2009\"],\n alias=\"Rezac_2009\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-bonding and dispersion correction.\",\n )\n if P[\"hamiltonian\"] == \"PM6-DH2x\":\n references.cite(\n 
raw=self.parent._bibliography[\"Rezac_2011\"],\n alias=\"Rezac_2011\",\n module=\"mopac_step\",\n level=1,\n note=\"Halogen-bonding correction.\",\n )\n if \"PM6-D3H4\" in P[\"hamiltonian\"]:\n references.cite(\n raw=self.parent._bibliography[\"Rezac_2011\"],\n alias=\"Rezac_2011\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-bonding and dispersion correction.\",\n )\n references.cite(\n raw=self.parent._bibliography[\"Vorlova_2015\"],\n alias=\"Vorlova_2015\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-hydrogen repulsion correction.\",\n )\n if P[\"hamiltonian\"] == \"PM6-D3H4x\":\n references.cite(\n raw=self.parent._bibliography[\"Brahmkshatriya_2013\"],\n alias=\"Brahmkshatriya_2013\",\n module=\"mopac_step\",\n level=1,\n note=\"Halogen-oxygen and halogen-nitrogen correction.\",\n )\n elif \"PM7\" in P[\"hamiltonian\"]:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2012\"],\n alias=\"Stewart_2012\",\n module=\"mopac_step\",\n level=1,\n note=\"The PM7 parameterization in MOPAC.\",\n )\n elif P[\"hamiltonian\"] == \"RM1\":\n references.cite(\n raw=self.parent._bibliography[\"Rocha_2006\"],\n alias=\"Rocha_2006\",\n module=\"mopac_step\",\n level=1,\n note=\"RM1 parameterization.\",\n )\n\n # which structure? may need to set default first...\n if P[\"structure\"] == \"default\":\n if self._id[-1] == \"1\":\n structure = \"initial\"\n else:\n structure = \"current\"\n elif self._id[-1] == \"1\":\n structure = \"initial\"\n elif P[\"structure\"] == \"current\":\n structure = \"current\"\n\n if structure == \"current\":\n keywords.append(\"OLDGEO\")\n\n if P[\"convergence\"] == \"normal\":\n pass\n elif P[\"convergence\"] == \"precise\":\n keywords.append(\"PRECISE\")\n elif P[\"convergence\"] == \"relative\":\n keywords.append(\"RELSCF=\" + P[\"relative\"])\n elif P[\"convergence\"] == \"absolute\":\n keywords.append(\"SCFSCRT=\" + P[\"absolute\"])\n else:\n raise RuntimeError(\n \"Don't recognize convergence '{}'\".format(P[\"convergence\"])\n )\n\n if P[\"uhf\"]:\n keywords.append(\"UHF\")\n\n if P[\"MOZYME\"] == \"always\":\n keywords.append(\"MOZYME\")\n elif (\n P[\"MOZYME\"] == \"for larger systems\"\n and configuration.n_atoms >= P[\"nMOZYME\"]\n ):\n keywords.append(\"MOZYME\")\n\n if P[\"COSMO\"]:\n keywords.append(f\"EPS={P['eps']}\")\n rsolve = P[\"rsolve\"].to(\"Å\").magnitude\n keywords.append(f\"RSOLVE={rsolve}\")\n keywords.append(f\"NSPA={P['nspa']}\")\n keywords.append(f\"DISEX={P['disex']}\")\n\n if P[\"calculate gradients\"]:\n keywords.append(\"GRADIENTS\")\n\n if \"yes\" in P[\"bond orders\"]:\n keywords.append(\"BONDS\")\n\n # Add any extra keywords so that they appear at the end\n metadata = self.metadata[\"keywords\"]\n for keyword in P[\"extra keywords\"]:\n if \"=\" in keyword:\n keyword, value = keyword.split(\"=\")\n if keyword not in metadata or \"format\" not in metadata[keyword]:\n keywords.append(keyword + \"=\" + value)\n else:\n keywords.append(metadata[keyword][\"format\"].format(keyword, value))\n\n result = []\n result.append([[*keywords], None, None])\n\n # Handle MOZYME follow-up calculations\n if \"MOZYME\" in keywords:\n follow_up = P[\"MOZYME follow-up\"]\n if \"exact\" in follow_up:\n keywords.remove(\"MOZYME\")\n if \"1SCF\" not in keywords:\n keywords.append(\"1SCF\")\n keywords.append(\"OLDGEO\")\n result.append([[*keywords], None, \"MOZYME follow-up using MOPAC\"])\n elif \"new\" in follow_up:\n if \"1SCF\" not in keywords:\n keywords.append(\"1SCF\")\n keywords.append(\"OLDGEO\")\n 
result.append([[*keywords], None, \"MOZYME follow-up, reinitializing\"])\n elif follow_up == \"none\":\n pass\n else:\n logger.error(f\"Don't recognize the MOZYME follow-up: '{follow_up}'\")\n\n return result", "def __init__(self, dataset):\n\n self.input_names = ['CORCON_nv_lwc_vcol',\n 'CORCON_nv_lwc_icol',\n 'CORCON_nv_lwc_vref',\n 'CORCON_nv_lwc_iref',\n 'CORCON_nv_twc_vcol',\n 'CORCON_nv_twc_icol',\n 'CORCON_nv_twc_vref',\n 'CORCON_nv_twc_iref',\n 'TAS_RVSM',\n 'IAS_RVSM',\n 'PS_RVSM',\n 'WOW_IND',\n 'CLWCIREF', 'CLWCVREF', 'CLWCICOL', 'CLWCVCOL',\n 'CTWCIREF', 'CTWCVREF', 'CTWCICOL', 'CTWCVCOL',\n 'CALNVTWC',\n 'CALNVLWC1',\n 'CALNVLWC2',\n 'CALNVL']\n\n self.outputs = [parameter('NV_TWC_U',\n units='gram m-3',\n frequency=64,\n long_name='Uncorrected total condensed water content from the Nevzorov probe'),\n parameter('NV_LWC1_U',\n units='gram m-3',\n frequency=64,\n long_name='Uncorrected liquid water content from the Nevzorov probe (1st collector)',\n standard_name='mass_concentration_of_liquid_water_in_air'),\n parameter('NV_LWC2_U',\n units='gram m-3',\n frequency=64,\n long_name='Uncorrected liquid water content from the Nevzorov probe (2nd collector)',\n standard_name='mass_concentration_of_liquid_water_in_air'),\n parameter('NV_TWC_C',\n units='gram m-3',\n frequency=64,\n long_name='Corrected total condensed water content from the Nevzorov probe'),\n parameter('NV_LWC1_C',\n units='gram m-3',\n frequency=64,\n long_name='Corrected liquid water content from the Nevzorov probe (1st collector)',\n standard_name='mass_concentration_of_liquid_water_in_air'),\n parameter('NV_LWC2_C',\n units='gram m-3',\n frequency=64,\n long_name='Corrected liquid water content from the Nevzorov probe (2nd collector)',\n standard_name='mass_concentration_of_liquid_water_in_air'),\n parameter('NV_TWC_P',\n units='W',\n frequency=64,\n long_name='TWC collector power'),\n parameter('NV_LWC1_P',\n units='W',\n frequency=64,\n long_name='LWC1 collector power'),\n parameter('NV_LWC2_P',\n units='W',\n frequency=64,\n long_name='LWC2 collector power'),\n parameter('NV_REF_P',\n units='W',\n frequency=64,\n long_name='Reference power')]\n\n self.version = 1.00\n cal_base.__init__(self, dataset)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['C', 'dual', 'penalty', 'l1_ratio', 'tol', 'fit_intercept',\n 'solver','intercept_scaling', 'max_iter', 'multi_class',\n 'class_weight', 'random_state'])\n # notFound must be empty\n assert(not notFound)\n self.initializeModel(settings)", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n 
self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()", "def config1() :\n data_name = \"titanic\" ### in data/input/\n model_class = 'AutoML' ### ACTUAL Class name for model_sklearn.py\n n_sample = 1000\n\n def post_process_fun(y): ### After prediction is done\n return int(y)\n\n def pre_process_fun(y): ### Before the prediction is done\n return int(y)\n\n\n model_dict = {'model_pars': {\n ### LightGBM API model #######################################\n 'model_class': model_class\n ,'model_pars' : {\n 'total_time_limit' : 20,\n 'algorithms' : 'auto',\n 'results_path' : root_repo + f'/data/output/{data_name}/{os_get_function_name()}/automl_1',\n 'eval_metric' : 'auto'\n\n # mode='Explain',\n # ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True,\n # stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto',\n # golden_features='auto', features_selection='auto', start_random_models='auto',\n # hill_climbing_steps='auto', top_models_to_improve='auto', verbose=1, random_state=1234)\n }\n\n , 'post_process_fun' : post_process_fun ### After prediction ##########################################\n , 'pre_process_pars' : {'y_norm_fun' : pre_process_fun , ### Before training ##########################\n\n\n ### Pipeline for data processing ##############################\n 'pipe_list': [\n #### coly target prorcessing\n {'uri': 'source/prepro.py::pd_coly', 'pars': {}, 'cols_family': 'coly', 'cols_out': 'coly', 'type': 'coly' },\n\n\n {'uri': 'source/prepro.py::pd_colnum_bin', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'colnum_bin', 'type': '' },\n {'uri': 'source/prepro.py::pd_colnum_binto_onehot', 'pars': {}, 'cols_family': 'colnum_bin', 'cols_out': 'colnum_onehot', 'type': '' },\n\n #### catcol INTO integer, colcat into OneHot\n {'uri': 'source/prepro.py::pd_colcat_bin', 'pars': {}, 'cols_family': 'colcat', 'cols_out': 'colcat_bin', 'type': '' },\n # {'uri': 'source/prepro.py::pd_colcat_to_onehot', 'pars': {}, 'cols_family': 'colcat_bin', 'cols_out': 'colcat_onehot', 'type': '' },\n\n\n ### Cross_feat = feat1 X feat2\n # {'uri': 'source/prepro.py::pd_colcross', 'pars': {}, 'cols_family': 'colcross', 'cols_out': 'colcross_pair', 'type': 'cross'},\n\n\n #### Example of Custom processor\n #{'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'col_myfun', 'type': '' }, \n\n\n ],\n }\n },\n\n 'compute_pars': { 'metric_list': ['accuracy_score','average_precision_score']\n\n ,'mlflow_pars' : None # {} ### Not empty --> use mlflow\n },\n\n 'data_pars': { 'n_sample' : n_sample,\n\n 'download_pars' : None,\n\n\n 'cols_input_type' : cols_input_type_1,\n ### family of columns for MODEL #########################################################\n # \"colnum\", \"colnum_bin\", \"colnum_onehot\", \"colnum_binmap\", #### Colnum columns\n # \"colcat\", \"colcat_bin\", \"colcat_onehot\", \"colcat_bin_map\", #### colcat columns\n # 'colcross_single_onehot_select', \"colcross_pair_onehot\", 'colcross_pair', #### colcross columns 'coldate', 'coltext',\n 'cols_model_group': [ 
'colnum_bin',\n 'colcat_bin',\n # 'coltext',\n # 'coldate',\n #'colcross_pair',\n \n ### example of custom\n # 'col_myfun'\n ]\n\n ### Filter data rows ##################################################################\n ,'filter_pars': { 'ymax' : 2 ,'ymin' : -1 }\n\n }\n }\n\n ##### Filling Global parameters ############################################################\n model_dict = global_pars_update(model_dict, data_name, config_name=os_get_function_name() )\n return model_dict", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Dilu_ratio[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"0.5*(1.+@0*@1)\",Afb, Dilu_ratio)')\n self.modelBuilder.factory_('expr::Rmn(\"0.5*(1.-@0*@1)\",Afb, Dilu_ratio)')", "def __init__(\n self,\n model_filepath: str,\n user_validation: bool = True,\n ):\n\n # ensure model filepath is balid, and save as att if it is\n assert model_filepath.endswith(\".fmu\"), \"Provided filepath is not an FMU file: '{}'\".format(model_filepath)\n self.model_filepath = model_filepath\n # config file with config_params, inputs, outputs\n self.sim_config_filepath = SIM_CONFIG_NAME_f(self.model_filepath)\n\n # read the model description\n self.model_description = read_model_description(model_filepath)\n error_log = \"Provided model ({}) doesn't have modelVariables in XLS description file\".format(model_filepath)\n assert 
len(self.model_description.modelVariables) > 0, error_log\n\n # correct non-alphanumeric tags.\n # note, it doesn't suppose any problem, since interaction with sim uses indices, not names.\n self._clean_non_alphanumeric_chars()\n\n\n # collect the value references (indices)\n # collect the value types (Real, Integer or Enumeration)\n # collect the variables to be initialized and the value to do so at\n self.vars_to_idx = {}\n self.vars_to_type_f = {}\n self.vars_to_ini_vals = {}\n for variable in self.model_description.modelVariables:\n # extract key attributes per variable\n var_idx = variable.valueReference #, variable.causality\n var_name = variable.name\n var_type = variable.type\n var_start = variable.start\n \n # collect type reference\n if var_type == \"Real\":\n self.vars_to_type_f[var_name] = float\n elif var_type == \"Integer\":\n self.vars_to_type_f[var_name] = int\n else:\n # [TODO] Integrate variables of type \"Enumeration\". How do we cast? Define a function for \"self.vars_to_type_f\".\n # [TODO] Integrate variables of type string (need to find correct var_type tag first).\n # [TODO] Integrate variables of type boolean (need to find correct var_type tag first).\n print(f\"Variable '{var_name}' will be skipped. FMU connector cannot currently handle vars of type '{var_type}'.\")\n continue\n \n # collect the value references (indices)\n self.vars_to_idx[var_name] = var_idx\n\n # collect the variables to be initialized and the value to do so at\n if var_start is not None:\n # cast variable prior to storing\n self.vars_to_ini_vals[var_name] = self.vars_to_type_f[var_name](var_start)\n \n\n # initialize sim config\n self.is_model_config_valid = False # Currently unused, since error is raised if model invalid\n self.sim_config_params = []\n self.sim_inputs = []\n self.sim_outputs = []\n self.sim_other_vars = []\n\n # ---------------------------------------------------------------------\n # YAML CONFIG --> check for existing config using SIM_CONFIG_NAME_f --> e.g: \"{model_name}_conf.yaml\"\n valid_config = self._validate_sim_config()\n \n # exit if model is valid, unless validation has been activated\n if valid_config:\n\n # print model config for user reference: config_params, inputs, outputs\n print(self._get_sim_config_str())\n\n if user_validation:\n # prompt user to manually validate model if selected\n validation_asserted = input(\"Is this configuration correct (y|n)? \")\n\n if validation_asserted == \"y\":\n self.is_model_config_valid = True\n return\n \n # reset config if invalid\n self.sim_config_params = []\n self.sim_inputs = []\n self.sim_outputs = []\n self.sim_other_vars = []\n \n else:\n # when no validation is selected, we assume the sim config is valid\n self.is_model_config_valid = True\n return\n \n # ---------------------------------------------------------------------\n # FMI CONFIG --> if model is invalid we look for attributes within the .fmi model definition\n valid_config = self._extract_sim_config_from_fmi_std()\n\n if valid_config:\n\n # print model config for user reference: config_params, inputs, outputs\n print(self._get_sim_config_str())\n \n if user_validation:\n # prompt user to manually validate model if selected\n validation_asserted = input(\"Is this configuration correct (y|n)? 
\")\n\n if validation_asserted == \"y\":\n self.is_model_config_valid = True\n # dump YMAL file to reuse next time the model is loaded\n self._dump_config_to_yaml_file()\n return\n \n else:\n # when no validation is selected, we assume the sim config is valid\n self.is_model_config_valid = True\n # dump YMAL file to reuse next time the model is loaded\n self._dump_config_to_yaml_file()\n return\n \n # Dump auxiliary YAML config file if user doesn't assert the provided set\n # of config_params/inputs/outputs\n self._dump_config_to_yaml_file(is_aux_yaml = True)\n \n # If neither YAML nor FMI model is sufficient raise error\n error_log = \"MODEL DOES NOT HAVE THE CORRECT CONFIG DEFINED NEITHER ON YAML CONFIG FILE \"\n error_log += \"NOR FMI MODEL DESCRIPTION. A YAML FILE HAS BEEN CREATED FOR YOU TO MODIFY. \"\n error_log += \"THE SIM HAS BEEN FORCED TO EXIT, BUT FEEL FREE TO RERUN ONCE SET-UP IS COMPLETED.\"\n raise Exception(error_log)", "def buildVariables(self, model):\n\n \"\"\"\n #Inputs\n \"\"\"\n\n \"\"\"\n #Outputs\n \"\"\"\n #-------- Register Settings Used During Calibration --------\n #auxNDiv (to be put into synth.auxfreq.mmddenom during ir cal only)\n self._addModelVariable(model, 'ircal_auxndiv', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n #auxLoDiv (to be put into synth.divctrl.auxlodivfreqctrl during ir cal only)\n self._addModelVariable(model, 'ircal_auxlodiv', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n #rampVal (to be put into modem.rampctrl.rampval during ir cal only)\n self._addModelVariable(model, 'ircal_rampval', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n #rxAmp_PLL (to be put into rac.auxctrl.rxamp during PLL loopback, ir cal only)\n self._addModelVariable(model, 'ircal_rxamppll', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n #rxAmp_PA (to be put into rac.auxctrl.rxamp during PA loopback, ir cal only)\n self._addModelVariable(model, 'ircal_rxamppa', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n \n #-------- Decide Between Calibration Procedures --------\n #diConfigIsValid (true = DI value / PTE value is an option)\n self._addModelVariable(model, 'ircal_manufconfigvalid', bool, ModelVariableFormat.ASCII, 'True = the manufacturing calibration value is saved on the chip')\n #pllLoopbackConfigIsValid (true = PLL loopback is an option)\n self._addModelVariable(model, 'ircal_pllconfigvalid', bool, ModelVariableFormat.ASCII, 'True = PLL loopback is permitted to generate a calibration value')\n #paLoopbackConfigIsValid (true = PA loopback is an option)\n self._addModelVariable(model, 'ircal_paconfigvalid', bool, ModelVariableFormat.ASCII, 'True = PA loopback is permitted to generate a calibration value')\n #recommendedConfig (DI/PTE vs PLL loopback vs PA loopback)\n var = self._addModelVariable(model, 'ircal_bestconfig', Enum, ModelVariableFormat.DECIMAL, 'Specify the best calibration method for this radio configuration.')\n member_data = [\n ['MANUFACTURING', 1, 'Use the calibration value saved during manufacturing, if applicable.'],\n ['PLL', 2, 'Put the part into a PLL loopback to generate a calibration value.'],\n ['PA', 3, 'Put the part into a PA loopback to generate a calibration value.'],\n ['UNSUPPORTED', 4, 'Image rejection calibration not supported.'],\n ]\n var.var_enum = CreateModelVariableEnum(\n 'configType',\n 'Specify how image rejection 
calibration is to run.',\n member_data)\n\n #-------- Decide Between Software/Hardware RSSI Averaging --------\n self._addModelVariable(model, 'ircal_useswrssiaveraging', bool, ModelVariableFormat.ASCII, 'True = use software RSSI averaging; False = use hardware RSSI averaging')\n self._addModelVariable(model, 'ircal_numrssitoavg', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of RSSI values (2^value) to average in software. If value = 3, 8 values will be averaged.')\n self._addModelVariable(model, 'ircal_throwawaybeforerssi', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of RSSI values to discard before starting to average RSSI values.')\n self._addModelVariable(model, 'ircal_delayusbeforerssi', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Microsecond delay between applying a calibration value and then reading RSSI values.')\n self._addModelVariable(model, 'ircal_delayusbetweenswrssi', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Microsecond delay between gathering RSSI values. Software RSSI averaging mode only.')\n\n #------ Determine number of raw RSSI values averaged by hardware ------\n #agcRssiPeriod\n self._addModelVariable(model, 'ircal_agcrssiperiod', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of raw RSSI values averaged by hardware.')\n\n #------ Registers specific to Jumbo (and new Dumbo) support ------\n self._addModelVariable(model, 'ircal_useswrssiaveraging2', bool, ModelVariableFormat.ASCII, 'True = use software RSSI averaging; False = use hardware RSSI averaging; Jumbo support')\n self._addModelVariable(model, 'ircal_numrssitoavg2', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of RSSI values (2^value) to average in software. If value = 3, 8 values will be averaged. Jumbo support')\n self._addModelVariable(model, 'ircal_throwawaybeforerssi2', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of RSSI values to discard before starting to average RSSI values. Jumbo support')\n self._addModelVariable(model, 'ircal_delayusbeforerssi2', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Microsecond delay between applying a calibration value and then reading RSSI values. Jumbo support')\n self._addModelVariable(model, 'ircal_delayusbetweenswrssi2', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Microsecond delay between gathering RSSI values. Software RSSI averaging mode only. Jumbo support')\n\n #\n # Bools not allowed as advanced inputs due to GUI constraint. Using enum instead\n var = self._addModelVariable(model, 'ircal_rxtx_path_common', Enum, ModelVariableFormat.DECIMAL, 'RX and TX are on a common/shared circuit, or split. Refer to document AN971.')\n member_data = [\n ['SHARED_RX_TX_PATH' , 0, 'RX and TX circuit paths are common/shared/connected'],\n ['SPLIT_RX_TX_PATH', 1, 'RX and TX circuit paths are separated/not connected'],\n ]\n var.var_enum = CreateModelVariableEnum(\n 'IRCalRXTXPathCommonEnum',\n 'RX and TX are on a common/shared circuit, or split. Refer to document AN971.',\n member_data)\n\n self._addModelVariable(model, 'ircal_power_level', int, ModelVariableFormat.DECIMAL, units='codes', desc='Specify IR cal power level (amplitude) instead of auto (0). 
Refer to document AN971.')", "def __init__(self, encut, ldaul, Uparam, Jparam, name=\"DFTU_settings\"):\n\n dftu_settings = {\"LDAU\": \".TRUE.\" , \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LADAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)", "def input_config():\n run_dir = 'runs/ODEMnistClassification/8'\n epoch = 'latest'\n device = 'cpu'\n min_end_time = 10\n max_end_time = 100\n tol = 1e-3", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['nu','C', 'kernel', 'degree', 'gamma', 'coef0',\n 'tol', 'cache_size', 'shrinking', 'max_iter'])\n # notFound must be empty\n assert(not notFound)\n self.initializeModel(settings)", "def test_config():\n import yaml\n import astropy.units as u\n from tqdm import tqdm\n # Need these for `eval` below\n from numpy import array\n\n # Same test suite as used in test_imsim above.\n # This time, we just use this for the det names.\n with open(DATA_DIR / \"wcs_466749.yaml\", 'r') as f:\n wcss = yaml.safe_load(f)\n\n cmds = {}\n with open(DATA_DIR / \"phosim_cat_466749.txt\", 'r') as f:\n for line in f:\n k, v = line.split()\n try:\n v = int(v)\n except ValueError:\n try:\n v = float(v)\n except ValueError:\n pass\n cmds[k] = v\n\n # Values below (and others) from phosim_cat_466749.txt\n rc = cmds['rightascension']\n dc = cmds['declination']\n boresight = galsim.CelestialCoord(\n rc*galsim.degrees,\n dc*galsim.degrees\n )\n obstime = Time(cmds['mjd'], format='mjd', scale='utc')\n obstime -= 15*u.s\n band = \"ugrizy\"[cmds['filter']]\n wavelength_dict = dict(\n u=365.49,\n g=480.03,\n r=622.20,\n i=754.06,\n z=868.21,\n y=991.66\n )\n wavelength = wavelength_dict[band]\n camera = imsim.get_camera()\n\n rotTelPos = cmds['rottelpos'] * galsim.degrees\n telescope = imsim.load_telescope(f\"LSST_{band}.yaml\", rotTelPos=rotTelPos)\n # Non-default values.\n temperature = 293.\n pressure = 69.0\n H2O_pressure = 2.0\n\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=pressure,\n H2O_pressure=H2O_pressure\n )\n\n config = {\n 'input': {\n 'telescope': {\n 'file_name':f\"LSST_{band}.yaml\",\n 'rotTelPos': rotTelPos\n }\n },\n 'image': {\n 'wcs': {\n 'type': 'Batoid',\n 'boresight': boresight,\n 'camera': 'LsstCam',\n 'obstime': obstime,\n 'wavelength': wavelength,\n 'temperature': temperature,\n 'pressure': pressure,\n 'H2O_pressure': H2O_pressure,\n 'order': 2,\n }\n }\n }\n\n rng = np.random.default_rng(1234)\n for k in tqdm(wcss.keys()):\n name = k[18:25].replace('-', '_')\n det = camera[name]\n\n wcs1 = factory.getWCS(det, order=2)\n config['image']['wcs']['det_name'] = name\n galsim.config.RemoveCurrent(config['image']['wcs'])\n galsim.config.ProcessInput(config)\n wcs2 = galsim.config.BuildWCS(config['image'], 'wcs', config)\n\n # Test points\n xs = rng.uniform(0, 4000, 100)\n ys = rng.uniform(0, 4000, 100)\n ra1, dec1 = wcs1.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs2.xyToradec(xs, ys, units='radians')\n np.testing.assert_allclose(ra1, ra2)\n np.testing.assert_allclose(dec1, dec2)\n\n # Test == when identical\n galsim.config.RemoveCurrent(config['image']['wcs'])\n wcs3 = galsim.config.BuildWCS(config['image'], 'wcs', config)\n assert wcs3 == wcs2\n\n # Test that pressure and temperature matter.\n config['image']['wcs']['temperature'] = 250\n 
galsim.config.RemoveCurrent(config['image']['wcs'])\n wcs4 = galsim.config.BuildWCS(config['image'], 'wcs', config)\n assert wcs4 != wcs2\n\n config['image']['wcs']['temperature'] = temperature\n config['image']['wcs']['pressure'] = 55\n galsim.config.RemoveCurrent(config['image']['wcs'])\n wcs5 = galsim.config.BuildWCS(config['image'], 'wcs', config)\n assert wcs5 != wcs2\n\n config['image']['wcs']['pressure'] = pressure\n config['image']['wcs']['H2O_pressure'] = 10\n galsim.config.RemoveCurrent(config['image']['wcs'])\n wcs6 = galsim.config.BuildWCS(config['image'], 'wcs', config)\n assert wcs6 != wcs2\n\n # Test defaults\n del config['image']['wcs']['temperature']\n del config['image']['wcs']['pressure']\n del config['image']['wcs']['H2O_pressure']\n galsim.config.RemoveCurrent(config['image']['wcs'])\n config = galsim.config.CleanConfig(config)\n galsim.config.ProcessInput(config)\n wcs7 = galsim.config.BuildWCS(config['image'], 'wcs', config)\n default_pressure = 101.325 * (1-2.25577e-5*2715)**5.25588\n wcs7a = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength, camera,\n temperature=280,\n pressure=default_pressure,\n H2O_pressure=1.0,\n ).getWCS(det, order=2)\n assert wcs7 == wcs7a\n\n # Default wavelength from bandpass\n del config['image']['wcs']['wavelength']\n config['bandpass'] = imsim.RubinBandpass('r')\n galsim.config.RemoveCurrent(config['image']['wcs'])\n config = galsim.config.CleanConfig(config)\n galsim.config.ProcessInput(config)\n wcs8 = galsim.config.BuildWCS(config['image'], 'wcs', config)\n wcs8a = imsim.BatoidWCSFactory(\n boresight, obstime, telescope,\n wavelength=config['bandpass'].effective_wavelength,\n camera=camera,\n temperature=280,\n pressure=default_pressure,\n H2O_pressure=1.0,\n ).getWCS(det, order=2)\n assert wcs8 == wcs8a\n\n del config['bandpass']\n config['image']['bandpass'] = {\n 'type': 'RubinBandpass',\n 'band' : 'r',\n }\n galsim.config.RemoveCurrent(config['image']['wcs'])\n config = galsim.config.CleanConfig(config)\n galsim.config.ProcessInput(config)\n wcs8b = galsim.config.BuildWCS(config['image'], 'wcs', config)\n assert wcs8b == wcs8a\n\n # Obstime can be a string\n print('obstime = ',obstime.to_value('iso'), type(obstime.to_value('iso')))\n config['image']['wcs']['obstime'] = obstime.to_value('iso')\n # Doesn't quite roundtrip perfectly. 
But within a millisecond.\n obstime = Time(obstime.to_value('iso'), scale='tai')\n print('obstime => ',obstime)\n galsim.config.RemoveCurrent(config['image']['wcs'])\n config = galsim.config.CleanConfig(config)\n galsim.config.ProcessInput(config)\n wcs9 = galsim.config.BuildWCS(config['image'], 'wcs', config)\n wcs9a = imsim.BatoidWCSFactory(\n boresight, obstime, telescope,\n wavelength=config['bandpass'].effective_wavelength,\n camera=camera,\n temperature=280,\n pressure=default_pressure,\n H2O_pressure=1.0,\n ).getWCS(det, order=2)\n assert wcs9 == wcs9a", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def initializeParameters(self):\n \n # 0 - Output Workspace\n # 1 - Output Feature Name\n # 2 - X coordinate (longitude)\n # 3 - Y coordinate (latitude)\n # 4 - Z coordinate (elevation)\n # 5 - Observer offset (OFFSETA)\n # 6 - Terrain offset (OFFSETB)\n # 7 - Near distance (RADIUS1)\n # 8 - Far distance (RADIUS2\n # 9 - Left Azimuth (AZIMUTH1)\n # 10 - Right Azimuth (AZIMUTH2)\n # 11 - Top vertical angle (VERT1)\n # 12 - Bottom vertical angle (VERT2)\n # 13 - Spatial Reference\n # 14 - output point features\n \n \n #GETTING ERROR HERE, DOESN'T VALIDATE CORRECTLY, BUG?????\n #---------------------------\n #Testing get_ParameterInfo\n #---------------------------\n #initializeParameters Execution Error: Runtime error <type 'exceptions.AttributeError'>: 'NoneType' object has no attribute 'Type'\n #---------------------------\n #OK \n #---------------------------\n #\n #\n #self.params[0].Filter.Type = \"Workspace\"\n #self.params[0].Filter.List = [\"FileSystem\", \"LocalDatabase\", \"RemoteDatabase\"]\n #\n\n \n self.params[5].Value = 2.0 # OFFSETA\n self.params[6].Value = 0.0 # OFFSETB\n \n self.params[7].Value = 0.0 # RADIUS1\n self.params[8].Value = 1000.0 # RADIUS2\n \n self.params[9].Filter.Type = \"Range\" \n self.params[9].Filter.List = [0.0,360.0]\n self.params[9].Value = 0.0 # AZIMUTH1\n \n self.params[10].Filter.Type = \"Range\"\n self.params[10].Filter.List = [0.0,360.0]\n self.params[10].Value = 360.0 # AZIMUTH2\n \n self.params[11].Filter.Type = \"Range\"\n self.params[11].Filter.List = [-90.0,90.0]\n self.params[11].Value = 90.0 # VERT1\n \n self.params[12].Filter.Type = \"Range\"\n self.params[12].Filter.List = [-90.0,90.0]\n self.params[12].Value = -90.0 # VERT2\n\n # setting output parameter schema\n self.params[14].Schema.ExtentRule = \"Union\"\n self.params[14].Schema.FeatureTypeRule = \"AsSpecified\"\n self.params[14].Schema.FeatureType = \"Simple\"\n self.params[14].Schema.GeometryTypeRule = \"AsSpecified\"\n self.params[14].Schema.GeometryType = \"Point\"\n self.params[14].Schema.FieldsRule = \"All\" \n #spot_field = self.makeField(\"SPOT\", \"DOUBLE\", \"8\", \"4\", \"12\")\n offseta_field = self.makeField(\"OFFSETA\", \"DOUBLE\", \"8\", \"4\", \"12\")\n offsetb_field = self.makeField(\"OFFSETB\", \"DOUBLE\", \"8\", \"4\", \"12\")\n vert1_field = self.makeField(\"VERT1\", \"DOUBLE\", \"8\", \"4\", \"12\")\n vert2_field = self.makeField(\"VERT2\", \"DOUBLE\", \"8\", \"4\", \"12\")\n azimuth1_field = self.makeField(\"AZIMUTH1\", \"DOUBLE\", \"8\", \"4\", \"12\")\n azimuth2_field = self.makeField(\"AZIMUTH2\", 
\"DOUBLE\", \"8\", \"4\", \"12\")\n radius1_field = self.makeField(\"RADIUS1\", \"DOUBLE\", \"8\", \"4\", \"12\")\n radius2_field = self.makeField(\"RADIUS2\", \"DOUBLE\", \"8\", \"4\", \"12\")\n field_list = [offseta_field,offsetb_field,vert1_field,vert2_field,azimuth1_field,azimuth2_field,radius1_field,radius2_field]\n self.params[14].Schema.AdditionalFields = field_list\n\n return", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')", "def config_params0(data,parameter):\n model = []\n #Range of value of p\n acf = sm.graphics.tsa.acf(data.diff().dropna())\n for i in range(len(acf)):\n acf[i] = abs(acf[i]*10)\n if (ceil(acf[i])) <= 2:\n p = range(ceil(acf[i])-1,ceil(acf[i])+2)\n break\n\n #range of value of q\n pacf = sm.graphics.tsa.pacf(data.diff().dropna())\n for i in range(len(pacf)):\n pacf[i] = abs(pacf[i]*10)\n if (ceil(pacf[i])) <= 2:\n q = range(ceil(pacf[i])-1,ceil(pacf[i])+2)\n break\n\n\t# define config lists\n p_params = p\n d_params = parameter['d']\n q_params = q\n m_params = parameter['m']\n #P_params = p\n #D_params = [0, 1]\n #Q_params = q\n \n pdq_m = list(itertools.product(p_params, d_params, q_params,m_params)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def split_inputs(self):\n\n lca = self.lca\n inputs = self.inputs\n\n inputs_dict = {} # Only store exchanges with uncertainty\n\n # Keep track of which tech_params and bio_params are already included to the analysis\n # Needed to avoid running sa indices computations twice for the same tech or bio params. 
\n # Initialize with parameterized exchanges\n if self.parameters != None and self.ParametersModel != None:\n indices_tech_all = self.parameters_dict['tech_params_where']\n indices_bio_all = self.parameters_dict['bio_params_where']\n else:\n indices_tech_all = np.array([], dtype=int)\n indices_bio_all = np.array([], dtype=int)\n\n for input_ in inputs:\n\n if input_ == 'biosphere':\n continue\n\n inputs_dict[input_] = {}\n\n indices_tech = np.array([], dtype=int)\n indices_bio = np.array([], dtype=int)\n\n if input_ == 'technosphere':\n indices_tech = np.where(lca.tech_params['uncertainty_type']!=0)[0]\n if 'biosphere' in inputs:\n indices_bio = np.where(lca.bio_params['uncertainty_type']!=0)[0]\n\n elif input_ == 'demand_exc':\n # Select all products that pertain to activities in the given demand vector\n for act_index in np.nonzero(lca.demand_array)[0]:\n mask_tech = np.all([lca.tech_params['uncertainty_type']!=0, lca.tech_params['col']==act_index], axis=0)\n indices_tech = np.concatenate([indices_tech, np.where(mask_tech)[0]])\n if 'biosphere' in inputs:\n mask_bio = np.all([lca.bio_params['uncertainty_type']!=0, lca.bio_params['col']==act_index], axis=0)\n indices_bio = np.concatenate([indices_bio, np.where(mask_bio)[0]])\n\n elif input_ in self.databases:\n # Select all products and flows that are linked to the given database\n # Indices corresponding to exchanges in the tech_params depending on the given database\n db_act_indices_tech = [val for key,val in lca.activity_dict.items() if key[0]==input_]\n if len(db_act_indices_tech) > 0:\n db_act_index_min_tech = db_act_indices_tech[0]\n db_act_index_max_tech = db_act_indices_tech[-1]\n mask = lambda i : np.all( [lca.tech_params['uncertainty_type']!=0, \n lca.tech_params['col']==i,\n lca.tech_params['amount']!=0], axis=0 )\n indices_tech = [ np.where( mask(i) ) [0] for i in range(db_act_index_min_tech, db_act_index_max_tech+1) ]\n indices_tech = np.concatenate(indices_tech)\n\n # Indices corresponding to flows in the biosphere params depending on the given database\n if 'biosphere' in inputs:\n mask = lambda j : np.all( [lca.bio_params['uncertainty_type']!=0, lca.bio_params['col']==j], axis=0 )\n indices_bio = [ np.where(mask(j))[0] for j in range(db_act_index_min_tech, db_act_index_max_tech+1) ]\n indices_bio = np.concatenate(indices_bio)\n\n indices_tech = np.sort(indices_tech)\n indices_bio = np.sort(indices_bio)\n\n # Do not add indices_tech that are already in the indices_tech_all\n indices_tech_same = np.intersect1d(indices_tech, indices_tech_all)\n pos_tech = np.array([ np.where(indices_tech==s)[0] for s in indices_tech_same ]).flatten()\n indices_tech = np.delete(indices_tech, pos_tech)\n np.append(indices_tech_all, indices_tech)\n\n # Do not add indices_bio that are already in the indices_bio_all\n indices_bio_same = np.intersect1d(indices_bio, indices_bio_all)\n pos_bio = np.array([ np.where(indices_bio==s)[0] for s in indices_bio_same ]).flatten()\n indices_bio = np.delete(indices_bio, pos_bio)\n np.append(indices_bio_all, indices_bio)\n \n inputs_dict[input_]['tech_params'] = lca.tech_params[indices_tech] #TODO maybe remove later, indices should be sufficient\n inputs_dict[input_]['tech_params_where'] = indices_tech\n inputs_dict[input_]['tech_n_params'] = len(indices_tech) #TODO remove later\n\n inputs_dict[input_]['bio_params'] = lca.bio_params[indices_bio] #TODO maybe remove later\n inputs_dict[input_]['bio_params_where'] = indices_bio\n inputs_dict[input_]['bio_n_params'] = len(indices_bio)\n\n\n self.indices_tech_all = 
indices_tech_all #TODO remove later\n self.indices_bio_all = indices_bio_all\n self.inputs_dict = inputs_dict" ]
[ "0.6243876", "0.6197848", "0.61577994", "0.6136601", "0.61252385", "0.60175735", "0.5826551", "0.5813928", "0.5751767", "0.5715533", "0.5710865", "0.5692011", "0.56493515", "0.5560804", "0.5558627", "0.55206186", "0.5514668", "0.5512474", "0.5510357", "0.5506426", "0.5503846", "0.54862106", "0.5482662", "0.54141146", "0.53913194", "0.53725797", "0.5369344", "0.53609294", "0.53330445", "0.53189135" ]
0.6741056
0
Dump sim's config_params, inputs, and outputs to a YAML file. By default, we overwrite the main YAML config file.
def _dump_config_to_yaml_file(self, sim_config_params = None, sim_inputs = None, sim_outputs = None, sim_other_vars = None, is_aux_yaml = False):
    if sim_config_params is None:
        sim_config_params = self.sim_config_params
    if sim_inputs is None:
        sim_inputs = self.sim_inputs
    if sim_outputs is None:
        sim_outputs = self.sim_outputs
    if sim_other_vars is None:
        sim_other_vars = self.sim_other_vars

    if not is_aux_yaml:
        config_file = self.sim_config_filepath
    else:
        config_file = self.sim_config_filepath.replace(".yaml", "_EDIT.yaml")

    # Prepare the full set of sim config data ( to be shared with user for editing )
    full_sim_config = {"config_params": sim_config_params, "inputs": sim_inputs, "outputs": sim_outputs, "other_vars": sim_other_vars}
    full_sim_data = {"simulation": full_sim_config}

    # Dump configuration to YAML file for later reuse (or user editing if "is_aux_yaml==True")
    with open(config_file, 'w') as file:
        dump = yaml.dump(full_sim_data, sort_keys = False, default_flow_style=False)
        file.write( dump )

    # Inform the user where the YAML file has been created
    log = "\n[FMU Validator] A YAML file with bonsai required fields, as well as available "
    log += "sim variables, has been created at: \n --> '{}'\n".format(config_file)
    if is_aux_yaml:
        log += "[FMU Validator] Edit the YAML file, and remove the '_EDIT' nametag to use this model.\n"
    print(log)

    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_config(_config, simulation_dir):\n with open(os.path.join(simulation_dir, 'config.yaml'), 'w') as f:\n yaml.dump(_config, f, default_flow_style=False)", "def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)", "def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)", "def write(self):\n print yaml.dump(self._config, default_flow_style=False),", "def write(self):\n cfgpath = os.path.join(self.config_dir, CONFIG_FILENAME)\n ofile = open(cfgpath, 'w')\n if ofile:\n log.debug( \"Write config: %s\" % cfgpath )\n cfg = yaml.dump(self.yaml, default_flow_style=False)\n log.debug( \"Config:\\n%s\" % cfg)\n ofile.write(cfg)\n ofile.close()", "def dump(self, config_file = 'config.yaml'):\n\n with open(config_file, 'w') as fp:\n yaml.dump(self.__dict__, fp)", "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def dump(self, config):\n raise NotImplementedError", "def dump(self):\n with open(self._config_filename, 'w', encoding='utf-8') as file:\n self._parser.write(file)", "def save_config(conf, save_path):\n with open(os.path.join(save_path), \"w\") as f:\n f.write(yaml.dump({'param': conf}, default_flow_style=False))", "def showconfig():\n print(yaml.dump(CONFIG))", "def save(self) -> None:\n logger.info(\"Saving to config...\")\n yml.save(self._config, self.configpath)", "def set_yaml_config(self) -> None:\n\n # LT-248: We can pick Artillery Phase configuration from conf file\n self.yaml_config = {\n \"config\": {\n \"target\": self.get_swagger_url(),\n \"processor\": f\"./{self.OUT_FILE}\",\n \"phases\": [\n {\n \"duration\": settings.DURATION or 1,\n \"arrivalRate\": settings.SPAWN_RATE or 1\n }\n ]\n },\n \"scenarios\": self.task_set.yaml_flow\n }", "def dump_config_and_makefile(_config):\n experiment_dir = Path(_config['trainer']['storage_dir'])\n makefile_path = Path(experiment_dir) / \"Makefile\"\n\n if not makefile_path.exists():\n from padertorch.contrib.examples.source_separation.tasnet.templates import \\\n MAKEFILE_TEMPLATE_TRAIN\n\n config_path = experiment_dir / \"config.json\"\n pt.io.dump_config(_config, config_path)\n\n makefile_path.write_text(\n MAKEFILE_TEMPLATE_TRAIN.format(\n main_python_path=pt.configurable.resolve_main_python_path(),\n experiment_name=experiment_name,\n eval_python_path=('.'.join(\n pt.configurable.resolve_main_python_path().split('.')[:-1]\n ) + '.evaluate')\n )\n )", "def test_dump_config(self):\n config = easydms.config.Config()\n print(config)", "def save_config(self):\n config.save_config(self.config, self.config_file)", "def run(self):\n write_config(self.filename)\n print('Wrote default config to', self.filename)", "def save(config: dict, out_dir: str, filename: str = \"config.yaml\"):\n assert filename.endswith(\".yaml\")\n with open(os.path.join(out_dir, filename), \"w+\") as f:\n f.write(yaml.dump(config))", "def _save_config_log(self, data):\n config_path = os.path.join(self.runtime.working_dir, '.config')\n with open(config_path, 'w') as f:\n yaml.safe_dump(data, f, default_flow_style=False)", "def save_config(logdir, config):\n with open(os.path.join(logdir, 'config.yml'), 'w') as f:\n yaml.dump(config.__dict__, f)", "def _dumpConfiguration(self):\n print \"Writing 
configuration:\"\n print \" - title = \" + self.title\n print \" - executablePath = \" + self.exePath\n print \" - configPath = \" + self.configPath\n print \" - outputName = \" + self.outputName\n print \" - branch = \" + self.branch\n print \" - branchName = \" + self.branchName\n print \" - buildid = \" + self.buildid\n print \" - currentDate = \" + self.currentDate\n print \" - testDate = \" + self.testDate\n print \" - resultsServer = \" + self.resultsServer\n print \" - resultsLink = \" + self.resultsLink\n print \" - activeTests = \" + self.activeTests\n if self.symbolsPath:\n print \" - symbolsPath = \" + self.symbolsPath", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def test_yaml(self):\n with utils.timer('loading'):\n config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]\n s = simulation.from_config(config)\n with utils.timer('serializing'):\n serial = s.to_yaml()\n with utils.timer('recovering'):\n recovered = yaml.load(serial, Loader=yaml.SafeLoader)\n with utils.timer('deleting'):\n del recovered['topology']\n assert config == recovered", "def _dump_tf_config(self):\n with open(os.path.join(self._tf_temp_dir.name, \"config.tf.json\"), \"w\") as f:\n json.dump(self.tf_config, f, indent=2)\n\n subprocess.run([\"cat\", os.path.join(self._tf_temp_dir.name, \"config.tf.json\")])\n\n self._init_tf() # Re-init post reconfiguration", "def save_config(config: Dict[str, Any], path: str) -> None:\n\n with open(path, 'w+', encoding='utf-8') as stream:\n yaml.dump(config, stream, default_flow_style=False)", "def to_yaml(self, **kwargs):\n if not self._is_graph_network:\n raise NotImplementedError\n\n if yaml is None:\n raise ImportError('Requires yaml module installed.')\n return yaml.dump(self._updated_config(), **kwargs)", "def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)", "def save(self):\r\n with open(self.filename, 'w') as f:\r\n if self.pretty:\r\n json.dump(self.__config, f, sort_keys=False,\r\n indent=4, separators=(',', ': '))\r\n else:\r\n json.dump(self.__config, f)", "def save_config(self):\n\n if not self.__conf.has_section(self.section):\n self.__conf.add_section(self.section)\n\n for key in self._params:\n val = self._params[key]\n self.__conf.set(self.section, key, val)\n\n with open(self.conf_path, 'w') as f:\n self.__conf.write(f)", "def dump_default_config():\n output = \"PythiaPlotter_config.py\"\n log.info(\"Dumping config to %s\", output)\n import pythiaplotter.default_config as dc\n shutil.copy(dc.__file__.replace(\".pyc\", \".py\"), output)" ]
[ "0.730918", "0.699196", "0.68951195", "0.67941695", "0.6579302", "0.6568991", "0.6554705", "0.64560366", "0.64269376", "0.64144593", "0.6292432", "0.6266346", "0.6234814", "0.62344337", "0.62238747", "0.617985", "0.6164769", "0.6161922", "0.6141542", "0.6134677", "0.6123942", "0.61237407", "0.609949", "0.60739744", "0.60521775", "0.60447997", "0.6012628", "0.6007977", "0.5958405", "0.5957956" ]
0.81717044
0
Get string with the sim's config_params, inputs, and outputs for the model
def _get_sim_config_str(self):
    log = "[FMU Validator] The set of configuration_parameters, inputs, and outputs defined is the following:\n"
    log += "\n{}: {}".format("Sim Config Params -- Brain Config ", self.sim_config_params)
    log += "\n{}: {}".format("Sim Inputs -- Brain Actions ", self.sim_inputs)
    log += "\n{}: {}".format("Sim Outputs -- Brain States ", self.sim_outputs)
    log += "\n{}: {}".format("Sim Other Vars -- Other Sim States ", self.sim_other_vars)
    return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model_config(model_name, args):\n if model_name == 'Tacotron2':\n model_config = dict(\n # optimization\n mask_padding=args.mask_padding,\n # audio\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=args.n_symbols,\n symbols_embedding_dim=args.symbols_embedding_dim,\n # encoder\n encoder_kernel_size=args.encoder_kernel_size,\n encoder_n_convolutions=args.encoder_n_convolutions,\n encoder_embedding_dim=args.encoder_embedding_dim,\n # attention\n attention_rnn_dim=args.attention_rnn_dim,\n attention_dim=args.attention_dim,\n # attention location\n attention_location_n_filters=args.attention_location_n_filters,\n attention_location_kernel_size=args.attention_location_kernel_size,\n # decoder\n n_frames_per_step=args.n_frames_per_step,\n decoder_rnn_dim=args.decoder_rnn_dim,\n prenet_dim=args.prenet_dim,\n max_decoder_steps=args.max_decoder_steps,\n gate_threshold=args.gate_threshold,\n p_attention_dropout=args.p_attention_dropout,\n p_decoder_dropout=args.p_decoder_dropout,\n # postnet\n postnet_embedding_dim=args.postnet_embedding_dim,\n postnet_kernel_size=args.postnet_kernel_size,\n postnet_n_convolutions=args.postnet_n_convolutions,\n decoder_no_early_stopping=args.decoder_no_early_stopping\n )\n return model_config\n elif model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n else:\n raise NotImplementedError(model_name)", "def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")", "def get_configuration_parameters_values(self):\n return (self.timestamp, self.model_name, self.model.WEIGHT_PATH, self.model.FIT_HISTORY_PATH,\n self.learning_rate, self.load_weights, self.freeze_layers, self.margin,\n self.hard_sampling_batch_size, self.batch_size, self.number_of_validation_imgs,\n self.input_shape)", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def get_model_config(model_name, args):\n if model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n elif model_name == 'FastPitch':\n model_config = dict(\n # io\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=len(get_symbols(args.symbol_set)),\n padding_idx=get_pad_idx(args.symbol_set),\n symbols_embedding_dim=args.symbols_embedding_dim,\n # input FFT\n in_fft_n_layers=args.in_fft_n_layers,\n in_fft_n_heads=args.in_fft_n_heads,\n in_fft_d_head=args.in_fft_d_head,\n in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,\n in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,\n in_fft_output_size=args.in_fft_output_size,\n p_in_fft_dropout=args.p_in_fft_dropout,\n p_in_fft_dropatt=args.p_in_fft_dropatt,\n p_in_fft_dropemb=args.p_in_fft_dropemb,\n # output FFT\n out_fft_n_layers=args.out_fft_n_layers,\n out_fft_n_heads=args.out_fft_n_heads,\n out_fft_d_head=args.out_fft_d_head,\n out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,\n 
out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,\n out_fft_output_size=args.out_fft_output_size,\n p_out_fft_dropout=args.p_out_fft_dropout,\n p_out_fft_dropatt=args.p_out_fft_dropatt,\n p_out_fft_dropemb=args.p_out_fft_dropemb,\n # duration predictor\n dur_predictor_kernel_size=args.dur_predictor_kernel_size,\n dur_predictor_filter_size=args.dur_predictor_filter_size,\n p_dur_predictor_dropout=args.p_dur_predictor_dropout,\n dur_predictor_n_layers=args.dur_predictor_n_layers,\n # pitch predictor\n pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,\n pitch_predictor_filter_size=args.pitch_predictor_filter_size,\n p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,\n pitch_predictor_n_layers=args.pitch_predictor_n_layers,\n # pitch conditioning\n pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,\n # speakers parameters\n n_speakers=args.n_speakers,\n speaker_emb_weight=args.speaker_emb_weight,\n # energy predictor\n energy_predictor_kernel_size=args.energy_predictor_kernel_size,\n energy_predictor_filter_size=args.energy_predictor_filter_size,\n p_energy_predictor_dropout=args.p_energy_predictor_dropout,\n energy_predictor_n_layers=args.energy_predictor_n_layers,\n # energy conditioning\n energy_conditioning=args.energy_conditioning,\n energy_embedding_kernel_size=args.energy_embedding_kernel_size,\n )\n return model_config\n\n else:\n raise NotImplementedError(model_name)", "def _get_model(self):\n\n parameters = {keys._topology:self.topology,\n keys._size:self.size,\n keys._name:self.name,\n #keys._output_activation:self._outActiv_fun_key,\n #keys._hidden_activation:self._hiddenActiv_fun_key,\n keys._learning_rate:self.learningRate,\n keys._momentum:self.momentum}\n\n return parameters", "def config(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"config\")", "def config(self):\n return {\"input_dims\": self.dims, \"output_dims\": self.output_dims, \"mapping\": self.mapping}", "def get_config(ctx):\n global HISTORY_LOGS, EXPERIMENT_ID #Ugly hack, make it better at some point, may be ;)\n id = ctx.job.id\n EXPERIMENT_ID = hash(id)\n\n import montezuma_env\n\n ctx.job.register_action(\"Set starting point procssor:\",\n lambda str: set_motezuma_env_options(str, montezuma_env.STARTING_POINT_SELECTOR))\n ctx.job.register_action(\"Set rewards:\",\n lambda str: set_motezuma_env_options(str, montezuma_env.REWARDS_FILE))\n\n logger.auto_set_dir(suffix=id)\n\n # (self, parameters, number_of_actions, input_shape)\n\n M = EXPERIMENT_MODEL\n\n name_base = str(uuid.uuid1())[:6]\n PIPE_DIR = os.environ.get('TENSORPACK_PIPEDIR_{}'.format(id), '.').rstrip('/')\n namec2s = 'ipc://{}/sim-c2s-{}-{}'.format(PIPE_DIR, name_base, id)\n names2c = 'ipc://{}/sim-s2c-{}-{}'.format(PIPE_DIR, name_base, id)\n procs = [MySimulatorWorker(k, namec2s, names2c) for k in range(SIMULATOR_PROC)]\n ensure_proc_terminate(procs)\n start_proc_mask_signal(procs)\n\n master = MySimulatorMaster(namec2s, names2c, M)\n dataflow = BatchData(DataFromQueue(master.queue), BATCH_SIZE)\n\n # My stuff - PM\n neptuneLogger = NeptuneLogger.get_instance()\n lr = tf.Variable(0.001, trainable=False, name='learning_rate')\n tf.scalar_summary('learning_rate', lr)\n num_epochs = get_atribute(ctx, \"num_epochs\", 100)\n\n rewards_str = get_atribute(ctx, \"rewards\", \"5 1 -200\")\n with open(montezuma_env.REWARDS_FILE, \"w\") as file:\n file.write(rewards_str)\n\n\n if hasattr(ctx.params, \"learning_rate_schedule\"):\n schedule_str = str(ctx.params.learning_rate_schedule)\n else: #Default value 
inhereted from tensorpack\n schedule_str = \"[[80, 0.0003], [120, 0.0001]]\"\n logger.info(\"Setting learing rate schedule:{}\".format(schedule_str))\n learning_rate_scheduler = ScheduledHyperParamSetter('learning_rate', json.loads(schedule_str))\n\n if hasattr(ctx.params, \"entropy_beta_schedule\"):\n schedule_str = str(ctx.params.entropy_beta_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 0.0003], [120, 0.0001]]\"\n logger.info(\"Setting entropy beta schedule:{}\".format(schedule_str))\n entropy_beta_scheduler = ScheduledHyperParamSetter('entropy_beta', json.loads(schedule_str))\n\n if hasattr(ctx.params, \"explore_factor_schedule\"):\n schedule_str = str(ctx.params.explore_factor_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 2], [100, 3], [120, 4], [140, 5]]\"\n logger.info(\"Setting explore factor schedule:{}\".format(schedule_str))\n explore_factor_scheduler = ScheduledHyperParamSetter('explore_factor', json.loads(schedule_str))\n\n\n\n return TrainConfig(\n dataset=dataflow,\n optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),\n callbacks=Callbacks([\n StatPrinter(), ModelSaver(),\n learning_rate_scheduler, entropy_beta_scheduler, explore_factor_scheduler,\n HumanHyperParamSetter('learning_rate'),\n HumanHyperParamSetter('entropy_beta'),\n HumanHyperParamSetter('explore_factor'),\n NeputneHyperParamSetter('learning_rate', ctx),\n NeputneHyperParamSetter('entropy_beta', ctx),\n NeputneHyperParamSetter('explore_factor', ctx),\n master,\n StartProcOrThread(master),\n PeriodicCallback(Evaluator(EVAL_EPISODE, ['state'], ['logits'], neptuneLogger, HISTORY_LOGS), 1),\n neptuneLogger,\n ]),\n session_config=get_default_sess_config(0.5),\n model=M,\n step_per_epoch=STEP_PER_EPOCH,\n max_epoch=num_epochs,\n )", "def params():\n return utils.Params('../experiments/base-model/params.json')", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def get_config(self):\n config = {\n 'membership_transform': self.membership_transform,\n 'predictions_transform': self.predictions_transform,\n 'membership_kernel': self.membership_kernel,\n 'predictions_kernel': self.predictions_kernel,\n 'name': self.name,\n }\n config = {k: v for k, v in config.items() if v is not None}\n return self._serialize_config(config)", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n #print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))\n\n with open(os.path.join(self.train_dir,'model_arch.txt'), 'a') as fp:\n print(model, file=fp)\n print(name, file=fp)\n print(\"The number of parameters: {}\".format(num_params),file=fp)", "def info(self):\r\n\r\n return self.sim_info", "def __str__(self):\n model_parameters = filter(lambda p: p.requires_grad, self.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n return super(BaseModel, self).__str__() + '\\nTrainable parameters: 
{}'.format(params)\n # print(super(BaseModel, self))", "def extractModelParam(self):\n copasi_filename = self.genPathCopasi(\"extractor\")\n self.recentModel = model.loada(self.antString, copasi_filename)\n return self.recentModel.parameters.copy().squeeze().to_dict()", "def input_config():\n run_dir = 'runs/ODEMnistClassification/8'\n epoch = 'latest'\n device = 'cpu'\n min_end_time = 10\n max_end_time = 100\n tol = 1e-3", "def get_model_output(self):\n\n return self.model_output_file", "def get_info_string(config):\n if config.train.folds is not None:\n return f\"folds_{config.train.folds}\"\n elif config.train.validation_portion is not None:\n return \"valportion_{}\".format(config.train.validation_portion)\n return \"\"", "def make_params(config):\n params = copy.deepcopy(config.view.params)\n params.t2bins = np.arange(0, params.t2bin_max + 1e-4, params.t2bin_stepsize)\n params.out = make_Bunch(\"State and output of detection processing\") # outputs are not parameters, maybe separate \n return params", "def get_input_info_from_cfg(deploy_cfg: mmengine.Config) -> Dict[str, List]:\n # The partition is not supported now. Set the id of model to 0.\n model_inputs = get_model_inputs(deploy_cfg)[0]\n input_info = model_inputs['opt_shapes']\n ir_config = get_ir_config(deploy_cfg)\n if ir_config is not None:\n input_names = ir_config.get('input_names', None)\n if input_names:\n if not isinstance(input_info, Dict):\n input_info = dict(zip(input_names, input_info))\n input_info = update_input_names(input_info, input_names)\n return input_info", "def getParamsForWhizard(self, model):\n params = ''\n if model == 'sm':\n params = \"\"\"<GF type=\"float\" value=\"1.16639E-5\">\n<!-- Fermi constant -->\n</GF>\n<mZ type=\"float\" value=\"91.1882\">\n<!-- Z-boson mass -->\n</mZ>\n<mW type=\"float\" value=\"80.419\">\n<!-- W-boson mass -->\n</mW>\n<mH type=\"float\" value=\"120\">\n<!-- Higgs mass -->\n</mH>\n<alphas type=\"float\" value=\"0.1178\">\n<!-- Strong coupling constant alpha_s(MZ) -->\n</alphas>\n<me type=\"float\" value=\"0.\">\n<!-- electron mass -->\n</me>\n<mmu type=\"float\" value=\"0.1066\">\n<!-- muon mass -->\n</mmu>\n<mtau type=\"float\" value=\"1.777\">\n<!-- tau-lepton mass -->\n</mtau>\n<ms type=\"float\" value=\"0.\">\n<!-- s-quark mass -->\n</ms>\n<mc type=\"float\" value=\"0.54\">\n<!-- c-quark mass -->\n</mc>\n<mb type=\"float\" value=\"2.9\">\n<!-- b-quark mass -->\n</mb>\n<mtop type=\"float\" value=\"174\">\n<!-- t-quark mass -->\n</mtop>\n<wtop type=\"float\" value=\"1.523\">\n<!-- t-quark width -->\n</wtop>\n<wZ type=\"float\" value=\"2.443\">\n<!-- Z-boson width -->\n</wZ>\n<wW type=\"float\" value=\"2.049\">\n<!-- W-boson width -->\n</wW>\n<wH type=\"float\" value=\"0.3605E-02\">\n<!-- Higgs width -->\n</wH>\n<vckm11 type=\"float\" value=\"0.97383\">\n<!-- Vud -->\n</vckm11>\n<vckm12 type=\"float\" value=\"0.2272\">\n<!-- Vus -->\n</vckm12>\n<vckm13 type=\"float\" value=\"0.00396\">\n<!-- Vub -->\n</vckm13>\n<vckm21 type=\"float\" value=\"-0.2271\">\n<!-- Vcd -->\n</vckm21>\n<vckm22 type=\"float\" value=\"0.97296\">\n<!-- Vcs -->\n</vckm22>\n<vckm23 type=\"float\" value=\"0.04221\">\n<!-- Vcb -->\n</vckm23>\n<vckm31 type=\"float\" value=\"0.00814\">\n<!-- Vtd -->\n</vckm31>\n<vckm32 type=\"float\" value=\"-0.04161\">\n<!-- Vts -->\n</vckm32>\n<vckm33 type=\"float\" value=\"0.99910\">\n<!-- Vtb -->\n</vckm33>\n<khgaz type=\"float\" value=\"1.000\">\n<!-- anomaly Higgs coupling K factors -->\n</khgaz>\n<khgaga type=\"float\" value=\"1.000\">\n<!-- anomaly Higgs coupling K 
factors -->\n</khgaga>\n<khgg type=\"float\" value=\"1.000\">\n<!-- anomaly Higgs coupling K factors -->\n</khgg>\n\"\"\"\n else:\n params = \"\"\"\n \"\"\"\n return S_OK(params)", "def get_configuration_parameters_names(self):\n return (\n 'timestamp', 'model_name', 'weight_path', 'fit_history_path', 'learning_rate', 'load_weights',\n 'freeze_layers', 'margin', 'hard_sampling_batch_size', 'batch_size',\n 'number_of_validation_imgs', 'input_shape')", "def __str__(self):\n string = 'input dim: {} \\noutput dim: {} \\n'.format(\n self.dim_inputs, self.dim_outputs\n )\n string += 'sequence length: {} \\n'.format(\n self.tensors[0].shape[1]\n )\n key = 'train' if self.train else 'test'\n string += '{}_samples: {} \\n{}_sequences: {} \\n'.format(\n key, self.experiment_length, key, self.tensors[0].shape[0]\n )\n return string", "def get_config(self):\n config = {\n \"units\": self.units,\n \"activation\": activations.serialize(self.activation),\n \"recurrent_activation\": activations.serialize(\n self.recurrent_activation\n ),\n \"attention_activation\": activations.serialize(\n self.attention_activation\n ),\n \"use_bias\": self.use_bias,\n \"kernel_initializer\": initializers.serialize(self.kernel_initializer),\n \"recurrent_initializer\": initializers.serialize(\n self.recurrent_initializer\n ),\n \"bias_initializer\": initializers.serialize(self.bias_initializer),\n \"attention_initializer\": initializers.serialize(\n self.attention_initializer\n ),\n \"use_chrono_initialization\": self.unit_forget_bias,\n \"kernel_regularizer\": regularizers.serialize(self.kernel_regularizer),\n \"recurrent_regularizer\": regularizers.serialize(\n self.recurrent_regularizer\n ),\n \"bias_regularizer\": regularizers.serialize(self.bias_regularizer),\n \"activity_regularizer\": regularizers.serialize(\n self.activity_regularizer\n ),\n \"attention_regularizer\": regularizers.serialize(\n self.attention_regularizer\n ),\n \"kernel_constraint\": constraints.serialize(self.kernel_constraint),\n \"recurrent_constraint\": constraints.serialize(\n self.recurrent_constraint\n ),\n \"bias_constraint\": constraints.serialize(self.bias_constraint),\n \"attention_constraint\": constraints.serialize(\n self.attention_constraint\n ),\n \"dropout\": self.dropout,\n \"recurrent_dropout\": self.recurrent_dropout,\n \"return_attention\": self.return_attention,\n }\n base_config = super().get_config()\n del base_config[\"cell\"]\n return dict(list(base_config.items()) + list(config.items()))", "def get_config_parameter(config):\n\n selected_event = config['selected_event']\n datasource_raw_data = config['datasource_raw_data']['database']\n measurement_raw = config['datasource_raw_data']['measurement']\n measurement_enriched = config['datasource_enriched_data']['measurement']\n datasource_enriched_data = config['datasource_enriched_data']['database']\n datasource_marked_data = config['datasource_marked_data']['database']\n datasource_predicted_data = config['datasource_predicted_data']['database']\n start_time = config['timeframe'][0]\n end_time = config['timeframe'][1]\n register_dict = config['register_dict']\n required_registers = config[f\"{selected_event}_register\"]\n events = config[selected_event]\n measurement_predicted = config['datasource_predicted_data']['measurement']\n return selected_event, datasource_raw_data, measurement_raw, start_time, end_time, register_dict, \\\n required_registers, datasource_enriched_data, datasource_marked_data, \\\n measurement_enriched, events, datasource_predicted_data, 
measurement_predicted", "def getModelParams(self):\n log.info(\"Importing model params from %s\" % self.modelParamsPath)\n moduleName = os.path.basename(self.modelParamsPath)\n importedModelParams = imp.load_source(moduleName, self.modelParamsPath)\n return importedModelParams.MODEL_PARAMS", "def display_sim_parameters(self):\n pprint.pprint(vars(self))\n return", "def configuration(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"configuration\")", "def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True" ]
[ "0.63613605", "0.6227064", "0.6027238", "0.6021502", "0.59806687", "0.59724784", "0.58838063", "0.5799649", "0.5774296", "0.57192713", "0.5702105", "0.57006687", "0.568292", "0.56661975", "0.5660352", "0.5649805", "0.56424356", "0.56185853", "0.56185603", "0.5617623", "0.5615714", "0.5590734", "0.55902725", "0.55628484", "0.5554227", "0.5527074", "0.5506852", "0.54998374", "0.54964006", "0.54950285" ]
0.737774
0
Remove non-alphanumeric characters to make variable names valid for Bonsai interaction.
def _clean_non_alphanumeric_chars(self):
    for i,variable in enumerate(self.model_description.modelVariables):
        clean_name = re.sub(r'[^a-zA-Z0-9_]', '', variable.name)
        if clean_name != variable.name:
            log = "Sim variable '{}' has been renamed to '{}' ".format(variable.name, clean_name)
            log += "to comply with Bonsai naming requirements."
            print(log)
            self.model_description.modelVariables[i].name = clean_name
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def clean(name):\n name = remove_extra(name)\n name = unidecode.unidecode(name) # Remove diacritics\n name = \"\".join(\n list(filter(lambda c: c in (string.ascii_letters + string.digits + \" \"), name))\n )\n name = name.lower().strip()\n return name", "def clean_string(value):\n\treturn re.sub(r'[^a-zA-Z0-9_.]', '', str(value))", "def remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def _scrub(self, string):\n if not string.isalnum():\n raise ValueError(\"Table name cannot include non-alphanumerics.\")\n return string", "def Clean(s):\n for c in BAD_CHARACTERS:\n s = s.replace(c, '_')\n return s", "def remove_non_alpha(self,text):\n \n removelist=\"-\\.\\/\\?\\@\"\n re_alpha_numeric1=r\"[^0-9a-zA-Z\"+removelist+\" ]\"\n clean_text=re.sub(re_alpha_numeric1,'',text)\n clean_text=clean_text.replace('/',' ')\n clean_text=re.sub(' +', ' ', clean_text)\n return clean_text", "def fixString(string):\n string = re.sub(r\"[^A-Z-]\", \"\", string)\n return string", "def clean_username(value):\n if NO_ASCII_REGEX.search(value):\n value = unidecode(value)\n value = NO_ASCII_REGEX.sub('', value)\n value = NO_SPECIAL_REGEX.sub('', value)\n return value", "def replace_bad_characters(self, str):\n\n str = unicode(BeautifulStoneSoup(str,\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES))\n str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')\n str = unicode(re.sub('[^\\w\\s-]', '', str).strip().lower())\n str = unicode(str.replace(' ', '-'))\n return str", "def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def strip_other_charcter():\n pass", "def remove_string_special_characters(s):\n stripped = re.sub('[^\\w\\s]', '', s)\n stripped = re.sub('_', '', stripped)\n stripped = re.sub('\\s+', ' ', stripped)\n stripped = stripped.strip()\n\n return stripped", "def clean_user_input(self, user_input):\n legal_chars = re.compile(r'^[a-z0-9]$')\n return filter(lambda c: re.match(legal_chars, c), user_input.lower())", "def to_clean_str(s: str) -> str:\n return re.sub(\"[^a-zA-Z0-9]\", \"\", s).lower()", "def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent", "def stripword( s ) :\n return re.sub( '[\\W\\d]', '', s )", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def replace_any_non_letter_or_number_character(text):\n text = text.strip()\n text = re.sub('[^A-Za-z0-9 ]+', '', 
text)\n return text", "def fix_characters(title):\n return re.sub('[^0-9a-zA-Z]+', ' ', title)", "def _remove_custom_chars(self, text: str) -> str:\n patterns = \"|\".join([x for x in self.custom_chars])\n return re.sub(patterns, \"\", str(text), flags=re.IGNORECASE)", "def clean_word(word: str) -> str:\n\n cleaned_word = ''\n for char in word.lower():\n if char.isalnum():\n cleaned_word = cleaned_word + char\n return cleaned_word", "def remove_special_characters(text, remove_digits=False):\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, '', text)\n return text", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def scrub(input_string):\n return ''.join(k for k in input_string if k.isalnum())", "def _replace_non_alnum(self):\n no_punct = [x if x.isalnum() else ' ' for x in self._phrase.lower()]\n return ''.join(no_punct) # Convert an array of char to string" ]
[ "0.73747957", "0.73650575", "0.73443496", "0.7294863", "0.72666633", "0.72465616", "0.7158626", "0.7149394", "0.713084", "0.7125717", "0.71202695", "0.71086025", "0.71029955", "0.70836455", "0.707941", "0.70679945", "0.70411354", "0.70361745", "0.70283735", "0.7028341", "0.70243955", "0.7019881", "0.7018226", "0.7016978", "0.70134974", "0.6982579", "0.6982041", "0.6963868", "0.69545656", "0.6945799" ]
0.744641
0
Template for simulating FMU models for Bonsai integration. Note, it calls FMUSimValidation to validate the model when first instantiated.
def __init__(
    self,
    model_filepath: str,
    fmi_version: str = FMI_VERSION,
    start_time = START_TIME,
    stop_time = STOP_TIME,
    step_size = STEP_SIZE,
    user_validation: bool = False,
    use_unzipped_model: bool = False,
):
    # validate simulation: config_vars (optional), inputs, and outputs
    validated_sim = FMUSimValidation(model_filepath, user_validation)

    # extract validated sim configuration
    self.model_filepath = validated_sim.model_filepath
    self.sim_config_filepath = validated_sim.sim_config_filepath
    self.model_description = validated_sim.model_description
    # model variable names structured per type (config, inputs/brain actions, outputs/brain states)
    self.sim_config_params = validated_sim.sim_config_params
    self.sim_inputs = validated_sim.sim_inputs
    self.sim_outputs = validated_sim.sim_outputs
    self.sim_other_vars = validated_sim.sim_other_vars
    # model variable dictionaries with
    self.vars_to_idx = validated_sim.vars_to_idx
    self.vars_to_type_f = validated_sim.vars_to_type_f
    self.vars_to_ini_vals = validated_sim.vars_to_ini_vals

    # get parent directory and model name (without .fmu)
    aux_head_and_tail_tup = os.path.split(self.model_filepath)
    self.model_dir = aux_head_and_tail_tup[0]
    self.model_name = aux_head_and_tail_tup[1].replace(".fmu", "")

    # placeholder to prevent accessing methods if initialization hasn't been called first
    # also prevents calling self.fmu.terminate() if initialization hasn't occurred or termination has already been applied
    self._is_initialized = False

    # get FMI version
    read_fmi_version = self.model_description.fmiVersion
    if read_fmi_version in ["1.0", "2.0", "3.0"]:
        # Use fmi version from model_description
        print(f"[FMU Connector] FMU model indicates it follows fmi version '{read_fmi_version}'.")
        self.fmi_version = read_fmi_version
    else:
        assert fmi_version in ["1.0", "2.0", "3.0"], f"fmi version provided ({fmi_version}) is invalid."
        # Use fmi version provided by user if the one on model_description is invalid
        print(f"[FMU Connector] Using fmi version provided by user: v'{fmi_version}'. Model indicates v'{read_fmi_version}' instead.")
        self.fmi_version = fmi_version

    # save time-related data
    error_log = "Stop time provided ({}) is lower than start time provided ({})".format(stop_time, start_time)
    assert stop_time > start_time, error_log
    error_log = "Step size time ({}) is greater than the difference between ".format(step_size)
    error_log += "stop and start times, ({}) and ({}), respectively".format(stop_time, start_time)
    assert step_size < stop_time-start_time, error_log
    self.start_time = float(start_time)
    self.stop_time = float(stop_time)
    self.step_size = float(step_size)
    self.sim_time = float(self.start_time)

    # retrieve FMU model type, as well as model identifier
    self.model_type = "None"
    self.model_identifier = self.model_name
    coSimulation = self.model_description.coSimulation
    if coSimulation is not None:
        self.model_identifier = coSimulation.modelIdentifier
        self.model_type = "coSimulation"
    else:
        scheduledExecution = self.model_description.scheduledExecution
        if scheduledExecution is not None:
            self.model_identifier = scheduledExecution.modelIdentifier
            self.model_type = "scheduledExecution"
        else:
            modelExchange = self.model_description.modelExchange
            if modelExchange is not None:
                self.model_identifier = modelExchange.modelIdentifier
                self.model_type = "modelExchange"
            else:
                raise Exception("Model is not of any known type: coSimulation, scheduledExecution, nor modelExchange")

    # extract the FMU
    extract_path = os.path.join(self.model_dir, self.model_name + "_unzipped")
    if not use_unzipped_model:
        # extract model to subfolder by default
        self.unzipdir = extract(self.model_filepath, unzipdir=extract_path)
    else:
        # use previously unzipped model
        self.unzipdir = extract_path

    # get unique identifier using timestamp for instance_name (possible conflict with batch)
    self.instance_name = self._get_unique_id()

    # ---------------------------------------------------------------
    # instance model depending on 'fmi version' and 'fmu model type'
    self.fmu = None
    print(f"[FMU Connector] Model has been determined to be of type '{self.model_type}' with fmi version == '{self.fmi_version}'.")

    if self.model_type == "modelExchange":
        ## [TODO] test integrations
        print(f"[FMU Connector] Simulator hasn't been tested for '{self.model_type}' models with fmi version == '{self.fmi_version}'.")
        if self.fmi_version == "1.0":
            self.fmu = fmi1.FMU1Model(guid=self.model_description.guid, unzipDirectory=self.unzipdir, modelIdentifier=self.model_identifier, instanceName=self.instance_name)
        elif self.fmi_version == "2.0":
            self.fmu = fmi2.FMU2Model(guid=self.model_description.guid, unzipDirectory=self.unzipdir, modelIdentifier=self.model_identifier, instanceName=self.instance_name)
        elif self.fmi_version == "3.0":
            self.fmu = fmi3.FMU3Model(guid=self.model_description.guid, unzipDirectory=self.unzipdir, modelIdentifier=self.model_identifier, instanceName=self.instance_name)

    elif self.model_type == "coSimulation":
        if self.fmi_version == "1.0":
            ## [TODO] test integrations
            print(f"[FMU Connector] Simulator hasn't been tested for '{self.model_type}' models with fmi version == '{self.fmi_version}'.")
            self.fmu = fmi1.FMU1Slave(guid=self.model_description.guid, unzipDirectory=self.unzipdir, modelIdentifier=self.model_identifier, instanceName=self.instance_name)
        elif self.fmi_version == "2.0":
            self.fmu = fmi2.FMU2Slave(guid=self.model_description.guid, unzipDirectory=self.unzipdir, modelIdentifier=self.model_identifier, instanceName=self.instance_name)
        elif self.fmi_version == "3.0":
            ## [TODO] test integrations
            print(f"[FMU Connector] Simulator hasn't been tested for '{self.model_type}' models with fmi version == '{self.fmi_version}'.")
            self.fmu = fmi3.FMU3Slave(guid=self.model_description.guid, unzipDirectory=self.unzipdir, modelIdentifier=self.model_identifier, instanceName=self.instance_name)

    elif self.model_type == "scheduledExecution":
        if self.fmi_version == "1.0" or self.fmi_version == "2.0":
            raise Exception("scheduledExecution type only exists in fmi v'3.0', but fmi version '{}' was provided.".format(self.fmi_version))
        print(f"[FMU Connector] Simulator hasn't been tested for '{self.model_type}' models with fmi version == '{self.fmi_version}'.")
        ## [TODO] test integrations
        #elif self.fmi_version_int == 3:
        self.fmu = fmi3.FMU3ScheduledExecution(guid=self.model_description.guid, unzipDirectory=self.unzipdir, modelIdentifier=self.model_identifier, instanceName=self.instance_name)
    # ---------------------------------------------------------------

    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __set_fmu__(self, fmu_file, result_handler, solver, atol, rtol, verbose):\n if self.fmu is None:\n \n # TODO:\n # See what can be done in catching the exception/propagating it\n self.fmu = pyfmi.load_fmu(fmu_file)\n \n # Get the options for the simulation\n self.opts = self.fmu.simulate_options()\n \n # Define the simulation options\n self.set_simulation_options(result_handler, solver, atol, rtol, verbose)\n \n # Define the standard value for the result file\n self.set_result_file(None)\n \n # set the number of states\n self.N_STATES = len(self.get_state())\n \n # get the value references of the state variables\n self.stateValueReferences = self.fmu.get_state_value_references()\n \n # Properties of the FMU\n self.name = str(self.fmu.get_name())\n self.author = str(self.fmu.get_author())\n self.description = str(self.fmu.get_description())\n self.fmu_type = str(self.fmu.__class__.__name__)\n self.version = str(self.fmu.version)\n self.guid = str(self.fmu.get_guid())\n self.tool = str(self.fmu.get_generation_tool())\n [Ncont, Nevt] = self.fmu.get_ode_sizes()\n self.numStates = \"( \"+str(Ncont)+\" , \"+str(Nevt)+\" )\"\n \n # prepare the list of inputs and outputs\n self.__set_inputs__()\n self.__set_outputs__()\n \n else:\n logger.warn(\"The FMU has already been assigned to this model\")", "def __init__(\n self,\n model_filepath: str,\n user_validation: bool = True,\n ):\n\n # ensure model filepath is balid, and save as att if it is\n assert model_filepath.endswith(\".fmu\"), \"Provided filepath is not an FMU file: '{}'\".format(model_filepath)\n self.model_filepath = model_filepath\n # config file with config_params, inputs, outputs\n self.sim_config_filepath = SIM_CONFIG_NAME_f(self.model_filepath)\n\n # read the model description\n self.model_description = read_model_description(model_filepath)\n error_log = \"Provided model ({}) doesn't have modelVariables in XLS description file\".format(model_filepath)\n assert len(self.model_description.modelVariables) > 0, error_log\n\n # correct non-alphanumeric tags.\n # note, it doesn't suppose any problem, since interaction with sim uses indices, not names.\n self._clean_non_alphanumeric_chars()\n\n\n # collect the value references (indices)\n # collect the value types (Real, Integer or Enumeration)\n # collect the variables to be initialized and the value to do so at\n self.vars_to_idx = {}\n self.vars_to_type_f = {}\n self.vars_to_ini_vals = {}\n for variable in self.model_description.modelVariables:\n # extract key attributes per variable\n var_idx = variable.valueReference #, variable.causality\n var_name = variable.name\n var_type = variable.type\n var_start = variable.start\n \n # collect type reference\n if var_type == \"Real\":\n self.vars_to_type_f[var_name] = float\n elif var_type == \"Integer\":\n self.vars_to_type_f[var_name] = int\n else:\n # [TODO] Integrate variables of type \"Enumeration\". How do we cast? Define a function for \"self.vars_to_type_f\".\n # [TODO] Integrate variables of type string (need to find correct var_type tag first).\n # [TODO] Integrate variables of type boolean (need to find correct var_type tag first).\n print(f\"Variable '{var_name}' will be skipped. 
FMU connector cannot currently handle vars of type '{var_type}'.\")\n continue\n \n # collect the value references (indices)\n self.vars_to_idx[var_name] = var_idx\n\n # collect the variables to be initialized and the value to do so at\n if var_start is not None:\n # cast variable prior to storing\n self.vars_to_ini_vals[var_name] = self.vars_to_type_f[var_name](var_start)\n \n\n # initialize sim config\n self.is_model_config_valid = False # Currently unused, since error is raised if model invalid\n self.sim_config_params = []\n self.sim_inputs = []\n self.sim_outputs = []\n self.sim_other_vars = []\n\n # ---------------------------------------------------------------------\n # YAML CONFIG --> check for existing config using SIM_CONFIG_NAME_f --> e.g: \"{model_name}_conf.yaml\"\n valid_config = self._validate_sim_config()\n \n # exit if model is valid, unless validation has been activated\n if valid_config:\n\n # print model config for user reference: config_params, inputs, outputs\n print(self._get_sim_config_str())\n\n if user_validation:\n # prompt user to manually validate model if selected\n validation_asserted = input(\"Is this configuration correct (y|n)? \")\n\n if validation_asserted == \"y\":\n self.is_model_config_valid = True\n return\n \n # reset config if invalid\n self.sim_config_params = []\n self.sim_inputs = []\n self.sim_outputs = []\n self.sim_other_vars = []\n \n else:\n # when no validation is selected, we assume the sim config is valid\n self.is_model_config_valid = True\n return\n \n # ---------------------------------------------------------------------\n # FMI CONFIG --> if model is invalid we look for attributes within the .fmi model definition\n valid_config = self._extract_sim_config_from_fmi_std()\n\n if valid_config:\n\n # print model config for user reference: config_params, inputs, outputs\n print(self._get_sim_config_str())\n \n if user_validation:\n # prompt user to manually validate model if selected\n validation_asserted = input(\"Is this configuration correct (y|n)? \")\n\n if validation_asserted == \"y\":\n self.is_model_config_valid = True\n # dump YMAL file to reuse next time the model is loaded\n self._dump_config_to_yaml_file()\n return\n \n else:\n # when no validation is selected, we assume the sim config is valid\n self.is_model_config_valid = True\n # dump YMAL file to reuse next time the model is loaded\n self._dump_config_to_yaml_file()\n return\n \n # Dump auxiliary YAML config file if user doesn't assert the provided set\n # of config_params/inputs/outputs\n self._dump_config_to_yaml_file(is_aux_yaml = True)\n \n # If neither YAML nor FMI model is sufficient raise error\n error_log = \"MODEL DOES NOT HAVE THE CORRECT CONFIG DEFINED NEITHER ON YAML CONFIG FILE \"\n error_log += \"NOR FMI MODEL DESCRIPTION. A YAML FILE HAS BEEN CREATED FOR YOU TO MODIFY. 
\"\n error_log += \"THE SIM HAS BEEN FORCED TO EXIT, BUT FEEL FREE TO RERUN ONCE SET-UP IS COMPLETED.\"\n raise Exception(error_log)", "def __init__(self, fmu_file = None, result_handler = None, solver = None, atol = 1e-6, rtol = 1e-4, verbose = None, offset = None):\n \n # Reference to the FMU, that will be loaded using pyfmi\n self.fmu = None\n self.fmu_file = fmu_file\n # List of parameters\n self.parameters = []\n # List of state variables\n self.variables = []\n # List of inputs\n self.inputs = []\n # List of outputs\n self.outputs = []\n \n # Initialize the properties of the FMU\n self.name = \"\"\n self.author = \"\"\n self.description = \"\"\n self.fmu_type = \"\"\n self.version = \"\"\n self.guid = \"\"\n self.tool = \"\"\n self.numStates = \"\"\n \n # Number of maximum tries for a simulation to be successfully run\n self.SIMULATION_TRIES = 4\n \n # Empty dictionary that will contain the simulation options\n self.opts = {}\n \n # Set the number of states\n self.N_STATES = 0\n \n # Set the simulation date time offset\n self.offset = offset\n \n # An array that contains the value references for every state variable\n self.stateValueReferences = []\n \n # See what can be done in catching the exception/propagating it\n if fmu_file is not None:\n self.__set_fmu__(fmu_file, result_handler, solver, atol, rtol, verbose)", "def run(self):\n msg = sfmt.format(\"Run test\", self.name)\n print(msg)\n\n # Set nam as namefile name without path\n nam = None\n\n # run mf6 models\n exe = str(self.targets[\"mf6\"].absolute())\n msg = sfmt.format(\"using executable\", exe)\n print(msg)\n\n if self.parallel:\n print(\"running parallel on\", self.ncpus, \"processes\")\n try:\n success, buff = self.run_parallel(\n exe,\n )\n except Exception as exc: \n msg = sfmt.format(\"MODFLOW 6 run\", self.name)\n print(msg)\n print(exc)\n success = False\n else:\n try:\n success, buff = flopy.run_model(\n exe,\n nam,\n model_ws=self.simpath,\n silent=False,\n report=True,\n )\n msg = sfmt.format(\"MODFLOW 6 run\", self.name)\n if success:\n print(msg)\n else:\n print(msg)\n except:\n msg = sfmt.format(\"MODFLOW 6 run\", self.name)\n print(msg)\n success = False\n\n # set failure based on success and require_failure setting\n if self.require_failure is None:\n msg = \"MODFLOW 6 model did not terminate normally\"\n if success:\n failure = False\n else:\n failure = True\n else:\n if self.require_failure:\n msg = \"MODFLOW 6 model should have failed\"\n if not success:\n failure = False\n else:\n failure = True\n else:\n msg = \"MODFLOW 6 model should not have failed\"\n if success:\n failure = False\n else:\n failure = True\n\n # print end of mfsim.lst to the screen\n if failure and self.is_CI:\n fpth = os.path.join(self.simpath, \"mfsim.lst\")\n msg = self._get_mfsim_listing(fpth) + msg\n\n # test for failure\n assert not failure, msg\n\n self.nam_cmp = None\n if success:\n if self.action is not None:\n if self.action.lower() == \"compare\":\n msg = sfmt.format(\"Comparison files\", self.name)\n print(msg)\n else:\n cpth = os.path.join(self.simpath, self.action)\n key = self.action.lower().replace(\".cmp\", \"\")\n exe = str(self.targets[key].absolute())\n msg = sfmt.format(\"comparison executable\", exe)\n print(msg)\n if (\n \"mf6\" in key\n or \"libmf6\" in key\n or \"mf6_regression\" in key\n ):\n nam = None\n else:\n npth = get_namefiles(cpth)[0]\n nam = os.path.basename(npth)\n self.nam_cmp = nam\n try:\n if self.api_func is None:\n success_cmp, buff = flopy.run_model(\n exe,\n nam,\n model_ws=cpth,\n 
silent=False,\n report=True,\n )\n else:\n success_cmp, buff = self.api_func(\n exe, self.idxsim, model_ws=cpth\n )\n msg = sfmt.format(\n \"Comparison run\", self.name + \"/\" + key\n )\n print(msg)\n\n # print end of mfsim.lst to the screen\n if \"mf6\" in key:\n if not success and self.is_CI:\n fpth = os.path.join(cpth, \"mfsim.lst\")\n print(self._get_mfsim_listing(fpth))\n\n except:\n success_cmp = False\n msg = sfmt.format(\n \"Comparison run\", self.name + \"/\" + key\n )\n print(msg)\n\n assert success_cmp, \"Unsuccessful comparison run\"\n\n return", "def _simulate(ctx, gui):\n ctx.env['SFFUnits'] = load_SFFUnits(ctx)\n\n \"\"\"\n Creates the directory path and nodes in the build directory.\n Creates a taskgen from each other library in units_hdl\n \"\"\"\n\n top = ctx.env['SFFUnits'].getunit(ctx.env.top_level)\n\n for u in top.synu_deps + top.simu_deps:\n lib = u.script.parent.get_bld().make_node('work_vlib')\n lib.mkdir()\n u.b['vlib'] = lib\n\n if u.use('use'):\n tsk = ModelsimTask(\n name=u.name,\n target=lib,\n source=u.use('src'),\n includes=u.use('includes'),\n after=u.use('use'),\n output=lib,\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk)\n else:\n tsk = ModelsimTask(\n name=u.name,\n target=lib,\n source=u.use('src'),\n output=lib,\n includes=u.use('includes'),\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk)\n\n\n \"\"\"\n Create the testbench taskgen last as it is always at the top dep\n \"\"\"\n ctx.add_group()\n tb_lib = top.script.parent.get_bld().make_node('work_vlib')\n tb_lib.mkdir()\n top.b['tbvlib'] = tb_lib\n\n tsk = ModelsimTask(\n name=top.use('tb'),\n target=tb_lib,\n source=top.use('tb_src'),\n output=tb_lib,\n includes=top.use('tb_includes'),\n after=ctx.env.top_level,\n scan=SFF_verilog_scan,\n env=ctx.env )\n ctx.add_to_group(tsk)\n ctx.add_group()\n\n \"\"\"\n Run the Modelsim command with gui options provided.\n \"\"\"\n ##Run vsim\n ctx(name='vsim',\n rule='vsim %s -lib %s %s' % (gui,top.b['tbvlib'], top.use('tb')[0]),\n always = True)", "def __init__(\n self,\n model,\n ipakcb=None,\n intercellt=0,\n laycon=3,\n trpy=1.0,\n hdry=-1e30,\n iwdflg=0,\n wetfct=0.1,\n iwetit=1,\n ihdwet=0,\n ikvflag=0,\n ikcflag=0,\n tran=1.0,\n hy=1.0,\n vcont=1.0,\n kv=1.0,\n anglex=0.0,\n ksat=1.0,\n sf1=1e-5,\n sf2=0.15,\n wetdry=-0.01,\n extension=\"bcf\",\n unitnumber=None,\n filenames=None,\n add_package=True,\n ):\n msg = (\n \"Model object must be of type flopy.mfusg.MfUsg\\n\"\n f\"but received type: {type(model)}.\"\n )\n assert isinstance(model, MfUsg), msg\n\n super().__init__(\n model,\n ipakcb=ipakcb,\n intercellt=intercellt,\n laycon=laycon,\n trpy=trpy,\n hdry=hdry,\n iwdflg=iwdflg,\n wetfct=wetfct,\n iwetit=iwetit,\n ihdwet=ihdwet,\n tran=tran,\n hy=hy,\n vcont=vcont,\n sf1=sf1,\n sf2=sf2,\n wetdry=wetdry,\n extension=extension,\n unitnumber=unitnumber,\n filenames=filenames,\n add_package=False,\n )\n\n dis = model.get_package(\"DIS\")\n if dis is None:\n dis = model.get_package(\"DISU\")\n structured = self.parent.structured\n\n nrow, ncol, nlay, _ = self.parent.nrow_ncol_nlay_nper\n\n self.ikvflag = ikvflag\n self.ikcflag = ikcflag\n self.kv = kv\n self.anglex = anglex\n self.ksat = ksat\n\n if not structured:\n njag = dis.njag\n self.anglex = Util2d(\n model,\n (njag,),\n np.float32,\n anglex,\n \"anglex\",\n locat=self.unit_number[0],\n )\n\n # item 1\n self.kv = Util3d(\n model,\n (nlay, nrow, ncol),\n np.float32,\n kv,\n \"Vertical Hydraulic Conductivity\",\n locat=self.unit_number[0],\n )\n if not structured:\n 
self.ksat = Util3d(\n model,\n (njag,),\n np.float32,\n ksat,\n \"ksat\",\n locat=self.unit_number[0],\n )\n\n if add_package:\n self.parent.add_package(self)", "def main():\n parser = argparse.ArgumentParser(description=\"Generate standard form system FMUs through commandline\")\n parser.add_argument(\"--name\", default=\"qmodel\", type=str, help=\"Target FMU identifier\")\n parser.add_argument(\"--dir\", default=os.getcwd(), type=str, help=\"Target FMU path\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Verbose output\", action=\"store_true\")\n parser.add_argument(\"-n\", \"--dry-run\", help=\"Only print system information, use with -v.\", action=\"store_true\")\n\n subparsers = parser.add_subparsers(title=\"System form\", dest=\"subcmd\")\n ss = subparsers.add_parser(\"ss\", help=\"State space model: A, B, C, D\",\n description=\"Define ABCD matrices using string. The string is interpreted as a matrix with commas or spaces separating columns, and semicolons separating rows. e.g. '1,2;3,4' -> 2x2 matrix\")\n ss.add_argument(\"-A\", required=False, type=str, help=\"A matrix\")\n ss.add_argument(\"-B\", required=False, type=str, help=\"B matrix\")\n ss.add_argument(\"-C\", required=False, type=str, help=\"C matrix\")\n ss.add_argument(\"-D\", required=False, type=str, help=\"D matrix\")\n ss.add_argument(\"-x0\", required=False, type=str, help=\"Init state values, zero vector if empty\")\n ss.add_argument(\"-u0\", required=False, type=str, help=\"Init input values, zero vector if empty\")\n\n # tf = subparsers.add_parser(\"tf\", help=\"Transfer function (WIP)\")\n # tf.add_argument(\"-n\", default=\"1,0\", type=str, help=\"Numerator\")\n # tf.add_argument(\"-d\", default=\"1\", type=str, help=\"Denominator\")\n\n try:\n args = parser.parse_args()\n if args.subcmd == \"ss\":\n from qfmu.utils import str_to_1d_array, str_to_2d_array\n A = None if args.A is None or args.A==\"\" else str_to_2d_array(args.A)\n B = None if args.B is None or args.B==\"\" else str_to_2d_array(args.B)\n C = None if args.C is None or args.C==\"\" else str_to_2d_array(args.C)\n D = None if args.D is None or args.D==\"\" else str_to_2d_array(args.D)\n x0 = None if args.x0 is None or args.x0==\"\" else str_to_1d_array(args.x0)\n u0 = None if args.u0 is None or args.u0==\"\" else str_to_1d_array(args.u0)\n ss = StateSpace(A, B, C, D, x0, u0)\n m = Lti(ss, identifier=args.name)\n if args.verbose:\n logging.basicConfig(level=logging.INFO)\n if args.dry_run:\n print(f\"Target FMU:\\n{os.path.join(os.path.abspath(args.dir), args.name)}.fmu\")\n print(f\"System info:\\n{ss}\")\n else:\n m.buildFMU(args.dir)\n else:\n raise Exception(\"Unknown system form\")\n except Exception as ex:\n logging.error(ex)\n return -1\n\n return 0", "def setup(self):\n log.debug('Initializing Mex...')\n if (self.options.user and self.options.pwd and self.options.root):\n self.bqSession = BQSession().init_local( self.options.user, self.options.pwd, bisque_root=self.options.root)\n self.options.mexURL = self.bqSession.mex.uri\n\n elif (self.options.mexURL and self.options.token):\n self.bqSession = BQSession().init_mex(self.options.mexURL, self.options.token)\n else:\n return\n \n self.mex_parameter_parser(self.bqSession.mex.xmltree)\n \n #finds and opens model file\n self.bqSession.update_mex('Initializing Classification Model...')\n log.debug('Forming Feature Requests...')\n\n \n #no options currently\n #combo = mex_xml.xpath('tag[@name=\"plant_part\"]/@value')[0]\n combo = 'bush'\n if combo:\n if combo=='bush':\n 
MODEL_QUERY['tag_query'] = '\"module_identifier\":\"Botanicam\" AND \"Classification Method\":\"Bush Descriptor\"'\n elif combo=='leaf':\n MODEL_QUERY['tag_query'] = '\"module_identifier\":\"Botanicam\" AND \"Classification Method\":\"Leaf Descriptor\"'\n else:\n raise BotanicamError('The incorrect model type was found -> Model Type: %s'%combo)\n else:\n raise BotanicamError('No model type was choosen')\n \n query_xml = self.bqSession.fetchxml('/data_service/file', **MODEL_QUERY)\n\n self.options.model_url = None\n if len(query_xml)>0:\n try:\n model_url = query_xml[0].attrib['uri']\n self.options.model_url = model_url\n log.debug('Fetching Model @ %s' % model_url)\n self.model_xml = self.bqSession.fetchxml(model_url, view='deep')\n self.model_path = os.path.join(self.options.stagingPath, 'model')\n model = self.bqSession.load(model_url)\n model_url = self.bqSession.service_url('blob_service', path=model.resource_uniq)\n self.bqSession.fetchblob(model_url, path=self.model_path+'.zip')\n with zipfile.ZipFile(self.model_path+'.zip') as dirzip:\n dirzip.extractall(self.model_path)\n except BQCommError:\n raise BotanicamError('Model file was not found! Ask admin to set the correct model file')\n else: #run demo classifier model store in the module\n raise BotanicamError('No model file was found. Ask your admin to train a new model with \\\n the Botanicam Trainer.')\n\n self.bqSession.update_mex('Initialized...')\n log.debug('Botanicam: image URL: %s, mexURL: %s, stagingPath: %s, token: %s' % (self.options.image_url, self.options.mexURL, self.options.stagingPath, self.options.token))", "def run_models(request):\n job_form_data = request.session['job_form_data']\n job_wrapper = JobWrapper(job_form_data)\n job_wrapper.create_data_file()\n print job_wrapper.job_form_data\n # Must run emits to generate emis_co2.dat - this step is requried to\n # run the models and it's a lot simpler to have it run form here than\n # from a job manager script\n cmd = \"/var/opt/IMOGEN/EMITS/emits\"\n subprocess.call(cmd, shell=True)\n print \"Ran {0} program\".format(cmd)\n # Now submit the models via the job manager\n jr = DRMAAJobRunner()\n return jr.queue_job(job_wrapper)", "def test_models_multiclass(model):\n atom = ATOMClassifier(X_class2, y_class2, test_size=0.24, random_state=1)\n atom.run(\n models=model,\n metric=\"f1_micro\",\n n_calls=2,\n n_initial_points=1,\n bo_params={\"base_estimator\": \"rf\", \"cv\": 1},\n )\n assert not atom.errors\n assert hasattr(atom, model)", "def build_mod_bpmf_model(train, alpha=2, dim=10, std=0.01):\n n, m = train.shape\n beta_0 = 1 # scaling factor for lambdas; unclear on its use\n\n # Mean value imputation on training data.\n train = train.copy()\n nan_mask = np.isnan(train)\n train[nan_mask] = train[~nan_mask].mean()\n\n # We will use separate priors for sigma and correlation matrix.\n # In order to convert the upper triangular correlation values to a\n # complete correlation matrix, we need to construct an index matrix:\n n_elem = dim * (dim - 1) / 2\n tri_index = np.zeros([dim, dim], dtype=int)\n tri_index[np.triu_indices(dim, k=1)] = np.arange(n_elem)\n tri_index[np.triu_indices(dim, k=1)[::-1]] = np.arange(n_elem)\n\n logging.info('building the BPMF model')\n with pm.Model() as bpmf:\n # Specify user feature matrix\n sigma_u = pm.Uniform('sigma_u', shape=dim)\n corr_triangle_u = pm.LKJCorr(\n 'corr_u', n=1, p=dim,\n testval=np.random.randn(n_elem) * std)\n\n corr_matrix_u = corr_triangle_u[tri_index]\n corr_matrix_u = t.fill_diagonal(corr_matrix_u, 1)\n cov_matrix_u 
= t.diag(sigma_u).dot(corr_matrix_u.dot(t.diag(sigma_u)))\n lambda_u = t.nlinalg.matrix_inverse(cov_matrix_u)\n\n mu_u = pm.Normal(\n 'mu_u', mu=0, tau=beta_0 * t.diag(lambda_u), shape=dim,\n testval=np.random.randn(dim) * std)\n U = pm.MvNormal(\n 'U', mu=mu_u, tau=lambda_u, shape=(n, dim),\n testval=np.random.randn(n, dim) * std)\n\n # Specify item feature matrix\n sigma_v = pm.Uniform('sigma_v', shape=dim)\n corr_triangle_v = pm.LKJCorr(\n 'corr_v', n=1, p=dim,\n testval=np.random.randn(n_elem) * std)\n\n corr_matrix_v = corr_triangle_v[tri_index]\n corr_matrix_v = t.fill_diagonal(corr_matrix_v, 1)\n cov_matrix_v = t.diag(sigma_v).dot(corr_matrix_v.dot(t.diag(sigma_v)))\n lambda_v = t.nlinalg.matrix_inverse(cov_matrix_v)\n\n mu_v = pm.Normal(\n 'mu_v', mu=0, tau=beta_0 * t.diag(lambda_v), shape=dim,\n testval=np.random.randn(dim) * std)\n V = pm.MvNormal(\n 'V', mu=mu_v, tau=lambda_v, shape=(m, dim),\n testval=np.random.randn(m, dim) * std)\n\n # Specify rating likelihood function\n R = pm.Normal(\n 'R', mu=t.dot(U, V.T), tau=alpha * np.ones((n, m)),\n observed=train)\n\n logging.info('done building the BPMF model')\n return bpmf", "def model_test(nu, fsigma_T, fsigma_P, models_in, amps_in, params_in, models_fit, label):\n # Generate fake data with some \"true\" parameters\n (D_vec, Ninv) = gen_data(nu, fsigma_T, fsigma_P, models_in, amps_in, params_in)\n Ninv_sqrt = np.matrix(linalg.sqrtm(Ninv))\n (dust_params, sync_params, cmb_params) = params_in\n (dust_amp, sync_amp, cmb_amp) = amps_in\n \n # Beam model\n beam_mat = np.identity(3*len(nu))\n\n # Set-up MCMC\n dust_guess = np.array([1.6, 20.])\n sync_guess = np.array([-3.])\n cmb_guess = np.array([])\n guess = np.concatenate((dust_guess, sync_guess, cmb_guess))\n #ndim = len(dust_guess) + len(sync_guess) + len(cmb_guess)\n \n # Run MCMC sampler on this model\n t0 = time.time()\n dust_params_out, sync_params_out, cmb_params_out, samples \\\n = mcmc(guess, nu, D_vec, Ninv, beam_mat, models_fit, label)\n print \"MCMC run in %d sec.\" % (time.time() - t0)\n \n # Estimate error on recovered CMB amplitudes\n (F_fg, F_cmb, F) = F_matrix(nu, dust_params_out, sync_params_out, cmb_params_out, models_fit)\n H = F_fg.T*Ninv*F_fg\n x_mat = np.linalg.inv(F.T*beam_mat.T*Ninv*beam_mat*F)*F.T*beam_mat.T*Ninv*D_vec # Equation A3\n \n U, Lambda, VT = np.linalg.svd(Ninv_sqrt*F_fg, full_matrices=False) # Equation A14\n \n print \"-\"*30\n print \"F_cmb.T\", F_cmb.T.shape\n print \"Ninv_sqrt\", Ninv_sqrt.shape\n print \"F_cmb\", F_cmb.shape\n print \"I\", np.identity(U.shape[0]).shape\n print \"U\", U.shape\n print \"U.T\", U.T.shape\n print \"-\"*30\n \n \n \n N_eff_inv_cmb = F_cmb.T*Ninv_sqrt*(np.matrix(np.identity(U.shape[0])) - U*U.T)*Ninv_sqrt*F_cmb # Equation A16\n N_eff_cmb = np.linalg.inv(N_eff_inv_cmb)\n cmb_noise = np.array([N_eff_cmb[0,0], N_eff_cmb[1,1], N_eff_cmb[2,2]])\n\n gls_cmb = x_mat[0:3,0]\n cmb_chisq = (np.matrix(cmb_amp).T - gls_cmb).T*N_eff_inv_cmb*(np.matrix(cmb_amp).T - gls_cmb)\n \n # Output triangle plots for dust\n if label != None:\n if (models_fit[0] == 'mbb' and models_fit[1] == 'pow'):\n if (models_in[0] == 'mbb'):\n fig = corner.corner(samples, truths=[dust_params[0], dust_params[1], sync_params[0]],\n labels=[r\"$\\beta_d$\", r\"$T_d$\",r\"$\\alpha_s$\"])\n else :\n fig = corner.corner(samples, labels=[r\"$\\beta_d$\", r\"$T_d$\",r\"$\\alpha_s$\"])\n else :\n print 'Error! 
Not configured for this plot!'\n exit()\n fig.savefig('triangle_' + label + '.png')\n plt.close('all')\n \n # Run multinest sampler\n #multinest(nu, D_vec, Ninv, beam_mat, ndim, models_fit, label)\n \n return gls_cmb, cmb_chisq, cmb_noise", "def main(**kwargs):\n flowsheet = Flowsheet(name='MB_Model') \n \n # Fix variables\n setInputs(flowsheet) \n\n ts = time.time() \n\n mb = flowsheet.MB_fuel\n \n # Initialize fuel reactor\n flowsheet.MB_fuel._initialize(outlvl=1,\n optarg={\"tol\" : 1e-8,\n \"max_cpu_time\" : 600,\n \"print_level\" : 5,\n \"halt_on_ampl_error\": 'yes'}) \n \n # Create a solver\n opt = SolverFactory('ipopt')\n opt.options = {'tol': 1e-8,\n 'linear_solver' : 'ma27',\n 'bound_push': 1e-8,\n 'max_cpu_time': 600,\n 'print_level': 5}\n \n results = opt.solve(flowsheet,tee=True,symbolic_solver_labels=False,\n keepfiles=False)\n\n #flowsheet.MB_fuel.Solid_In_M.fix(691.4)\n #flowsheet.MB_fuel.Gas_In_y['CO2'].fix(0.03999)\n #flowsheet.MB_fuel.Gas_In_y['H2O'].fix(0.00001)\n #flowsheet.MB_fuel.Gas_In_y['CH4'].fix(0.96)\n\n\n\n #results = opt.solve(flowsheet,tee=True,symbolic_solver_labels=False,\n # keepfiles=False)\n \n \n print(\"\\n\")\n print(\"----------------------------------------------------------\")\n print('Total simulation time: ', value(time.time() - ts), \" s\")\n print(\"----------------------------------------------------------\")\n\n \n # Print some variables \n #print_summary_fuel_reactor(flowsheet) \n\n # Plot some variables \n #results_plot_fuel_reactor(flowsheet) \n\n m = flowsheet.MB_fuel\n if 'Solid_M' in kwargs:\n m.Solid_In_M.fix(kwargs['Solid_M'])\n if 'Solid_T' in kwargs:\n m.Solid_In_Ts[t].fix(kwargs['Solid_T'])\n if 'Solid_x' in kwargs:\n m.Solid_In_x['Fe2O3'].fix(kwargs['Solid_x']['Fe2O3'])\n m.Solid_In_x['Fe3O4'].fix(kwargs['Solid_x']['Fe3O4'])\n m.Solid_In_x['Al2O3'].fix(kwargs['Solid_x']['Al2O3'])\n if 'Gas_F' in kwargs:\n m.Gas_In_F.fix(kwargs['Gas_F'])\n if 'Gas_P' in kwargs:\n m.Gas_In_P.fix(kwargs['Gas_P'])\n if 'Gas_T' in kwargs:\n m.Gas_In_T.fix(kwargs['Gas_T'])\n if 'Gas_y' in kwargs:\n m.Gas_In_y['CO2'].fix(kwargs['Gas_y']['CO2'])\n m.Gas_In_y['H2O'].fix(kwargs['Gas_y']['H2O'])\n m.Gas_In_y['CH4'].fix(kwargs['Gas_y']['CH4'])\n\n results = opt.solve(flowsheet, tee=True)\n\n with open('ss_fs.txt','w') as f:\n flowsheet.display(ostream=f)\n\n dt_Gflux_CO2 = []\n dt_Gflux_H2O = []\n dt_Gflux_CH4 = []\n dt_Sflux_Fe2O3 = []\n dt_Sflux_Fe3O4 = []\n dt_Sflux_Al2O3 = []\n dt_Ctrans_CO2 = []\n dt_Ctrans_H2O = []\n dt_Ctrans_CH4 = []\n dt_qtrans_Fe2O3 = []\n dt_qtrans_Fe3O4 = []\n dt_qtrans_Al2O3 = []\n dt_Ghflux = []\n dt_Ts = []\n dt_TgGS = []\n dt_TsGS = []\n dt_vg = []\n dt_vs = []\n\n# for z in mb.z.get_finite_elements():\n# if z != mb.z.first() and z != mb.z.last():\n#\n# dt_Gflux_CO2.append( (mb.Cg[z,'CO2'].value-mb.Cg[prev,'CO2'].value)/\\\n# (mb.G_flux[z,'CO2'].value-mb.G_flux[prev,'CO2'].value) \\\n# *(z-prev)*mb.eps.value*mb.L.value /(z-prev))\n#\n# dt_Gflux_H2O.append( (mb.Cg[z,'H2O'].value-mb.Cg[prev,'H2O'].value)/\\\n# (mb.G_flux[z,'H2O'].value-mb.G_flux[prev,'H2O'].value) \\\n# *(z-prev)*mb.eps.value*mb.L.value /(z-prev))\n#\n# dt_Gflux_CH4.append( (mb.Cg[z,'CH4'].value-mb.Cg[prev,'CH4'].value)/\\\n# (mb.G_flux[z,'CH4'].value-mb.G_flux[prev,'CH4'].value) \\\n# *(z-prev)*mb.eps.value*mb.L.value /(z-prev))\n#\n# dt_Ctrans_CO2.append( (mb.Cg[z,'CO2'].value-mb.Cg[prev,'CO2'].value)/\\\n# (mb.Ctrans[z,'CO2'].value)* \\\n# #-mv.Ctrans[prev,'CO2'].value)*\\\n# mb.eps.value/(1-mb.eps.value) /(z-prev))\n#\n# dt_Ctrans_H2O.append( 
(mb.Cg[z,'H2O'].value-mb.Cg[prev,'H2O'].value)/\\\n# (mb.Ctrans[z,'H2O'].value)* \\\n# #-mv.Ctrans[prev,'H2O'].value)*\\\n# mb.eps.value/(1-mb.eps.value) /(z-prev))\n#\n# dt_Ctrans_CH4.append( (mb.Cg[z,'CH4'].value-mb.Cg[prev,'CH4'].value)/\\\n# (mb.Ctrans[z,'CH4'].value)* \\\n# #-mv.Ctrans[prev,'CH4'].value)*\\\n# mb.eps.value/(1-mb.eps.value) /(z-prev))\n#\n# dt_Sflux_Fe2O3.append( (mb.q[z,'Fe2O3'].value-mb.q[prev,'Fe2O3'].value)/\\\n# (mb.S_flux[z,'Fe2O3'].value-mb.S_flux[prev,'Fe2O3'].value)*\\\n# (z-prev)/(1-mb.eps.value)*mb.L.value /(z-prev))\n#\n# dt_Sflux_Fe3O4.append( (mb.q[z,'Fe3O4'].value-mb.q[prev,'Fe3O4'].value)/\\\n# (mb.S_flux[z,'Fe3O4'].value-mb.S_flux[prev,'Fe3O4'].value)*\\\n# (z-prev)/(1-mb.eps.value)*mb.L.value /(z-prev))\n#\n# dt_Sflux_Al2O3.append( (mb.q[z,'Al2O3'].value-mb.q[prev,'Al2O3'].value)/\\\n# (mb.S_flux[z,'Al2O3'].value-mb.S_flux[prev,'Al2O3'].value)*\\\n# (z-prev)/(1-mb.eps.value)*mb.L.value /(z-prev))\n#\n# dt_qtrans_Fe2O3.append( (mb.q[z,'Fe2O3'].value-mb.q[prev,'Fe2O3'].value)/\\\n# (mb.qtrans[z,'Fe2O3'].value )/(z-prev)) \n# #-mb.qtrans[prev,'Fe2O3'].value) )\n#\n# dt_qtrans_Fe3O4.append( (mb.q[z,'Fe3O4'].value-mb.q[prev,'Fe3O4'].value)/\\\n# (mb.qtrans[z,'Fe3O4'].value )/(z-prev)) \n# #-mb.qtrans[prev,'Fe3O4'].value) )\n#\n# dt_qtrans_Al2O3.append( (mb.q[z,'Fe3O4'].value-mb.q[prev,'Fe3O4'].value)/\\\n# (mb.qtrans[z,'Fe3O4'].value )/(z-prev)) \n# #-mb.qtrans[prev,'Fe3O4'].value) )\n#\n# dt_Ghflux.append( (mb.Tg[z].value-mb.Tg[prev].value)/\\\n# (mb.Gh_flux[z].value-mb.Gh_flux[prev].value)* (z-prev)* mb.eps.value*\\\n# mb.L.value* mb.rho_vap[z].value* mb.cp_gas[z].value /(z-prev)) \n#\n# dt_Ts.append( (z-prev)*(1-mb.eps.value)*mb.L.value/mb.vs.value /(z-prev))\n#\n# dt_TgGS.append( (mb.Tg[z].value - mb.Tg[prev].value)/\\\n# mb.Tg_GS[z].value* mb.eps.value* mb.rho_vap[z].value* mb.cp_gas[z].value \n# /(z-prev))\n# \n# dt_TsGS.append( (mb.Ts[z].value - mb.Ts[prev].value)/\\\n# mb.Tg_GS[z].value* (1-mb.eps.value)* mb.rho_sol.value* mb.cp_sol[z].value*1e-3 \n# /(z-prev))\n# \n# dt_vg.append( mb.L.value*(z-prev)/mb.vg[z].value /(z-prev))\n# \n# dt_vs.append( mb.L.value*(z-prev)/mb.vs.value /(z-prev))\n#\n# prev = z\n#\n# with open('dt.txt','w') as f:\n# f.write('dt_Gflux_CO2\\t')\n# for t in dt_Gflux_CO2:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Gflux_H2O\\t')\n# for t in dt_Gflux_H2O:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Gflux_CH4\\t') \n# for t in dt_Gflux_CH4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Sflux_Fe2O3\\t') \n# for t in dt_Sflux_Fe2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Sflux_Fe3O4\\t') \n# for t in dt_Sflux_Fe3O4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Sflux_Al2O3\\t') \n# for t in dt_Sflux_Al2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ctrans_CO2\\t') \n# for t in dt_Ctrans_CO2:\n# f.write('%1.3f'%t +'\\t')\n# \n# f.write('\\ndt_Ctrans_H2O\\t') \n# for t in dt_Ctrans_H2O:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ctrans_CH4\\t') \n# for t in dt_Ctrans_CH4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_qtrans_Fe2O3\\t') \n# for t in dt_qtrans_Fe2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_qtrans_Fe3O4\\t') \n# for t in dt_qtrans_Fe3O4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_qtrans_Al2O3\\t') \n# for t in dt_qtrans_Al2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ghflux\\t') \n# for t in dt_Ghflux:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ts\\t\\t') \n# for t in dt_Ts:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_TgGS\\t\\t') 
\n# for t in dt_TgGS:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_TsGS\\t\\t') \n# for t in dt_TsGS:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_vg\\t\\t') \n# for t in dt_vg:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_vs\\t\\t') \n# for t in dt_vs:\n# f.write('%1.3f'%t +'\\t')\n\n\n # Store the flowsheet \n return flowsheet", "def cli_simulate(model_file, output_dir, exporter, overwrite, compression,\n confirm, progress: int, progress_tag, output_same,\n simtime_total, simtime_lims, max_sweeps, max_residual, fipy_solver,\n snapshot_interval,\n plot, video, frames, budget, resume, show_eqns):\n\n click.secho('Starting MicroBenthos simulation', fg='green')\n from microbenthos.utils import yaml\n\n click.echo('Loading model from {}'.format(model_file))\n with open(model_file, 'r') as fp:\n defs = yaml.unsafe_load(fp)\n\n if 'model' not in defs and 'domain' in defs:\n # model is not under a separate key, so insert it under \"model\"\n defs = dict(model=defs)\n\n if 'simulation' not in defs:\n defs['simulation'] = {}\n\n # we want to override the keys in the loaded simulation dictionary,\n # so that when it is created the definition stored on the instance and\n # eventually exported to file includes these user overrides\n\n sim_kwargs = dict(\n simtime_total=simtime_total,\n fipy_solver=fipy_solver,\n max_sweeps=max_sweeps,\n simtime_lims=simtime_lims,\n max_residual=max_residual,\n snapshot_interval=snapshot_interval,\n )\n for k, v in sim_kwargs.items():\n if v is None:\n continue\n else:\n defs['simulation'][k] = v\n\n if output_same:\n output_dir = str(Path(model_file).parent)\n click.secho(f'Output directory set to: {output_dir}')\n\n from microbenthos.runners import SimulationRunner\n runner = SimulationRunner(output_dir=output_dir,\n model=defs['model'],\n simulation=defs['simulation'],\n resume=resume,\n overwrite=overwrite,\n confirm=confirm,\n progress=progress,\n progress_tag=progress_tag,\n plot=plot,\n video=video,\n frames=frames,\n budget=budget,\n exporters=exporter,\n show_eqns=show_eqns)\n\n if not runner.get_data_exporters():\n click.secho('No data exporters defined. 
Adding with compression={}'.format(\n compression), fg='red')\n runner.add_exporter('model_data', output_dir=runner.output_dir,\n compression=compression)\n\n runner.run()", "def runModel(quickLogger,\n\t base,\n modelFile=\"\",\n\t irfs=\"P7SOURCE_V6\",\n run=True):\n \n if(modelFile):\n model = modelFile\n else:\n model = base+\"_likeMinuit.xml\"\n\n\n try:\n checkForFiles(quickLogger,\n [base+\"_srcMaps.fits\",\n model,\n base+\"_ltcube.fits\",\n base+\"_BinnedExpMap.fits\"])\n except(FileNotFound):\n quickLogger.critical(\"One or more needed files do not exist.\")\n return\n\n model_map['srcmaps'] = base+\"_srcMaps.fits\"\n model_map['srcmdl'] = model\n model_map['outfile'] = base+\"_modelMap.fits\"\n model_map['expcube'] = base+\"_ltcube.fits\"\n model_map['irfs'] = irfs\n model_map['bexpmap'] = base+\"_BinnedExpMap.fits\"\n \n runCommand(model_map,quickLogger,run)", "def test_FEMM_periodicity_time_no_periodicity_a():\n\n SPMSM_015 = load(join(DATA_DIR, \"Machine\", \"SPMSM_015.json\"))\n\n assert SPMSM_015.comp_periodicity() == (9, False, 9, True)\n\n simu = Simu1(name=\"test_FEMM_periodicity_time_no_periodicity_a\", machine=SPMSM_015)\n\n # Definition of the enforced output of the electrical module\n I0_rms = 250 / sqrt(2)\n Phi0 = 140 * pi / 180 # Maximum Torque Per Amp\n\n Id_ref = (I0_rms * exp(1j * Phi0)).real\n Iq_ref = (I0_rms * exp(1j * Phi0)).imag\n\n simu.input = InputCurrent(\n Id_ref=Id_ref,\n Iq_ref=Iq_ref,\n Na_tot=252 * 9,\n Nt_tot=4 * 9,\n N0=1000,\n )\n\n # Definition of the magnetic simulation: with periodicity\n simu.mag = MagFEMM(\n type_BH_stator=1,\n type_BH_rotor=1,\n is_periodicity_a=False,\n is_periodicity_t=True,\n nb_worker=cpu_count(),\n Kmesh_fineness=2,\n )\n simu.force = ForceMT()\n\n # Definition of the magnetic simulation: no periodicity\n simu2 = simu.copy()\n simu2.mag.is_periodicity_t = False\n\n # Run simulations\n out = Output(simu=simu)\n simu.run()\n\n out2 = Output(simu=simu2)\n simu2.run()\n\n # Plot the result\n out.mag.B.plot_2D_Data(\n \"time\",\n \"angle[0]{°}\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.B.plot_2D_Data(\n \"angle{°}\",\n \"time[1]\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_space.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"wavenumber=[0,100]\",\n \"time[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_space_fft.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"freqs\",\n \"angle[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_fft2.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Tem.plot_2D_Data(\n \"time\",\n data_list=[out2.mag.Tem],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Tem_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Phi_wind_stator.plot_2D_Data(\n \"time\",\n \"phase\",\n data_list=[out2.mag.Phi_wind_stator],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Phi_wind_stator_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n Bflux = out.mag.B\n arg_list = [\"time\"]\n result = Bflux.get_rphiz_along(*arg_list)\n Brad = result[\"radial\"]\n time = result[\"time\"]\n\n Bflux2 = out2.mag.B\n arg_list = 
[\"time\"]\n result2 = Bflux2.get_rphiz_along(*arg_list)\n Brad2 = result2[\"radial\"]\n time2 = result2[\"time\"]\n\n # Compare both simu\n assert_array_almost_equal((Brad - Brad2) / Brad2, 0, decimal=2)\n assert_array_almost_equal(time, time2, decimal=6)\n\n AGSF = out.force.AGSF\n arg_list = [\"time\"]\n result_AGSF = AGSF.get_rphiz_along(*arg_list)\n Prad = result_AGSF[\"radial\"]\n time3 = result_AGSF[\"time\"]\n\n AGSF2 = out2.force.AGSF\n arg_list = [\"time\"]\n result_AGSF2 = AGSF2.get_rphiz_along(*arg_list)\n Prad2 = result_AGSF2[\"radial\"]\n time4 = result_AGSF2[\"time\"]\n\n # Compare both simu\n assert_array_almost_equal((Prad - Prad2) / Prad2, 0, decimal=2)\n assert_array_almost_equal(time3, time4, decimal=6)\n\n return out, out2", "def test_FEMM_periodicity_time():\n\n SPMSM_015 = load(join(DATA_DIR, \"Machine\", \"SPMSM_015.json\"))\n\n assert SPMSM_015.comp_periodicity() == (9, False, 9, True)\n\n simu = Simu1(name=\"test_FEMM_periodicity_time\", machine=SPMSM_015)\n\n # Definition of the enforced output of the electrical module\n I0_rms = 250 / sqrt(2)\n Phi0 = 140 * pi / 180 # Maximum Torque Per Amp\n\n Id_ref = (I0_rms * exp(1j * Phi0)).real\n Iq_ref = (I0_rms * exp(1j * Phi0)).imag\n\n simu.input = InputCurrent(\n Id_ref=Id_ref,\n Iq_ref=Iq_ref,\n Na_tot=252 * 9,\n Nt_tot=4 * 9,\n N0=1000,\n )\n\n # Definition of the magnetic simulation: with periodicity\n simu.mag = MagFEMM(\n type_BH_stator=1,\n type_BH_rotor=1,\n is_periodicity_a=True,\n is_periodicity_t=True,\n nb_worker=cpu_count(),\n Kmesh_fineness=2,\n )\n simu.force = ForceMT()\n\n # Definition of the magnetic simulation: no periodicity\n simu2 = simu.copy()\n simu2.mag.is_periodicity_t = False\n\n # Run simulations\n out = Output(simu=simu)\n simu.run()\n\n out2 = Output(simu=simu2)\n simu2.run()\n\n # Plot the result\n out.mag.B.plot_2D_Data(\n \"time\",\n \"angle[0]{°}\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.B.plot_2D_Data(\n \"angle{°}\",\n \"time[1]\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_space.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"wavenumber=[0,100]\",\n \"time[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_space_fft.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"freqs\",\n \"angle[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_fft2.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Tem.plot_2D_Data(\n \"time\",\n data_list=[out2.mag.Tem],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Tem_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Phi_wind_stator.plot_2D_Data(\n \"time\",\n \"phase\",\n data_list=[out2.mag.Phi_wind_stator],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Phi_wind_stator_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n # Compare both simu with B\n Bflux = out.mag.B\n arg_list = [\"time\"]\n result = Bflux.get_rphiz_along(*arg_list)\n Brad = result[\"radial\"]\n time = result[\"time\"]\n\n # Check Flux spatio-temporal reconstruction full\n Bflux2 = out2.mag.B\n arg_list = [\"time\"]\n result2 = Bflux2.get_rphiz_along(*arg_list)\n Brad2 = result2[\"radial\"]\n 
time = result2[\"time\"]\n\n assert_array_almost_equal(Brad, Brad2, decimal=2)\n\n # Compare both simu with AGSF\n AGSF = out.force.AGSF\n arg_list = [\"time\"]\n result_AGSF = AGSF.get_rphiz_along(*arg_list)\n Prad = result_AGSF[\"radial\"]\n time = result_AGSF[\"time\"]\n\n AGSF2 = out2.force.AGSF\n arg_list = [\"time\"]\n result_AGSF2 = AGSF2.get_rphiz_along(*arg_list)\n Prad2 = result_AGSF2[\"radial\"]\n time = result_AGSF2[\"time\"]\n\n assert_array_almost_equal(Prad / 1000, Prad2 / 1000, decimal=0)\n\n return out, out2", "def run(self):\n\n self._logger.debug(\"Starting Dummy Model: modelID=%s;\" % (self._modelID))\n\n # =========================================================================\n # Initialize periodic activities (e.g., for model result updates)\n # =========================================================================\n periodic = self._initPeriodicActivities()\n\n self._optimizedMetricLabel = self._optimizeKeyPattern\n self._reportMetricLabels = [self._optimizeKeyPattern]\n\n # =========================================================================\n # Create our top-level loop-control iterator\n # =========================================================================\n if self._iterations >= 0:\n iterTracker = iter(xrange(self._iterations))\n else:\n iterTracker = iter(itertools.count())\n\n # =========================================================================\n # This gets set in the unit tests. It tells the worker to sys exit\n # the first N models. This is how we generate orphaned models\n doSysExit = False\n if self._sysExitModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._sysExitModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n doSysExit = True\n\n if self._delayModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._delayModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n time.sleep(10)\n \n # DEBUG!!!! 
infinite wait if we have 50 models\n #if len(modelIDs) >= 50:\n # jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]\n # while not jobCancel:\n # time.sleep(1)\n # jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]\n\n if self._errModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._errModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n raise RuntimeError(\"Exiting with error due to errModelRange parameter\")\n\n # =========================================================================\n # Delay, if necessary\n if self._delay is not None:\n time.sleep(self._delay)\n\n # =========================================================================\n # Run it!\n # =========================================================================\n self._currentRecordIndex = 0\n while True:\n\n # =========================================================================\n # Check if the model should be stopped\n # =========================================================================\n\n # If killed by a terminator, stop running\n if self._isKilled:\n break\n\n # If job stops or hypersearch ends, stop running\n if self._isCanceled:\n break\n\n # If model is mature, stop running ONLY IF we are not the best model\n # for the job. Otherwise, keep running so we can keep returning\n # predictions to the user\n if self._isMature:\n if not self._isBestModel:\n self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED\n break\n else:\n self._cmpReason = self._jobsDAO.CMPL_REASON_EOF\n\n # =========================================================================\n # Get the the next record, and \"write it\"\n # =========================================================================\n try:\n self._currentRecordIndex = next(iterTracker)\n except StopIteration:\n break\n\n # \"Write\" a dummy output value. This is used to test that the batched\n # writing works properly\n\n self._writePrediction(ModelResult(None, None, None, None))\n\n periodic.tick()\n\n # =========================================================================\n # Compute wait times. 
See if model should exit\n # =========================================================================\n\n if self.__shouldSysExit(self._currentRecordIndex):\n sys.exit(1)\n\n # Simulate computation time\n if self._busyWaitTime is not None:\n time.sleep(self._busyWaitTime)\n self.__computeWaitTime()\n\n # Asked to abort after so many iterations?\n if doSysExit:\n sys.exit(1)\n\n # Asked to raise a jobFailException?\n if self._jobFailErr:\n raise utils.JobFailException(\"E10000\",\n \"dummyModel's jobFailErr was True.\")\n\n # =========================================================================\n # Handle final operations\n # =========================================================================\n if self._doFinalize:\n if not self._makeCheckpoint:\n self._model = None\n\n # Delay finalization operation\n if self._finalDelay is not None:\n time.sleep(self._finalDelay)\n\n self._finalize()\n\n self._logger.info(\"Finished: modelID=%r \"% (self._modelID))\n\n return (self._cmpReason, None)", "def _backend_run(self):\n if self.vM is not None:\n return self.vM\n else:\n everything = copy.copy(self.model.attrs)\n if hasattr(self,'Iext'):\n everything.update({'Iext':self.Iext})\n\n if 'current_inj' in everything.keys():\n everything.pop('current_inj',None)\n everything = copy.copy(self.model.attrs)\n\n self.model.attrs['celltype'] = round(self.model.attrs['celltype'])\n if self.model.attrs['celltype'] <= 3:\n everything.pop('celltype',None)\n v = get_vm_matlab_one_two_three(**everything)\n else:\n if self.model.attrs['celltype'] == 4:\n v = get_vm_matlab_four(**everything)\n if self.model.attrs['celltype'] == 5:\n v = get_vm_matlab_five(**everything)\n if self.model.attrs['celltype'] == 6:\n v = get_vm_matlab_six(**everything)\n if self.model.attrs['celltype'] == 7:\n #print('gets into multiple regimes',self.attrs['celltype'])\n\n v = get_vm_matlab_seven(**everything)\n\n return AnalogSignal(v, units=pq.mV,\n sampling_period=0.125*pq.ms)", "def initialize_model(self, config_param_vals = None):\n self._is_initialized = True\n\n self.fmu.instantiate()\n self.fmu.reset()\n self.fmu.setupExperiment(startTime=self.start_time)\n if config_param_vals is not None:\n self._apply_config(config_param_vals)\n self.fmu.enterInitializationMode()\n self.fmu.exitInitializationMode()\n\n return", "def setup_biosafe(self):\n # Generate dummy data in the right format\n species_presence = pd.DataFrame(\n np.random.randint(2, size=len(self.links_law)),\n columns=['speciesPresence'], index=self.links_law.index)\n\n ecotope_area = pd.DataFrame(\n np.ones(len(self.links_eco2.columns)-1) * 1e5,\n columns = ['area_m2'],\n index = self.links_eco2.columns.values[0:-1])\n\n # Simplify ecotope tables to VR ecotopes\n unique_eco = np.unique(\n np.hstack((self.vr_eco.ecotope1.values,\n self.vr_eco.ecotope2.values)))\n links_eco3 = self.links_eco2.reindex(columns=unique_eco)\n ecotope_area = ecotope_area.reindex(index=unique_eco)\n\n # Run a first version of Biosafe\n self.bsf_model = bsf.biosafe(\n self.legal_weights, self.links_law, links_eco3,\n species_presence, ecotope_area)\n\n #PotTax = self.bsf_model.TFI()\n #PotAll = self.bsf_model.FI()\n return", "def build_bpmf_model(train, alpha=2, dim=10, std=0.01):\n n, m = train.shape\n beta_0 = 1 # scaling factor for lambdas; unclear on its use\n\n # Mean value imputation on training data.\n train = train.copy()\n nan_mask = np.isnan(train)\n train[nan_mask] = train[~nan_mask].mean()\n\n logging.info('building the BPMF model')\n with pm.Model() as bpmf:\n # Specify user 
feature matrix\n lambda_u = pm.Wishart(\n 'lambda_u', n=dim, V=np.eye(dim), shape=(dim, dim),\n testval=np.random.randn(dim, dim) * std)\n mu_u = pm.Normal(\n 'mu_u', mu=0, tau=beta_0 * lambda_u, shape=dim,\n testval=np.random.randn(dim) * std)\n U = pm.MvNormal(\n 'U', mu=mu_u, tau=lambda_u, shape=(n, dim),\n testval=np.random.randn(n, dim) * std)\n\n # Specify item feature matrix\n lambda_v = pm.Wishart(\n 'lambda_v', n=dim, V=np.eye(dim), shape=(dim, dim),\n testval=np.random.randn(dim, dim) * std)\n mu_v = pm.Normal(\n 'mu_v', mu=0, tau=beta_0 * lambda_v, shape=dim,\n testval=np.random.randn(dim) * std)\n V = pm.MvNormal(\n 'V', mu=mu_v, tau=lambda_v, shape=(m, dim),\n testval=np.random.randn(m, dim) * std)\n\n # Specify rating likelihood function\n R = pm.Normal(\n 'R', mu=t.dot(U, V.T), tau=alpha * np.ones((n, m)),\n observed=train)\n\n logging.info('done building the BPMF model')\n return bpmf", "def run_libfm(train, test, iter=20, std=0.2, dim=8, bias=False,\n outfile=''):\n kwargs = {k: v for k, v in locals().items() if not k in ['train', 'test']}\n args = compose_libfm_args(train, test, **kwargs)\n cmd = ' '.join(args)\n logging.debug(cmd)\n\n proc = sub.Popen(cmd, shell=True, stdout=sub.PIPE)\n retcode = proc.wait()\n if retcode:\n raise LibFMFailed(\"libFM failed to execute.\\n%s\" % cmd)\n\n output = proc.communicate()[0]\n lines = output.split('\\n')\n rows = [row.split('\\t')[1:] for row in lines[-iter:] if row]\n train_err = '%.6f' % float(rows[-1][0].split('=')[1])\n test_err = '%.6f' % float(rows[-1][1].split('=')[1])\n return [train_err, test_err]", "def run(mu_v, Sigma_w, Sigma_z, a_mu, l_sensor):\n N = 1000\n # Init tracking\n mu_x = np.zeros(N) # Belief or estimation of hidden state \n F = np.zeros(N) # Free Energy of AI neuron\n mu_y = np.zeros(N) # Belief or prediction of sensory signal \n x = np.zeros(N) # True hidden state\n y = np.zeros(N) # Sensory signal as input to AI neuron\n\n robot_brain = pp_unit(dt, mu_v, Sigma_w, Sigma_z, a_mu) #make pp object\n \n \n\n start_time = time.time()\n for i in np.arange(1, N):\n #Active inference\n y[i] = l_sensor.ambient_light_intensity #take sensor reading\n print('light reading', y[i])\n F[i], mu_x[i], mu_y[i] = robot_brain.inference_step(i, mu_v, y[i])\n\n\n t_elapsed = time.time() - start_time\n\n print(\"Elapsed Time\", t_elapsed, \"sec\")\n return F, mu_x, mu_y, x, y", "def test_flim_model(datadir):\n # ifuslot_063\n filename = datadir.join(\"test_hdf.h5\").strpath\n hdcon1 = SensitivityCubeHDF5Container(filename, flim_model=\"hdr1\")\n hdcon2 = SensitivityCubeHDF5Container(filename, flim_model=\"hdr2pt1\")\n\n scube1 = hdcon1.extract_ifu_sensitivity_cube(\"ifuslot_063\")\n scube2 = hdcon2.extract_ifu_sensitivity_cube(\"ifuslot_063\")\n\n s1 = scube1.get_f50(161.4201, 50.8822, 3470.0, 5.5)\n s2 = scube2.get_f50(161.4201, 50.8822, 3470.0, 5.5)\n\n print(s1)\n # if different models passed should be different\n assert abs(s1 - s2) > 1e-19", "def test_pregenerated_model(sub_test, case):\n\n if case.startswith(\"sensi2\"):\n model_name = sub_test + \"_o2\"\n else:\n model_name = sub_test\n\n model_swig_folder = str(\n Path(__file__).parents[2]\n / \"build\"\n / \"tests\"\n / \"cpp\"\n / f\"external_{model_name}-prefix\"\n / \"src\"\n / f\"external_{model_name}-build\"\n / \"swig\"\n )\n\n test_model_module = amici.import_model_module(\n module_name=model_name, module_path=model_swig_folder\n )\n model = test_model_module.getModel()\n solver = model.getSolver()\n amici.readModelDataFromHDF5(\n options_file, model.get(), 
f\"/{sub_test}/{case}/options\"\n )\n amici.readSolverSettingsFromHDF5(\n options_file, solver.get(), f\"/{sub_test}/{case}/options\"\n )\n\n edata = None\n if \"data\" in expected_results[sub_test][case].keys():\n edata = amici.readSimulationExpData(\n str(expected_results_file), f\"/{sub_test}/{case}/data\", model.get()\n )\n rdata = amici.runAmiciSimulation(model, solver, edata)\n\n check_derivative_opts = dict()\n\n if model_name == \"model_nested_events\":\n check_derivative_opts[\"rtol\"] = 1e-2\n elif model_name == \"model_events\":\n check_derivative_opts[\"atol\"] = 1e-3\n\n if (\n edata\n and solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n and not model_name.startswith(\"model_neuron\")\n and not case.endswith(\"byhandpreeq\")\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n verify_simulation_opts = dict()\n\n if model_name.startswith(\"model_neuron\"):\n verify_simulation_opts[\"atol\"] = 1e-5\n verify_simulation_opts[\"rtol\"] = 1e-2\n\n if model_name.startswith(\"model_robertson\") and case == \"sensiforwardSPBCG\":\n verify_simulation_opts[\"atol\"] = 1e-3\n verify_simulation_opts[\"rtol\"] = 1e-3\n\n verify_simulation_results(\n rdata, expected_results[sub_test][case][\"results\"], **verify_simulation_opts\n )\n\n if model_name == \"model_steadystate\" and case == \"sensiforwarderrorint\":\n edata = amici.amici.ExpData(model.get())\n\n # Test runAmiciSimulations: ensure running twice\n # with same ExpData yields same results\n if (\n edata\n and model_name != \"model_neuron_o2\"\n and not (model_name == \"model_robertson\" and case == \"sensiforwardSPBCG\")\n ):\n if isinstance(edata, amici.amici.ExpData):\n edatas = [edata, edata]\n else:\n edatas = [edata.get(), edata.get()]\n\n rdatas = amici.runAmiciSimulations(\n model, solver, edatas, num_threads=2, failfast=False\n )\n verify_simulation_results(\n rdatas[0],\n expected_results[sub_test][case][\"results\"],\n **verify_simulation_opts,\n )\n verify_simulation_results(\n rdatas[1],\n expected_results[sub_test][case][\"results\"],\n **verify_simulation_opts,\n )\n\n # test residuals mode\n if solver.getSensitivityMethod() == amici.SensitivityMethod.adjoint:\n with pytest.raises(RuntimeError):\n solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n else:\n solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"res\", \"sres\", \"y\", \"sy\", \"sigmay\", \"ssigmay\"],\n **verify_simulation_opts,\n )\n with pytest.raises(RuntimeError):\n solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n\n chi2_ref = rdata.chi2\n\n # test likelihood mode\n solver.setReturnDataReportingMode(amici.RDataReporting.likelihood)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"llh\", \"sllh\", \"s2llh\", \"FIM\"],\n **verify_simulation_opts,\n )\n\n # test sigma residuals\n\n if (\n model_name == \"model_jakstat_adjoint\"\n and solver.getSensitivityMethod() != amici.SensitivityMethod.adjoint\n ):\n model.setAddSigmaResiduals(True)\n solver.setReturnDataReportingMode(amici.RDataReporting.full)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether activation changes chi2\n assert chi2_ref != rdata.chi2\n\n if (\n edata\n and 
solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n chi2_ref = rdata.chi2\n res_ref = rdata.res\n\n model.setMinimumSigmaResiduals(100)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether changing the minimum changes res but not chi2\n assert np.isclose(chi2_ref, rdata.chi2)\n assert not np.allclose(res_ref, rdata.res)\n\n model.setMinimumSigmaResiduals(-10)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether having a bad minimum results in nan chi2\n assert np.isnan(rdata.chi2)\n\n with pytest.raises(RuntimeError):\n model.getParameterByName(\"thisParameterDoesNotExist\")", "def setUp(self):\n # Set Model Parameters\n odeparam = np.array([1, 2])\n y0, y0_unc = 1.0, 0 \n t0, tmax = 0.0, 1.25\n\n # Set Method Parameters\n q = 1\n h = 0.1\n\n # Set up and solve ODE\n ibm = statespace.IBM(q=q, dim=1)\n solver = linsolve.LinearisedODESolver(ibm)\n ivp = linode.LogisticODE(t0, tmax, odeparam, y0, y0_unc)\n tsteps, means, __, rhs_parts, uncerts = solver.solve(ivp, stepsize=h)\n self.mean = odesolver.get_trajectory(means, 0, 0)\n\n # Set up BM and IBM covariance matrices\n evalpt = np.array(tsteps[[-1, -10]])\n derdat = (tsteps, rhs_parts, 0.)\n\n const, jacob = linearisation.compute_linearisation(\n ssm=ibm, initial_value=y0,\n derivative_data=derdat, prdct_tsteps=evalpt)\n\n # Compute GP Estimation of filter mean at t=tmax\n self.postmean = const + np.dot(jacob, odeparam)", "def test_FEMM_periodicity_angle():\n\n SPMSM_015 = load(join(DATA_DIR, \"Machine\", \"SPMSM_015.json\"))\n\n assert SPMSM_015.comp_periodicity() == (9, False, 9, True)\n\n simu = Simu1(name=\"test_FEMM_periodicity_angle\", machine=SPMSM_015)\n\n # Definition of the enforced output of the electrical module\n I0_rms = 250 / sqrt(2)\n Phi0 = 140 * pi / 180 # Maximum Torque Per Amp\n\n Id_ref = (I0_rms * exp(1j * Phi0)).real\n Iq_ref = (I0_rms * exp(1j * Phi0)).imag\n\n simu.input = InputCurrent(\n Id_ref=Id_ref,\n Iq_ref=Iq_ref,\n Na_tot=252 * 9,\n Nt_tot=4 * 9,\n N0=1000,\n )\n\n # Definition of the magnetic simulation: with periodicity\n simu.mag = MagFEMM(\n type_BH_stator=1,\n type_BH_rotor=1,\n is_periodicity_a=True,\n is_periodicity_t=False,\n nb_worker=cpu_count(),\n Kmesh_fineness=2,\n )\n simu.force = ForceMT()\n\n # Definition of the magnetic simulation: no periodicity\n # Definition of the magnetic simulation: no periodicity\n simu2 = simu.copy()\n simu2.mag.is_periodicity_a = False\n\n simu2.force = ForceMT()\n\n # Run simulations\n out = Output(simu=simu)\n simu.run()\n\n out2 = Output(simu=simu2)\n simu2.run()\n\n # Plot the result\n out.mag.B.plot_2D_Data(\n \"time\",\n \"angle[0]{°}\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.B.plot_2D_Data(\n \"angle{°}\",\n \"time[1]\",\n data_list=[out2.mag.B],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_B_space.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"wavenumber=[0,100]\",\n \"time[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_P_space_fft.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.force.AGSF.plot_2D_Data(\n \"freqs\",\n \"angle[0]\",\n data_list=[out2.force.AGSF],\n legend_list=[\"Periodic\", \"Full\"],\n 
save_path=join(save_path, simu.name + \"_P_fft2.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Tem.plot_2D_Data(\n \"time\",\n data_list=[out2.mag.Tem],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Tem_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n out.mag.Phi_wind_stator.plot_2D_Data(\n \"time\",\n \"phase\",\n data_list=[out2.mag.Phi_wind_stator],\n legend_list=[\"Periodic\", \"Full\"],\n save_path=join(save_path, simu.name + \"_Phi_wind_stator_time.png\"),\n is_show_fig=False,\n **dict_2D\n )\n\n # Compare both simu\n Bflux = out.mag.B\n arg_list = [\"angle\"]\n result = Bflux.get_rphiz_along(*arg_list)\n Brad = result[\"radial\"]\n angle = result[\"angle\"]\n\n Bflux2 = out2.mag.B\n arg_list = [\"angle\"]\n result2 = Bflux2.get_rphiz_along(*arg_list)\n Brad2 = result2[\"radial\"]\n\n assert_array_almost_equal(Brad, Brad2, decimal=1)\n\n return out, out2", "def run_model(species, Av, Air, rh, obliquity, temp=-1, verbosity=1):\n\n if isinstance(species, int):\n if species in range(1, len(speciesList) + 1):\n # The species indexing if offset by +1 to make the inputs consistent with the original fortran script.\n species = speciesList[species - 1]\n else:\n logging.error(f\"The inputted index of {species} is not currently supported \\n\"\n f\"Please input one of the following integers or strings: \\n\"\n f\"1: 'H2O', 2: 'H2O-CH4', 3: 'CO2', 4: 'CO'\")\n raise ValueError('Invalid species.')\n if species not in speciesList:\n logging.error(f\"The inputted species of \\\"{species}\\\" is not currently supported \\n\"\n f\"Please input one of the following integers or strings: \\n\"\n f\"1: 'H2O', 2: 'H2O-CH4', 3: 'CO2', 4: 'CO'\")\n raise ValueError('Invalid species.')\n\n if Av < 0:\n logging.error(\n f'A visual albedo of {Av} is not a valid input.'\n ' Please input a value greater than 0.')\n raise ValueError('Invalid visual albedo.')\n\n if verbosity == 0:\n logging.basicConfig(level='WARNING')\n elif verbosity == 1:\n logging.basicConfig(level='INFO')\n else:\n logging.basicConfig(level='DEBUG')\n\n logging.info(\"Input Parameters:\")\n logging.info(\n f'Species = {species}, Avis = {Av}, Air = {Air}, r_H = {rh}, Obl = {obliquity}')\n\n incl = (90 - obliquity) * math.pi / 180\n\n mass, xlt, xltprim, press, pprim, temp = sublime(species, temp)\n root = 1 / math.sqrt(mass * 2 * math.pi * boltz)\n\n nflag = 1\n perc = 0\n gd = None\n for n in range(0, nb):\n temp, gd, perc, nflag = main_loop(\n n, species, Av, Air, rh, obliquity, incl, temp, root, nflag, perc, gd)\n\n zbar = 0.\n for nn in range(0, nb - 1):\n zbar = zbar + 0.5 * (z[nn] + z[nn + 1]) * delsb\n\n zbar = zbar / 2\n zlog = math.log10(zbar)\n rlog = math.log10(rh)\n\n output = {\n \"species\": species,\n \"obliquity\": obliquity,\n \"r_H\": rh,\n \"rlog\": rlog,\n \"Av\": Av,\n \"Air\": Air,\n \"Zbar\": zbar,\n \"Zlog\": zlog,\n }\n\n logging.info(\"Final Results:\")\n logging.info(output)\n\n return output", "def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. 
Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0" ]
[ "0.629378", "0.6192165", "0.60618526", "0.6044517", "0.60348445", "0.6019483", "0.5948904", "0.5907536", "0.5832203", "0.58019847", "0.572914", "0.5647409", "0.5641304", "0.5582606", "0.55634594", "0.55448914", "0.5505208", "0.5491149", "0.54623306", "0.5438175", "0.53968614", "0.53924066", "0.53631943", "0.53469366", "0.5322211", "0.5309065", "0.5281697", "0.5276423", "0.527359", "0.5273153" ]
0.70113957
0
Close model and remove unzipped model from temporary folder.
def close_model(self): # Ensure model has been initialized at least once self._model_has_been_initialized("close_model") # terminate fmu model # - avoids error from calling self.fmu.terminate if termination has already been performed self._terminate_model() # free fmu self.fmu.freeInstance() # clean up # [TODO] enforce clean up even when exceptions are thrown, or after keyboard interruption shutil.rmtree(self.unzipdir, ignore_errors=True) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_up_temp_files():\n global __tmp_model_dir\n\n if __tmp_model_dir is not None:\n FileUtils.deleteDirectory(__tmp_model_dir)\n __tmp_model_dir = None", "def delete_model(self):\n os.remove(self.filepath)\n self.cmodel = None", "def cleanUp(self):\r\n # Close any open models\r\n openModels = getAllModels()\r\n if len(openModels):\r\n for model in openModels:\r\n setCurrentModel(model)\r\n performAction(\"FileClose\")\r\n # Wait \r\n time.sleep(1)", "def __exit__(self, exc_type, exc_value, traceback):\n if self.cleanup_model_file:\n os.unlink(self.model_file)", "def delete_best_model(self):\n if self.best_model_path.exists():\n # not using `missing_ok=True` because we are running this code on pythin 3.7\n self.best_model_path.unlink()", "def close(self):\n\n sp.call([\"convert\", \"{}_*\".format(self.tmp_prefix),\n self.filename])\n\n sp.call(\"rm {}_*\".format(self.tmp_prefix), shell=True)\n sp.call([\"rmdir\", self.tmp_dir])", "def tearDown(self):\n self.model.close()\n os.remove(self.DATABASE_PATH)", "def cleanup(self):\n if os.path.exists(f\"{self.save_path}{self.name}\"):\n shutil.rmtree(f\"{self.save_path}{self.name}\")", "def dispose(self):\n rmtree(self._temp_path)", "def shutdown(self):\n del self.model\n del self.train_dataset\n del self.test_dataset", "def tearDownClass(self):\n remove('temp_mol_file.csv')", "def shutdown(self):\n path = self.opt.get('model_file', None)\n if path is not None:\n self.save(path + '.shutdown_state')\n super().shutdown()", "def tearDown(self):\r\n remove_files(self.files_to_remove, False)\r\n if self.tmpdir:\r\n rmtree(self.tmpdir)\r\n\r\n # clean up the file from init_flowgram_file\r\n if (hasattr(self, \"tmp_filename\") and exists(self.tmp_filename)):\r\n remove(self.tmp_filename)", "def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space", "def clean_up(model_path):\n cmds = [\"rm */grad*.pickle\",\n \"rm -r checkpoints\",\n \"rm */train_len\",\n \"rm log_human_read.csv\",\n \"rm */log_human_read.csv\",\n \"rm -r best_model\",\n \"rm */*epoch*\"]\n\n for cmd in cmds:\n os.system(\"cd {} && {}\".format(model_path, cmd))", "def teardown(self):\n super(TestCisObjInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def teardown(self):\n super(TestCisPickleInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def clean(self):\n\t\tself.archiver.closeFile()", "def tearDown(self) -> None:\n\n self.temp_env_file.close()\n os.remove(self.temp_env_file.name)\n\n del self.temp_env_file\n del self.test_name\n del self.helper", "def close(self):\n\t\tif os.path.exists(self.name): os.system(\"rm -rf %s\" % (self.name))\n\t\tos.system(\"mkdir %s\" % (self.name))\n\t\told_path = os.getcwd()\n\t\tos.chdir(self.name)\n\t\tfor i in self.objects.keys():\n\t\t\t# dill will taken care of down the line\n\t\t\tpickled_object(self.objects[i], name = i,\n\t\t\t\tdefault = self._default).save()\n\t\tos.chdir(old_path)", "def teardown(self):\n super(TestCisPickleOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def __deleteSave(self) -> None:\n os.remove(self.save_location)", "def tearDown(self):\n self.db.close()\n self.dbfile.close()\n os.unlink(self.path)", "def cleanup(self):\r\n if self.tempDirectory != None:\r\n shutil.rmtree(self.tempDirectory, True)\r\n self.tempDirectory = None", "def tearDown(self):\n utils.rm_rf(TMP_DIR_PATH)", "def unload_model(app: FastAPI) -> None:\n\n logging.info(\"Shuting down the 
app\")\n app.state.model = None", "def shutdown(self):\n path = self.opt.get('model_file', None)\n if path is not None and hasattr(self, 'optimizer'):\n self.save(path + '.shutdown_state')\n super().shutdown()", "def close(self):\r\n if self._session:\r\n self._session.close()\r\n self._session = None\r\n try:\r\n self._writer.remove_file()\r\n self._reader.remove_file()\r\n except Oct2PyError:\r\n pass", "def cleanup(self):\n if os.path.exists(self.tgzfile):\n os.remove(self.tgzfile)\n\n if os.path.exists(self.dirname):\n shutil.rmtree(self.dirname)", "def teardown(self):\n super(TestCisObjOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)" ]
[ "0.71848893", "0.7168014", "0.69020855", "0.6778169", "0.66447943", "0.6597088", "0.63135016", "0.6312071", "0.63115525", "0.62721944", "0.6262568", "0.6260771", "0.6243184", "0.61231935", "0.60675275", "0.60634017", "0.6029513", "0.59984267", "0.5996684", "0.599638", "0.5995369", "0.5994043", "0.59669465", "0.591555", "0.5895086", "0.5891867", "0.5832516", "0.5832041", "0.58318305", "0.5831473" ]
0.7889111
0
Get a list of all variables in the sim (removing duplicates, if any). Note, list is kept the same from first time this method is called.
def get_all_var_names(self): if hasattr(self, "all_var_names"): return self.all_var_names # Append all variables in model (defined in YAML). aux_all_var_names = [] aux_all_var_names.extend(self.sim_config_params) aux_all_var_names.extend(self.sim_inputs) aux_all_var_names.extend(self.sim_outputs) aux_all_var_names.extend(self.sim_other_vars) # Remove duplicates (if any) -- Keeping initial order all_var_names = [aux_all_var_names[i] for i in range(len(aux_all_var_names)) \ if aux_all_var_names[i] not in aux_all_var_names[:i]] # Store for following calls self.all_var_names = all_var_names return self.all_var_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_variables(self):\n return []", "def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out", "def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out", "def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out", "def get_variables(self):\n return [self.variables[key] for key in sorted(self.variables)]", "def get_all_variables(self):\n return [self.item]", "def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)", "def get_vars(self):\n return [self.mu, self.var]", "def variables(self):\n return sorted(set(self._variables))", "def getVariables(self):\n return [x for x in self.variables.values() if x is not None]", "def getVariables(self):\n statVars = [self[vn] for vn in self.statVars]\n timeVars = [self[vn] for vn in self.timeVars]\n return statVars + timeVars", "def get_all_variables(self):\n return self.item.get_all_variables()", "def variables(self):\n return {u for u in self if u.type == 'var'}", "def get_variables(self):\n return [self.g_t, self.m_t]", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def variables(self):\n return [term.variable for term in self.terms]", "def variables(self):\n return [i.name for i in self.inputs + self.outputs]", "def get_variables(self):\n return set(self._head_vars)", "def marginals(self):\n all_variables = [None for ii in range(self.nvars)]\n for ii in range(self.nunique_vars):\n for jj in self.unique_variable_indices[ii]:\n all_variables[jj] = self.unique_variables[ii]\n return all_variables", "def to_list(self):\n return copy.deepcopy(self._varvals)", "def get_variables(self):\n\n self._enforce_coupling()\n\n dv = []\n for scenario in self.scenarios:\n if scenario.group_master:\n dv.extend(scenario.active_variables())\n else:\n dv.extend(scenario.uncoupled_variables())\n\n for body in self.bodies:\n if body.group_master:\n dv.extend(body.active_variables())\n else:\n dv.extend(body.uncoupled_variables())\n\n return dv", "def get_variables_list(self):\n variables = self.variables.values()\n # handle reference variables\n for variable in variables:\n name = variable['name']\n if name in self.references:\n variable['data'] = self.references[name]\n return variables", "def all_variables(formula):\n return collect_unique_nodes(formula, lambda x: isinstance(x, Variable))", "def get_variables(self):\n\t\treturn self.variables", "def get_all_variables(self):\n raise NotImplementedError()", "def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result", "def get_all_variables(self):\n return self._properties.copy()", "def setOfVariables(self):\n return set(self.dictOfVariables().keys())", "def get_all_variables(self):\n return self.start.get_all_variables() + self.end.get_all_variables()", "def get_all_variables_names(self):\n return self.project.get_variable_names() + self.design.get_variable_names()" ]
[ "0.7587621", "0.74283415", "0.74283415", "0.74283415", "0.74034715", "0.73497486", "0.73163354", "0.71726215", "0.7025907", "0.6996247", "0.6937989", "0.6923813", "0.67796665", "0.6772739", "0.67677814", "0.67526543", "0.6719864", "0.6713113", "0.6711427", "0.67106885", "0.6696938", "0.6681457", "0.6674172", "0.66414714", "0.66307503", "0.6598537", "0.6590695", "0.65682864", "0.6561983", "0.65334386" ]
0.7627399
0
Get var indices for each var name provided in list.
def _var_names_to_indices(self, var_names: List): if type(var_names) is not type([]): # Return empty array if input is not 'list' type print("[_var_names_to_indices] Provided input is not of type list.") return [] indices_array = [] names_array = [] for name in var_names: if name not in self.vars_to_idx.keys(): print("[_var_names_to_indices] Invalid variable name '{}' has been skipped.".format(name)) continue indices_array.append(self.vars_to_idx[name]) names_array.append(name) if not len(var_names) > 0: print("[_var_names_to_indices] No (valid) states have been provided.") return indices_array, names_array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indices_of_var(v):\n name = v.varName\n indices = name[2:].split(',')\n i, j = int(indices[0]), int(indices[1])\n return i, j", "def index(self, variables):\n return [self._variables.index(v) for v in variables]", "def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)", "def vars(svars):\n return np.array([pm.var(var) for var in svars.split()])", "def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result", "def index(self, varname):\n if not isinstance(varname, str):\n raise TypeError(\"argument must be str\")\n varname = self._find_vars(varname, empty_ok=False, single=True)[0]\n return self._varlist.index(varname)", "def coord_indices_of(self, v_list):\n return [self.coord_index_of(v) for v in v_list]", "def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]", "def variable_parser(var_list, prefix):\r\n ret_list = []\r\n for var in var_list:\r\n varname = var.name\r\n varprefix = varname.split('/')[0]\r\n if varprefix == prefix:\r\n ret_list.append(var)\r\n return ret_list", "def get_predicate_indices(tags: List[str]) -> List[int]:\n return [ind for ind, tag in enumerate(tags) if \"V\" in tag]", "def get_indexes(from_list, find_list):\n\n df_find = pd.DataFrame(find_list, columns=['value'])\n df_from = pd.DataFrame(list(zip(from_list, np.arange(len(from_list)))), columns=['value', 'index'])\n indexes = pd.merge(df_from, df_find, on='value', how='inner')['index'].values\n return indexes", "def get_varnams(self, varnam_list):\n self.varnams = [[v.name, v.composer, v.talam] for v in varnam_list\n if v.raga == self.name]", "def map_to_scope(var_list):\n return {var.op.name.split('/', 1)[1]: var for var in var_list}", "def variable_parser(var_list, prefix):\r\n ret_list = []\r\n for var in var_list:\r\n varname = var.name\r\n varprefix = varname.split('/')[0]\r\n if varprefix == prefix:\r\n ret_list.append(var)\r\n elif prefix in varname:\r\n ret_list.append(var)\r\n return ret_list", "def getOqiVarNames( self ):\n\n if self.oqiVarNames:\n return self.oqiVarNames.keys()\n\n n = self.adb.get( \"nOqiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oqiVarName\",\n indx ) \n self.oqiVarNames[name] = indx\n\n return self.oqiVarNames.keys()", "def getVar2FactorsMap(self):\r\n V = self.getAllNodes()\r\n return list(list(idx for idx,f in enumerate(self.factors) if i in f.var) for i in V)", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def compute_variable_indexes(path, overwrite=True, multiproc=False):\n if multiproc is True:\n tf.keras.backend.clear_session()\n set_cpu_option()\n\n gin_bindings = [\n \"evaluation.evaluation_fn = @variables_idx\",\n \"variables_idx.num_train = 10000\", \"evaluation.random_seed = 2051556033\",\n \"dataset.name='auto'\", \"evaluation.name = 'variables index'\"\n ]\n path = pathlib.Path(path)\n result_path = path.parent.parent / \"metrics\" / \"variance\" / \"filtered_variables\"\n logger.info(\"Computing variable indexes of {}\".format(path.parent.parent))\n gin_evaluation(path, result_path, overwrite, gin_bindings)", "def 
names_to_indices(names, ordered_names):\r\n indices = []\r\n names_list = list(names)\r\n for ordered_name in ordered_names:\r\n if ordered_name in names_list:\r\n indices.append(names_list.index(ordered_name))\r\n return array(indices)", "def get_list_vars(my_vars):\n lists = []\n for var in my_vars:\n try:\n temp = my_vars[var].getValue()\n #print var + '=' + str(temp)\n except ValueError:\n lists.append(var)\n return lists", "def get_vars_by_prefix(self, prefix):\n\n t_vars = tf.global_variables()\n return [var for var in t_vars if prefix in var.name]", "def get_indexes(self, variable, *args):\n\n return [get_subset_idxs(data, min, max)\n for data, (min, max) in args]", "def getOhcVarNames( self ):\n\n if self.ohcVarNames:\n return self.ohcVarNames.keys()\n \n n = self.adb.get( \"nOhcVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"ohcVarName\",\n indx ) \n self.ohcVarNames[name] = indx\n\n return self.ohcVarNames.keys()", "def read_variables(var_or_list):\n session = ph.get_session()\n return session.run(var_or_list)", "def get_variable_names(self):\n return [var[1] for var in self.variables]", "def getOfcVarNames( self ):\n\n if self.ofcVarNames:\n return self.ofcVarNames.keys()\n \n n = self.adb.get( \"nOfcVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"ofcVarName\",\n indx ) \n self.ofcVarNames[name] = indx\n\n return self.ofcVarNames.keys()", "def _get_indices(self, indices: VecEnvIndices) -> Iterable[int]:\n if indices is None:\n indices = range(self.num_envs)\n elif isinstance(indices, int):\n indices = [indices]\n return indices", "def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). 
e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result", "def var_index(self, code=1, s=False):\n if s:\n code = 2\n\n index = None\n for i, var in enumerate(self.primary_header['variables']):\n if var['Variable code'] == code:\n assert index is None, 'Appears to be two sets of same data in profile'\n index = i\n return index", "def getOthVarNames( self ):\n\n if self.othVarNames:\n return self.othVarNames.keys()\n\n n = self.adb.get( \"nOthVars\" )\n for indx in range( n ):\n name = self.adb.get( \"othVarName\",\n indx ) \n self.othVarNames[ name ] = indx\n\n return self.othVarNames.keys()" ]
[ "0.74116653", "0.6916294", "0.61451054", "0.611666", "0.59596854", "0.5946558", "0.5905192", "0.5892637", "0.58531195", "0.5848885", "0.5842754", "0.5840236", "0.58368546", "0.5804591", "0.5756725", "0.5740407", "0.57103246", "0.56983846", "0.56811184", "0.5670195", "0.5658434", "0.5651586", "0.5642636", "0.56304073", "0.56239456", "0.5611239", "0.5608428", "0.55922604", "0.556347", "0.5553206" ]
0.72467065
1
Get unique id for instance name (identifier).
def _get_unique_id(self): now = datetime.now() u_id = now.second + 60*(now.minute + 60*(now.hour + 24*(now.day + 31*(now.month + 366*(now.year))))) return "instance" + str(u_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def instance_id(self) -> str:\n return pulumi.get(self, \"instance_id\")", "def instance_identifier(self):\n return self._instance_identifier", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def get_instance_id(self):\n return \"{0}-{1}\".format(self._vc_name, self._host)", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"instance_id\")", "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def _get_instance_id(self):\n return self.__instance_id", "def get_id(self):\n return self.name", "def id(self):\n # Might also be a first 12-characters shortcut.\n return self._id", "def instance_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"instance_id\")", "def get_instance_id(self):\n return self.instance_id", "def unique_id(self):\r\n name_slug = slugify(self._name)\r\n return f\"{name_slug}\"", "def get_identifier(self) -> str:\n return self.identifier", "def get_instance_id():\n global _instance_id\n if _instance_id == '__unset':\n try:\n _instance_id = _fetch_instance_id()\n except IOError:\n log.exception(\"Exception retrieving InstanceId\")\n _instance_id = None\n\n return _instance_id" ]
[ "0.7969054", "0.7793897", "0.7737086", "0.7737086", "0.7737086", "0.7737086", "0.7737086", "0.7737086", "0.7641148", "0.763706", "0.7468007", "0.7468007", "0.7468007", "0.7468007", "0.7468007", "0.7468007", "0.7405071", "0.7405071", "0.7405071", "0.7374773", "0.7366122", "0.73619384", "0.73375064", "0.7329024", "0.7329024", "0.7329024", "0.73041207", "0.7303458", "0.7259503", "0.7253596" ]
0.7873312
1
Make sure all elements are in bond_len_dict, and return the value
def check_bond_len(dict, el_a, el_b): if el_a in dict: if el_b in dict[el_a]: return dict[el_a][el_b] print() print(el_a + " and " + el_b + " bond length currently unsupported. Add value to the csv file.") sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bond_checker(atom, dict, bond_dict):\n bound = []\n for item, values in dict.items():\n bond_range = check_bond_len(bond_dict, atom[0], values[\"element\"]) + 0.2\n if distance_checker(atom[1:], values[\"coor\"]) <= bond_range:\n bound.append(item)\n return bound", "def get_dict_data_len(x_dict: Dict[Any, Collection]):\n return check_all_same_length(*x_dict.values())", "def __len__(self):\n return dict.__len__(self) // 2", "def __len__(self):\n return len(self.pairs)", "def __len__(self):\n return len(self.vals)", "def __len__(self):\n return self._data_dict.__len__()", "def __len__(self) -> int:\n return len(self._dict)", "def __len__(self) -> int:\n return len(self.value)", "def __len__(self) -> int:\n return len(self.mapping)", "def _dict_length(typingctx, d):\n resty = types.intp\n sig = resty(d)\n\n def codegen(context, builder, sig, args):\n fnty = ir.FunctionType(\n ll_ssize_t,\n [ll_dict_type],\n )\n fn = cgutils.get_or_insert_function(builder.module, fnty,\n 'numba_dict_length')\n [d] = args\n [td] = sig.args\n dp = _container_get_data(context, builder, td, d)\n n = builder.call(fn, [dp])\n return n\n\n return sig, codegen", "def __len__(self):\n return len(self.__values)", "def __len__(self):\n return len(self.value)", "def __len__(self):\n return len(self.value)", "def __len__(self):\n return len(self.value)", "def _check_values_len(self, data_batch: Dict[str, List[str]]):\n values_len = [len(v) for _, v in data_batch.items()]\n unique_len = len(set(values_len))\n assert unique_len == 1, \"Length of values are not consistent across\"", "def __len__(self):\n return reduce(operator.add, self.values(), 0)", "def __len__(self):\n return len(self._values)", "def __len__(self):\n return len(self._value)", "def __len__(self):\n return len(self.atom_rings)", "def __len__(self):\n return len(self._dict)", "def test_get_bc_lens(self):\r\n\r\n sample_data = {('CCCC', ''): 's3', ('AAAA', ''): 's1',\r\n ('TTTT', ''): 's2'}\r\n\r\n expected_lens = [4]\r\n\r\n actual_lens = get_bc_lens(sample_data)\r\n\r\n self.assertEqual(actual_lens, expected_lens)\r\n\r\n # Test with multiple lengths\r\n\r\n sample_data = {('CCCC', ''): 's3', ('', ''): 's1',\r\n ('TTTTT', ''): 's2'}\r\n\r\n expected_lens = [5, 4, 0]\r\n\r\n actual_lens = get_bc_lens(sample_data)\r\n\r\n self.assertEqual(actual_lens, expected_lens)", "def __len__(self):\n\n value_length = []\n for v in chain(self.values(), self.metainfo_values()):\n if isinstance(v, LabelData):\n value_length.append(v.label.shape[0])\n elif is_splitable_var(v):\n value_length.append(len(v))\n else:\n continue\n\n # NOTE: If length of values are not same or the current data sample\n # is empty, return length as 1\n if len(list(set(value_length))) != 1:\n return 1\n\n length = value_length[0]\n return length", "def len12(self, len): # -> None:\n ...", "def pick_length(self, ak_spec: Union[str, BKT]) -> Tuple[Optional[List[Hedron]], Optional[BKT]]:\n ...", "def __len__(self):\n return 19", "def __len__(self):\n return self.keyvaluepair_set.count()", "def __len__(self):\n return sum(itertools.imap(len, self._forwardMap.itervalues()))", "def __len__(self):\n return sum(list(self.lookup.values()))", "def __len__():", "def __len__():" ]
[ "0.6285508", "0.6202662", "0.59115434", "0.5817686", "0.5773312", "0.5631281", "0.5628951", "0.5608762", "0.559284", "0.5581179", "0.5579353", "0.5575587", "0.5575587", "0.5575587", "0.5542769", "0.55310816", "0.55075777", "0.5494907", "0.5452239", "0.54492265", "0.5444122", "0.5437052", "0.5422979", "0.5419606", "0.5407918", "0.5406074", "0.54040426", "0.54022837", "0.5389472", "0.5389472" ]
0.70560527
0
Check for all atoms in bonding range
def bond_checker(atom, dict, bond_dict): bound = [] for item, values in dict.items(): bond_range = check_bond_len(bond_dict, atom[0], values["element"]) + 0.2 if distance_checker(atom[1:], values["coor"]) <= bond_range: bound.append(item) return bound
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overlaps(self, atom, check_up_to, get_all_overlapping_atoms=True):\n if (check_up_to == 0):\n return True, []\n distances = self.structure.get_distances(atom, [i for i in range(0, check_up_to)], mic=True)\n minimum_percentage_allowed = 0.99\n valid = True\n overlappingAtoms = []\n\n init_distance = self.Atoms[atom][\"radius\"]\n\n for i in range(0, check_up_to):\n if (i == atom):\n continue\n minimum_distance = init_distance + self.Atoms[i][\"radius\"]\n if (distances[i] / minimum_distance < minimum_percentage_allowed):\n overlappingAtoms.append([i, minimum_distance - distances[i]])\n #print(\"Minimum allowed: \" + str(minimum_distance) + \", dist: \" + str(distances[i]))\n valid = False\n if (not get_all_overlapping_atoms):\n break\n\n return valid, overlappingAtoms", "def validBond(index1, index2, direction):\n #print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\n cell1 = index1/numAtomsPerCell\n cell2 = index2/numAtomsPerCell\n #Find the coordinates of the cell in units of interaction cells\n posInX1 = int(cell1/(size*size))\n posInX2 = int(cell1/(size*size))\n leftover1 = cell1%(size*size)\n leftover2 = cell2%(size*size)\n posInY1 = int(leftover1/size)\n posInY2 = int(leftover2/size)\n posInZ1 = leftover1%size\n posInZ2 = leftover2%size\n \n #Now, a valid interaction can cross an interaction cell boundary in any direction,\n #but it has a maximum length of one interaction cell. However, I have made the minimum\n #size of this larger translated lattice equal to 3*3*3 interaction cells. Therefore,\n #when we hit an edge and get in invalid interaction, the cells will be at least 2\n #interaction cells apart in the direction of the interaction.\n if(direction[0]):\n if numpy.abs(posInX1 - posInX2)>1:\n #print \"false\"\n return False\n if(direction[1]):\n if numpy.abs(posInY1 - posInY2)>1:\n #print \"false\"\n return False\n if(direction[2]):\n if numpy.abs(posInZ1 - posInZ2)>1:\n #print \"false\"\n return False\n print #\"true\"\n return True\n\n #Old (incorrect) method:\n if 0:\r\n print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\r\n cell1 = index1/numAtomsPerCell\r\n cell2 = index2/numAtomsPerCell\r\n zRow1 = cell1/size#this relies on the list being created in the nested for loop that was used, z within y within x\r\n zRow2 = cell2/size\r\n if(zRow1 != zRow2 and direction[2]):\n print \"false\"\r\n return False\r\n xLayer1 = cell1/(size*size)\r\n xLayer2 = cell2/(size*size)\r\n if(xLayer1 != xLayer2 and direction[1]):\n print \"false\"\r\n return False\r\n #shouldn't have to check z, because if it's not valid in z direction, it would be off the list (>len(allAtoms))\n print \"true\"\r\n return True", "def bond_check(bond_distance,bond_min=0,bond_max=1.5): # we can define the default min and max in the def\n if bond_distance >bond_min and bond_distance<bond_max:\n return True\n else:\n return False", "def bond_atoms(atom_list):\n pass", "def in_range(center_bot, nanobots):\n return [b for b in nanobots if center_bot.distance_to(b) <= center_bot.strength]", "def test_is_in_ring(self):\n molecule = Molecule.from_smiles(\"c1ccccc1\")\n\n for atom in molecule.atoms:\n if atom.atomic_number == 6:\n assert atom.is_in_ring()\n\n for bond in molecule.bonds:\n if 1 in (bond.atom1.atomic_number, bond.atom2.atomic_number):\n continue\n assert bond.is_in_ring()", "def identify_bonds(chosen_atom, atom_list):\n list_of_hydrogens = ['H15', 'H14', 'H13', 'H12', 'H11', 'H10', 'H9', 'H8', 'H7', 'H6', 'H5', 'H4', 'H3', 'H2', 
'H1'] \n if ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name != \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 2)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n elif ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name == \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.8)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n else:\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 1.6) and (abs(chosen_atom.y - atom.y) <= 1.6) and (abs(chosen_atom.z - atom.z) <= 1.6))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.6)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n for elements in nearby_atoms:\n if (check_if_no_bond(chosen_atom, elements, bond_list, bond_list_3) == True):\n nearby_atoms.remove(elements)\n if (len(nearby_atoms) == len(identified_bonds)):\n return identified_bonds\n else:\n return []", "def check_bond(atom1, atom2):\n check = False\n for bond in bond_list:\n if (((bond.identity == get_bond_id(atom1, atom2)[0]) or (bond.identity == get_bond_id(atom1, atom2)[1])) and 0.975 * bond.length <= calculate_3D_distance_2_atoms(atom1, atom2) <= 1.025 * bond.length):\n check = True\n break\n return check", "def bond_check(distance, minimum=0, maximum=1.5): # when variables are set equal to => default\n if distance > minimum and distance < maximum:\n return True\n return False", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def check_positions_in_range_for_list(self, reachable, total, list):\n for pose in list:\n total += 1\n distance_to_base = math.sqrt(pose[0] ** 2 + pose[1] ** 2 + pose[2] ** 2)\n if distance_to_base < self.robot_reachable_distance:\n reachable += 1\n else:\n rospy.logwarn('Position not in range: {}, distance to base: {}'.format(pose, distance_to_base))\n return reachable, total", "def reached_final_point():\n return all(point.constraints[b.atom_indexes] == b.final_dist\n for b in self.bonds)", "def _check_atom_connectivity_in_rd_mol_block(self, rmg_mol, rd_mol_block):\n for line in rd_mol_block:\n splits = line.split()\n if len(splits) == 4:\n index1, index2 = int(splits[0]) - 1, int(splits[1]) - 1\n self.assertIn(rmg_mol.atoms[index1], list(rmg_mol.atoms[index2].edges.keys()))", "def _idxs_are_present(self, *args):\n return set(args).issubset(set(range(self.n_atoms)))", "def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for 
(a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14", "def challenge1(self):\n self.parse_input()\n\n # Find strongest nanobot\n strongest = max(self.nanobots, key=lambda n: n.r)\n\n # Find all in range of this\n num_in_range = 0\n for nanobot in self.nanobots:\n if manhattan_dist(nanobot.coord, strongest.coord) <= strongest.r:\n num_in_range += 1\n\n print(f\"{num_in_range} nanobots are in range of strongest\")", "def purgeHis(atoms):\n for a in atoms:\n if getAtype(a) == \"N\" or getAtype(a) == \"NA\":\n found = 0\n for c in atoms:\n if not c == a and dist(c,a) < COVALENT_BOND_DIST:\n found = 1\n break\n if not found:\n atoms.remove(a)\n return atoms\n if DEBUG: print \"Warning! 
Residue %s appears to be incomplete\" % (atoms[0][17:20]+atoms[0][22:26]+atoms[0][21])\n return False", "def in_bomb_range(self, game_state: dict):\n is_in_bomb_range = False\n agent_position = game_state['self'][3]\n agent_position = list(agent_position)\n\n for bomb in game_state['bombs']:\n if agent_position == list(bomb[0]):\n is_in_bomb_range = True\n\n for i in range(3):\n agent_search = copy.copy(agent_position)\n if agent_position[0] - i - 1 >= 0:\n agent_search[0] = agent_position[0] - i - 1\n if agent_search == list(bomb[0]):\n is_in_bomb_range = True\n\n for i in range(3):\n agent_search = copy.copy(agent_position)\n if agent_position[0] + i + 1 <= 16:\n agent_search[0] = agent_position[0] + i + 1\n if agent_search == list(bomb[0]):\n is_in_bomb_range = True\n\n for i in range(3):\n agent_search = copy.copy(agent_position)\n if agent_position[1] - i - 1 >= 0:\n agent_search[1] = agent_position[1] - i - 1\n if agent_search == list(bomb[0]):\n is_in_bomb_range = True\n\n for i in range(3):\n agent_search = copy.copy(agent_position)\n if agent_position[1] + i + 1 <= 16:\n agent_search[1] = agent_position[1] + i + 1\n if agent_search == list(bomb[0]):\n is_in_bomb_range = True\n\n # check if a stone wall is between the agent an the bomb\n if is_in_bomb_range:\n if agent_position[0] == list(bomb[0])[0]:\n if agent_position[1] < list(bomb[0])[1]:\n for i in range(agent_position[1], list(bomb[0])[1]):\n if game_state['field'][agent_position[0]][i] == -1:\n is_in_bomb_range = False\n else:\n for i in range(list(bomb[0])[1], agent_position[1]):\n if game_state['field'][agent_position[0]][i] == -1:\n is_in_bomb_range = False\n elif agent_position[1] == list(bomb[0])[1]:\n if agent_position[0] < list(bomb[0])[0]:\n for i in range(agent_position[0], list(bomb[0])[0]):\n if game_state['field'][i][agent_position[1]] == -1:\n is_in_bomb_range = False\n else:\n for i in range(list(bomb[0])[0], agent_position[0]):\n if game_state['field'][i][agent_position[1]] == -1:\n is_in_bomb_range = False\n\n return is_in_bomb_range", "def check_bp(self):\n return self.min_basepairs <= self.seqdata.basepairs <= self.max_basepairs", "def ValidClusterRanges(self):\n for cluster_range in self.cluster_ranges:\n the_range = cluster_range.split(\"-\")\n print(f\"Checking that range {the_range} falls within our data area\")\n try:\n if int(the_range[0]) < self.low_data_cluster or int(the_range[1]) > self.high_data_cluster:\n print(f\"False. {the_range[0]} or {the_range[1]} is outside of our data area\")\n return False\n except TypeError as t_err:\n print(f\"Error. 
Range does not appear to be an int\")\n return False\n return True", "def check_if_no_bond(atom1, atom2, bond_list, bond_generic):\n check = False\n for bond in bond_list:\n if ((bond.identity == get_bond_id(atom1, atom2)[0]) or (bond.identity == get_bond_id(atom1, atom2)[1]) and calculate_3D_distance_2_atoms(atom1, atom2) > 1.05 * bond.length):\n check = True\n for bond in bond_generic:\n if (((atom1.atom_name[0] + atom2.atom_name[0]) == bond.identity) or (atom2.atom_name[0] + atom1.atom_name[0] == bond.identity) and (calculate_3D_distance_2_atoms(atom1, atom2) > 1.05 * bond.length)):\n check = True \n return check", "def check_positions_in_range(self):\n reachable = 0\n total = 0\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.close_positions_world)\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.medium_positions_world)\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.far_positions_world)\n\n return float(reachable) / float(total)", "def bondscan(lammps_command, potential, symbols, mpi_command=None,\n rmin=0.5, rmax=5.5, rnum=100,\n thetamin=1.0, thetamax=180, thetanum=100):\n # Build filedict if function was called from iprPy\n try:\n assert __name__ == pkg_name\n calc = iprPy.load_calculation(calculation_style)\n filedict = calc.filedict\n except:\n filedict = {}\n \n # Create cluster object\n cluster = am.cluster.BondAngleMap(rmin=rmin, rmax=rmax, rnum=rnum,\n thetamin=thetamin, thetamax=thetamax, thetanum=thetanum,\n symbols=symbols)\n \n # Get lammps units\n lammps_units = lmp.style.unit(potential.units)\n \n # Define lammps variables\n lammps_variables = {}\n \n # Add range parameters\n lammps_variables['rmin'] = rmin\n lammps_variables['rmax'] = rmax\n lammps_variables['rnum'] = rnum\n lammps_variables['thetamin'] = thetamin\n lammps_variables['thetamax'] = thetamax\n lammps_variables['thetanum'] = thetanum\n\n # Add atomic types\n if len(cluster.symbols) == 1:\n natypes = 1\n atype = np.array([1,1,1])\n symbols = cluster.symbols\n elif len(cluster.symbols) == 3:\n symbols, atype = np.unique(cluster.symbols, return_inverse=True)\n atype += 1\n natypes = len(symbols) \n lammps_variables['natypes'] = natypes\n lammps_variables['atype1'] = atype[0]\n lammps_variables['atype2'] = atype[1]\n lammps_variables['atype3'] = atype[2]\n \n # Add potential information\n lammps_variables['atomman_pair_info'] = potential.pair_info(symbols)\n lammps_variables['atom_style'] = potential.atom_style\n lammps_variables['units'] = potential.units\n\n # Build lammps input script\n # Read template file\n template_file = 'bondscan.template'\n script_file = 'bondscan.in'\n template = iprPy.tools.read_calc_file(template_file, filedict)\n with open(script_file, 'w') as f:\n f.write(iprPy.tools.filltemplate(template, lammps_variables, '<', '>'))\n\n # Run lammps and extract data\n lmp.run(lammps_command, script_file, mpi_command=mpi_command, logfile=None, screen=False)\n energies = uc.set_in_units(np.loadtxt('energies.txt'), lammps_units['energy'])\n \n # Round energies to a specified precision\n str_energies = []\n for energy in energies:\n str_energies.append(np.format_float_scientific(energy, precision=5))\n cluster.df.energy = np.array(str_energies, dtype=float)\n \n # Collect results\n results_dict = {}\n results_dict['cluster'] = cluster\n \n return results_dict", "def find_nb(self, ox1, atoms, r1, r2):\n nb_check = [{}, \"\"]\n for k in atoms:\n dox = Vector.length(ox1[1][1] - atoms[k][1])\n if (k 
!= ox1[0] and ox1[1][2] != atoms[k][2] and\n dox <= (r1 + r2)):\n nb_check[0][k] = atoms[k]\n if dox <= r2:\n nb_check[1] = ''.join([nb_check[1], atoms[k][0]])\n return nb_check", "def has_degenerated_atom_positions(self, threshold):\n\n for i in range(0, len(self.conformer.GetNumAtoms())):\n center = self.conformer.GetAtomPosition(i)\n point = [center.x, center.y, center.z]\n surrounding = self.kd_tree.query_ball_point(point, threshold)\n\n if len(surrounding) > 1:\n return True\n\n return False", "def sstable_marking_test_not_intersecting_all_ranges(self):\n cluster = self.cluster\n cluster.populate(4).start(wait_for_binary_proto=True)\n node1, node2, node3, node4 = cluster.nodelist()\n\n debug(\"Inserting data with stress\")\n node1.stress(['write', 'n=3', 'no-warmup', '-rate', 'threads=1', '-schema', 'replication(factor=3)'])\n\n debug(\"Flushing nodes\")\n cluster.flush()\n\n repair_options = '' if self.cluster.version() >= '2.2' else '-inc -par'\n\n debug(\"Repairing node 1\")\n node1.nodetool(\"repair {}\".format(repair_options))\n debug(\"Repairing node 2\")\n node2.nodetool(\"repair {}\".format(repair_options))\n debug(\"Repairing node 3\")\n node3.nodetool(\"repair {}\".format(repair_options))\n debug(\"Repairing node 4\")\n node4.nodetool(\"repair {}\".format(repair_options))\n\n for out in (node.run_sstablemetadata(keyspace='keyspace1').stdout for node in cluster.nodelist() if len(node.get_sstables('keyspace1', 'standard1')) > 0):\n self.assertNotIn('Repaired at: 0', out)", "def _check_market_place_in_range(self):\n\t\tfor building in self.get_buildings_in_range():\n\t\t\tif building.id == BUILDINGS.MARKET_PLACE_CLASS:\n\t\t\t\tif StaticPather.get_path_on_roads(self.island, self, building) is not None:\n\t\t\t\t\t# a market place is in range\n\t\t\t\t\treturn\n\t\t# no market place found\n\t\tself.session.ingame_gui.message_widget.add(self.position.origin.x, self.position.origin.y, \\\n\t\t 'NO_MARKET_PLACE_IN_RANGE')", "def IsInRange(self, id, start, isStartInclusive, end, isEndInclusive):\r\n if isStartInclusive == False:\r\n start = (start + 1) % NODES\r\n if isEndInclusive == True:\r\n end = (end + 1) % NODES\r\n allRanges = []\r\n if(start < end):\r\n allRanges.append(range(start, end))\r\n else:\r\n allRanges.append(range(start, NODES))\r\n allRanges.append(range(0, end))\r\n for r in allRanges:\r\n if id in r:\r\n return True\r\n return False", "def all_bees_raised_flag(self):\n pos, com, success = self.perception\n if len(pos) > 0:\n return all(map(lambda x: x[1][\"flag\"] == (self.nr_of_possible_neighbors + 1), com))\n else:\n return True", "def chain_rangeValid(start, stop):\r\n for i in range(start, stop):\r\n chain = chain_153(i)\r\n if len(chain) > 1 or chain[0] == 153:\r\n for j in chain_153(i):\r\n print(j)" ]
[ "0.6651107", "0.6298358", "0.6284692", "0.61737514", "0.6166369", "0.61443436", "0.6105336", "0.61047226", "0.6011335", "0.5903171", "0.5824142", "0.5755893", "0.5749033", "0.57480836", "0.57447016", "0.57203454", "0.5719759", "0.57171327", "0.5711364", "0.5645471", "0.5635807", "0.56266975", "0.5622971", "0.56207305", "0.55576897", "0.55425584", "0.55334187", "0.5504227", "0.5504085", "0.54892164" ]
0.6625112
1
Takes an atom dict and writes it to an .xyz file in foldername in /Created_QD with filename as name for the file
def dict2file(dict, filename, foldername): if foldername: if not os.path.exists("../Created_QD/" + foldername): os.makedirs("../Created_QD/" + foldername) file = open("../Created_QD/" + foldername + "/" + filename + ".xyz", "w") else: file = open("../Created_QD/" + filename + ".xyz", "w") file.write(" \n\n") for atom, values in dict.items(): file.write(values['element'] + "\t" + str(values['coor'][0]) + "\t\t" + str(values['coor'][1]) + "\t\t" + str(values['coor'][2]) + "\n") file.seek(0) file.write(str(len(dict))) file.close() print("\nQuantum Dot created :)")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_to_xyz(self, filename): \n with open( filename, 'a' ) as F:\n F = open( filename, 'a' )\n F.write( '%d\\n'%self.num_atoms )\n F.write( \"XYZ\\n\" )\n for num,row in enumerate(self.atoms):\n try:\n F.write('%s '%self.species[num])\n except:\n F.write('X%d '%num)\n F.write( mat2str( row, \"%16.10f\" ) )\n F.write( \"\\n\" )", "def save_meta_file(gen_dict, f_name):\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n filename = run_id+'_'+ f_name +'.meta'\r\n f = open(os.path.join(unique_op_dir, filename),'a')\r\n print('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n logger.info('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n for key, val in gen_dict.items():\r\n line = str(key)+\" : \"+str(val)+\"\\n\"\r\n f.write(line)", "def writexyz(self,fname):\n xyzfile = open(fname + \".xyz\",\"a+\")\n xyzfile.write(str(self.natoms) + \"\\n\\n\")\n for a in self.atoms:\n \tcxyz = a.xyz - np.array(self.pbc_correction(a.xyz))\n\t\t\txyzfile.write(str(a.type) + \"\\t\" + str(cxyz[0]) + \"\\t\" + str(cxyz[1]) + \"\\t\" + str(cxyz[2]) + \"\\n\")\n xyzfile.close()", "def write_xyz_file(allxyz):\n if SAVEXYZ:\n print('+> Saving riverbed topography file...', end='')\n if MODE == 1:\n np.savetxt('kinoshita_topo.xyz', allxyz, fmt='%.6e')\n elif MODE == 2:\n np.savetxt(FNAME.rsplit('.', 1)[0] + '_topo.xyz', allxyz, fmt='%.6e')\n print(' [done]')", "def write_xyz(filename, atoms, coordinates, frame='no default set'):\n\n if len(atoms) != len(coordinates):\n raise ValueError('Number of atoms is different than number of positions')\n\n xyz_file = open(filename,'a')\n xyz_file.write('{}\\n'.format(len(atoms)))\n xyz_file.write('frame {}\\n'.format(frame))\n for i in range(len(atoms)):\n xyz_file.write('{}\\t{}\\t{}\\t{}\\n'.format(atoms[i], coordinates[i][0],\n coordinates[i][1], coordinates[i][2]))\n xyz_file.close()", "def file_write(filename, dic):\n d = dic \n f = open(filename, 'w') \n f.write(str(d))\n f.close()", "def print_xyz(atoms,coordinates,filename):\n coordinates = [[w / angtobh for w in ww] for ww in coordinates] #bh to ang\n xyz = open(filename,\"a\")\n xyz.write(str(len(atoms)))\n xyz.write(\"\\nOptimizer geometry\\n\")\n for i in xrange(len(atoms)):\n\txyz.write(atoms[i] + ' ')\n\txyz.write(\" \".join(str(f) for f in coordinates[i]))\n\txyz.write(\"\\n\")\n coordinates = [[w * angtobh for w in ww] for ww in coordinates] #ang to bh\n xyz.close()", "def writeFile(self, name, folder, collected_entry_list=[]):\n file_io = open(os.path.join(folder, \"system_%s.json\" % name), \"w\")\n json.dump(collected_entry_list, file_io, sort_keys=True, indent=2)\n file_io.close()", "def create_file(dict):\r\n\r\n workbook = Workbook()\r\n worksheet = workbook.active\r\n\r\n name_cache = {}\r\n col = 2\r\n\r\n # Sorts all the years in dictionary from past -> present\r\n sorted_years = list(dict.keys())\r\n\r\n sorted_years.sort()\r\n\r\n for year in sorted_years:\r\n # Sorts all the months in dictionary[year] from past -> present\r\n sorted_months = list(dict[year].keys())\r\n\r\n for month in range(len(sorted_months)):\r\n # If a single digit add a 0 in front of it so the sort works properly\r\n sorted_months[month].rjust(2, '0')\r\n\r\n sorted_months.sort()\r\n\r\n for month in sorted_months:\r\n # Sorts all the days in dictionary[year][month] from past -> present\r\n sorted_days = list(dict[year][month].keys())\r\n\r\n for day in range(len(sorted_days)):\r\n # If a single digit add a 0 in front of it so the sort works properly\r\n 
sorted_days[day] = sorted_days[day].rjust(2, '0')\r\n\r\n sorted_days.sort()\r\n\r\n for day in sorted_days:\r\n # Records a new date\r\n worksheet.cell(row=1, column=col, value=year + '/' + month + '/' + day)\r\n\r\n # Records a person\r\n for name in dict[year][month][day]:\r\n try:\r\n name_cache[name]\r\n except:\r\n name_cache[name] = len(name_cache) + 2\r\n\r\n worksheet.cell(row=name_cache[name], column=1, value=name)\r\n\r\n worksheet.cell(row=name_cache[name], column=col, value=dict[year][month][day][name])\r\n\r\n col += 1\r\n\r\n\r\n # Get total msgs/person\r\n max_column = worksheet.max_column\r\n worksheet.cell(row=1, column=max_column + 1, value='Total')\r\n\r\n for row in range(2, worksheet.max_row + 1):\r\n total = total_of_row(worksheet, row)\r\n\r\n worksheet.cell(row=row, column=max_column + 1, value=total)\r\n\r\n # Try to save\r\n try:\r\n workbook.save('database.xlsx')\r\n except:\r\n workbook.save('database1.xlsx')", "def create_newfile():\n date = datetime.today().strftime('%d_%m_%Y').replace(\" \", \"_\")\n file_name = screen_name + '_' + date + \".json\"\n with io.FileIO(file_name, \"w\") as file:\n file.write(\"Json\")\n file.close()\n return file_name", "def write_map_to_file(dir, version, role, map_id, d):\n if not os.path.exists(dir):\n os.makedirs(dir)\n path = build_output_file_path(dir, version, role, map_id)\n with open(path, \"w\") as f:\n json.dump(d, f, sort_keys=True, indent=4)\n f.close()", "def write_xyz(config, filename, mode=\"a+\"):\n\n with open(filename, 'w') as f:\n # number of atoms (spins)\n f.write(\"{}\\n\".format(config['nat']))\n\n # information line\n f.write(\"{} \".format(config['latt_type']))\n f.write(\"{} {} {}\".format(*list(np.diag(config['box']))))\n f.write(\" {} {} {}\".format(*list(config['pbc'])))\n\n dims_intra = config['latt_intra'].shape[-1]\n for i in range(dims_intra):\n f.write(\" 1\")\n\n f.write(\"\\n\")\n\n # coordinates\n for i in range(config['nat']):\n f.write(\"{} \".format(config['atom_types'][i]))\n\n ix, iy, iz = list(map(lambda x: int(round(x)), config['xyz'][i]))\n\n f.write(\"{} {} {}\".format(ix, iy, iz))\n\n for j in range(dims_intra):\n f.write(\" {}\".format(config['latt_intra'][ix, iy, iz, j]))\n\n f.write(\"\\n\")", "def writer(output, output_name, output_data):\n\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if('Latitude' in exif.keys() and\n 'Latitude Reference' in exif.keys() and\n 'Longitude Reference' in exif.keys() and\n 'Longitude' in exif.keys()):\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'],\n description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output, output_name))", "def store(self, filename):", "def save_dE_string(temp_dict,string,out_directory):\n\n\n file_name = (out_directory + '\\\\' + temp_dict['cik'] + '-' + \n temp_dict['date_details'] + '-0.' 
+ temp_dict['file_type'])\n file_handle=open(file_name,'w')\n file_handle.write(string)\n file_handle.close()\n return", "def write(nmrCalcRun, targetDir):\n \n intIo.writeDataFiles(nmrCalcRun, targetDir)\n \n jsonDict = intIo.makeJsonDict(nmrCalcRun)\n \n \n # write properties file (must be done at the end\n propFile = uniIo.joinPath(targetDir, intIo.propFileName)\n print 'About to write', propFile\n open(propFile,'w').write(json.dumps(jsonDict, sort_keys=True, \n indent=intIo.propIndent))", "def create_file(tmpdir, flowcell, lane, read, file_content):\n\n file_name = f\"S1_FC000{flowcell}_L00{lane}_R_{read}.fastq.gz\"\n file_path = tmpdir / file_name\n file_path.write(file_content)\n return file_path", "def writexyz(atoms, coords, iteraxis, filename):\n \n f = open(filename+'.xyz', 'w')\n \n nstruct = coords.shape[iteraxis]\n natom = len(atoms)\n coords = np.rollaxis(coords, iteraxis)\n \n for i in range(nstruct):\n f.write(str(natom)+'\\n\\n')\n for atom, coord in zip(atoms, coords[i,...]):\n f.write(\"{} {} {} {}\\n\".format(atom, coord[0], coord[1], coord[2]))\n \n f.close()", "def write(chr_dict,filename):\n chr_name_list = sorted(chr_dict.keys())\n file = open(filename,'w+')\n for chr_name in chr_name_list:\n chr_list = chr_dict[chr_name]\n for i in range(len(chr_list)):\n file.write('%s\\t%s\\t%d\\n' % (chr_name,'\\t'.join(map(str,\\\n chr_list[i])),chr_list[i][1]-chr_list[i][0]))\n file.close()", "def save_xyz(self, filename, save_ghosts=True, save_natom=True):\n outfile = open(filename, 'w')\n outfile.write(self.save_string_xyz(save_ghosts, save_natom))\n outfile.close()", "def generate_files(self, output_dir: str) -> None:\n full_filename = os.path.join(output_dir, self.json_file)\n with open(full_filename, 'w', encoding='utf-8') as output_file:\n json.dump(self.zidb, output_file, indent=2)\n print(file=output_file) # add terminating newline\n logging.info(\"Created %s\", full_filename)", "def write_completed_dictionary_to_file(the_dict):\n\ttry:\n\t\toutputLocation = open('usable_dictionary.json','w')\n\t\toutputString = str(the_dict)\n\t\toutputLocation.write(outputString)\n\t\toutputLocation.close()\n\texcept IOError:\n\t\tprint (\"could not open file\")", "def create_file(self, key=None):\n self.make_directory()\n open(self.file_path(key), 'w').close()", "def write(self, filename):\n assert filename[-3:]=='.fz','name must end in .fz'\n\n files.makedir_fromfile(filename)\n\n ucfilename=filename[0:-3]\n bname = os.path.basename(ucfilename)\n\n tmp_path = os.path.join(\n files.get_temp_dir(),\n bname,\n )\n files.makedir_fromfile(tmp_path)\n\n with TempFile(tmp_path) as tfile:\n super(CosmosMEDSMaker,self).write(tfile.path)\n self._compress_meds_file(tfile.path, filename)", "def _StoreMetadataToFile(payload_dir, metadata_obj):\n file_dict = {SHA1_ATTR: metadata_obj.sha1,\n SHA256_ATTR: metadata_obj.sha256,\n SIZE_ATTR: metadata_obj.size,\n ISDELTA_ATTR: metadata_obj.is_delta_format}\n metadata_file = os.path.join(payload_dir, METADATA_FILE)\n with open(metadata_file, 'w') as file_handle:\n json.dump(file_dict, file_handle)", "def save_to_file(result, date):\n try:\n os.mkdir('/Users/yueyang/Downloads/serp-626-75-json', mode=0o744)\n except FileExistsError:\n # print('Directory already exists.')\n pass\n\n filename = '{0}.json'.format(date) #datetime.today().strftime('%m-%d-%Y'), query)\n with open(os.path.join('/Users/yueyang/Downloads/serp-626-75-json', filename), 'w') as f:\n json.dump(result, f, indent=4)\n print('Saved search results to {0}'.format(f.name))", "def 
write_file(config, key, template, interactive, logger, perm='644'):\n from cannula.utils import write_file as wf\n f = config.get(key)\n name = key.upper()\n if interactive:\n f = raw_input(\"\\nGenerate %s at (%s) \\nor enter new name: \" % (name, f)) or f\n \n directory = os.path.dirname(f)\n if not os.path.isdir(directory):\n logger.info(\"Creating Directory: %s\" % directory)\n os.makedirs(directory, 0700)\n \n # Write out the file\n wf(f, template, config, perm=perm)\n \n config[key] = f\n return config", "def write_to_file(fib_details: dict):\n pass # TODO: Replace with implementation!", "def generateFile(data, fileName):\n\n\tif type(data) != dict:\n\t\traise TypeError(\"invalid data: dict expected\")\n\telif type(fileName) != str:\n\t\traise TypeError(\"invalid fileName: str expected\")\n\n\tfp = open(fileName, \"w\")\n\tfp.write(str(data))\n\tfp.close()", "def write_xyz(self, out_path: str)->str:\n return self._write_to_file(out_path=out_path, content_str=self.get_xyz())" ]
[ "0.64304036", "0.63667876", "0.6360757", "0.6177127", "0.60185474", "0.59346175", "0.58930415", "0.5840039", "0.58249146", "0.5794842", "0.57787114", "0.5749572", "0.5735917", "0.5710008", "0.57033205", "0.5697638", "0.56691194", "0.56664854", "0.56631005", "0.5657911", "0.56085324", "0.558851", "0.5577527", "0.5514395", "0.55120444", "0.5510049", "0.5502582", "0.5489816", "0.54877025", "0.54763246" ]
0.75428385
0
Finds the atom at the origin in a dict and returns its id
def base_atom(dict):
    for atom, values in dict.items():
        xyz = values["coor"]
        if xyz[0] == xyz[1] == xyz[2] == 0:
            return atom
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_demand_id(demand_dict, vn_id, fvr_id, svr, nbr):\n #print vn_id, fvr_id, svr, nbr\n for demand_id in demand_dict:\n if vn_id == demand_dict[demand_id]['vn_id'] and \\\n fvr_id == demand_dict[demand_id]['fnode_id'] and \\\n svr == demand_dict[demand_id]['svr'] and \\\n nbr == demand_dict[demand_id]['nbr_id']:\n return demand_id", "def _find_equivalent(searched_dict, dicts_list):\n for id_key in ('id', 'uid', 'name'):\n # Recognize the ID key used, if any\n local_id = searched_dict.get(id_key)\n if local_id:\n # Found an ID\n for other_item in dicts_list:\n if other_item.get(id_key) == local_id:\n # Found an item with the same ID\n return other_item\n \n # Found nothings\n return None", "def selectAtomid(self):\n\n\t\tif len(self.atomid) == 0:\n\t\t\treturn\n\n\t\ttmplist = []\n\t\tfor atom in self.atomlist:\n\t\t\tfound = False\n\t\t\tfor id in self.atomid:\n\t\t\t\tif int(id) == int(atom.file_id):\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\n\n\t\t\tif found and not self.invatomid:\n\t\t\t\ttmplist.append(atom)\n\t\t\tif not found and self.invatomid:\n\t\t\t\ttmplist.append(atom)\n\n\t\tself.atomlist = tmplist", "def find_str_in_dict(s: str, d: dict) -> int:\r\n for i in range(len(d['original_strands'])):\r\n if (d['original_strands'][i] == s):\r\n return i", "def find_object_by_id(stix_objects, obj_id):\n ret_obj = None\n for obj in stix_objects:\n if obj[\"id\"] == obj_id:\n ret_obj = obj\n break\n return ret_obj", "def interactor_finder():\n from tools import prot_id_converter\n\n proteinList = []\n with open(\"../datafiles/known_interactors.txt\",\"r\") as inpProt: # create list of gene names from hand-made text file with known ptp22 interactors\n for protLine in inpProt:\n if protLine != \"\\n\":\n curName = protLine.strip().split(\"\\t\")[0]\n curName = curName[0] + curName[1:].lower()\n proteinList.append(curName)\n inpIdL = prot_id_converter(proteinList, \"10090\", \"genesymbol\", \"uniprotaccession\") # convert to uniprot accessions\n print(inpIdL)\n \n with open(\"../bob/processed/bobprots_all.csv\",\"r\") as targetF: # create list of all uniprot accessions in Bob's dataset (unique razor proteins only)\n targetD = {}\n for targetLine in targetF:\n targetD[targetLine.split(\",\")[0]] = targetLine.split(\",\")[1].strip()\n for inpIdItem in inpIdL:\n for queryI in inpIdItem:\n if queryI in targetD:\n print(targetD[queryI])\n break", "def get_equivalent_atom(self, atom):\n try:\n return self.atom_dict[atom.name]\n except KeyError:\n return None", "def print_atom(atom):\n\n return atom[\"id\"]", "def get_species_id( species ):\n\n species = species.strip( ).lower( )\n result = 1 # (non-sensical) fail-safe if there is no match in the loop\n for species_key in Species_Dict:\n if species in Species_Dict[ species_key ]:\n result = Species_Dict[ species_key ][ 0 ] # change assignment if you want to return another list element\n break\n return result", "def get_equivalent_atom(self, atom):\n try:\n return self.chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def eamap_find(*args):\n return _ida_hexrays.eamap_find(*args)", "def find_key(value, diction):\n\n tuples = diction.items()\n for j, k in tuples:\n if value == k:\n return j", "def doFind(self, str):\n for value in self.doId2do.values():\n if repr(value).find(str) >= 0:\n return value", "def get_channel_obj(self, keyfind, valfind, origin=None):\n\n if origin:\n\n origin = origin.lower()\n if keyfind == \"number\":\n matches = [self.list[origin][x].dict[\"id\"] 
for x in list(self.list[origin].keys()) if self.list[origin][x].number == valfind]\n\n else:\n matches = [self.list[origin][x].dict[\"id\"] for x in list(self.list[origin].keys()) if self.list[origin][x].dict[keyfind] == valfind]\n\n if len(matches):\n return self.list[origin][matches[0]]\n\n else:\n\n matches = []\n for origin in list(self.list.keys()):\n\n if keyfind == \"number\":\n matches = [self.list[origin][x].dict[\"id\"] for x in list(self.list[origin].keys()) if self.list[origin][x].number == valfind]\n\n else:\n matches = [self.list[origin][x].dict[\"id\"] for x in list(self.list[origin].keys()) if self.list[origin][x].dict[keyfind] == valfind]\n\n if len(matches):\n return self.list[origin][matches[0]]\n\n if len(matches):\n return self.list[origin][matches[0]]\n\n return None", "def get_equivalent_atom(self, atom):\n try:\n return self.fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def identify_disease(*arguments):\n\tsymptom_list = []\n\tfor symptom in arguments:\n\t\tsymptom_list.append(symptom)\n\t# Handle key error\n\treturn symptom_map[str(symptom_list)]", "def getPrefixFromIdentifier(self, idDict, pop=False, getAll=False):\n # make sure the request matches the expected form\n if getAll:\n found = []\n else:\n found = None\n # if not collecting many, check all identifiers used\n if not getAll:\n self.checkIdentifiersPresent(idDict)\n # find the match\n toPop = []\n for prefix, info in self._prefixToIdentifiers.items():\n if all(list((v == info[k]) for k, v in idDict.items())):\n if pop:\n toPop.append(prefix)\n if getAll:\n found.append(prefix)\n else:\n found = prefix\n break\n for p in toPop:\n self._prefixToIdentifiers.pop(p)\n\n return found", "def get_geneset_from_dict(geneset_dict, geneid):\n for k, v in geneset_dict.iteritems():\n if geneid in v:\n return k\n return None", "def getAtom(self, atomid):\n\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atm in res.atom:\n\t\t\t\t\tif atomid == int(atm.file_id):\n\t\t\t\t\t\treturn atm\n\n\t\treturn None", "def search_id(self,obj):\r\n ##### create the new id ###########\r\n #for x in self.objectValues('Image'):\r\n for x in obj:\r\n liste_id.append(str(x.id())[0:6])\r\n for digit0 in liste_digit:\r\n for digit1 in liste_digit:\r\n for digit2 in liste_digit:\r\n for digit3 in liste_digit:\r\n for digit4 in liste_digit:\r\n for digit5 in liste_digit:\r\n searched_dict=0\r\n searched=str(digit0)+str(digit1)+str(digit2)+str(digit3)+str(digit4)+str(digit5)\r\n if(self.toolbox.hasProperty('eigene_formate')):\r\n self_val=self.toolbox.getProperty('eigene_formate').split(',')\r\n for x in self_val:\r\n liste_val.append('_'+x+'.jpeg')\r\n for extension in liste_val:\r\n searched_extension=str(searched)\r\n if searched_extension in liste_id:\r\n searched_dict=searched_dict+1\r\n if searched_dict==0:\r\n return searched\r\n return ''", "def GFID(myDict):\n return (myDict[next(iter(myDict))])", "def find_employee_id(self,name):\n nam = list(self.emp_id.values())\n val = nam.index(name)\n ids = list(self.emp_id.keys())\n id = ids[val]\n return id", "def person_in_list(position: OrderedDict, lst: List[OrderedDict]):\n for p in filter(lambda x: x[\"person\"] == position[\"person\"], lst):\n return p\n return None", "def findCenterSeq(dictofSeq):\n seqLen = len(dictofSeq)\n pwMatrix = [[\"-\"]*seqLen for i in range(seqLen)]\n listofSeq = []\n for key in dictofSeq:\n listofSeq.append(dictofSeq.get(key))\n \n findMin = []\n acc = 0\n for seq in listofSeq:\n for seq2 
in listofSeq:\n # in1 gives row, in2 gives column \n in1 = listofSeq.index(seq)\n in2 = listofSeq.index(seq2)\n pwMatrix[in1][in2] = pairwise(seq, seq2)\n acc += pwMatrix[in1][in2]\n #TypeError: 'int' object is not subscriptable\n findMin.append(acc)\n acc = 0\n posSeq = findMin.index(min(findMin))\n refString = listofSeq[posSeq]\n refName = \"\"\n \n for name, seq in dictofSeq.items():\n if seq == refString:\n refName = name\n \n print(refName)\n \n return refName", "def get_equivalent_atom(self, atom):\n try:\n return self.model_dict[atom.model_id].chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def find_match(line,dic):\n seqid = line[0:seqid_len]\n sequence = line[(seqid_len + f_primer_len):(len(line) - r_primer_len)]\n if seqid in dic:\n increment(dic[seqid],sequence,1)\n else:\n dic[seqid] = {sequence:1}", "def udcall_map_find(*args):\n return _ida_hexrays.udcall_map_find(*args)", "def lookup(self, dict_id):\n\n return self.ep.get(\"{0}/{1}\".format(self.endpoint, dict_id))", "def get_residue(self, chainid, resid) :\n if not self._res_dict :\n d = {}\n for r in self.residues :\n d[ (r.chainid, r.resid)] = r\n self._res_dict =d\n \n return self._res_dict[(chainid, resid)]", "def _FindCompoundIndex(self, kegg_id):\n logging.debug('Looking for the index of %s' % kegg_id)\n for i, c in enumerate(self.reactants):\n if c.compound.kegg_id == kegg_id:\n logging.debug('Found %s at index %d' % (kegg_id, i))\n return i\n return None" ]
[ "0.5926452", "0.5634988", "0.5295975", "0.5282709", "0.52257067", "0.5161398", "0.508171", "0.5079885", "0.5073828", "0.5048743", "0.5029526", "0.50288653", "0.5028559", "0.5015856", "0.5008036", "0.49915805", "0.49639514", "0.4963865", "0.49622545", "0.4933749", "0.4920927", "0.4894365", "0.48859012", "0.4880389", "0.48747617", "0.48665512", "0.4861908", "0.48571572", "0.48558277", "0.48530626" ]
0.6143067
0
Converts strings y and n to boolean
def y2true(text):
    while True:
        if text == 'y':
            return True
        elif text == 'n':
            return False
        else:
            text = input("Wrong input, try again: ")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_bool(s: str):\n if s.strip().lower() == \"y\":\n return True\n else:\n return False", "def _get_bool(string):\n string = string.lower()\n return True if string == 'y' else False", "def eval_y_n(self, question):\n answer = raw_input(question + \" [y/n] : \")\n return answer.lower() in ['y', 'yes']", "def y_n(ch):\r\n chs = ['yes', 'y', 'no', 'n']\r\n ch = check(ch, chs)\r\n\r\n if ch == 'yes' or ch == 'y':\r\n return True\r\n return False", "def s2b(s):\n s = s.lower()\n return s == 'true' or s == 'yes' or s == 'y' or s == '1'", "def str_to_bool(s):\n if len(s) > 0 and s[0] in \"yYtT1\":\n return True\n return False", "def genesis_to_boolean(genesis_str):\n\n if genesis_str == 'Y':\n return True\n else:\n return False", "def cast_str_to_bool(word: str) -> bool:\n word = word.upper()\n if word == \"N\":\n return False\n elif word == \"Y\":\n return True\n raise ValueError(\"%s is not a valid string to boolean!\" % str)", "def yes_no(x):\n \n x = x.lower()\n if x[0] == 'n':\n return 0\n if x[0] == 'y':\n return 1\n return None", "def str2bool(string):\n \n string = \tstring.lower()\n if string == \"true\" or string == \"yes\" or string == \"1\":\n return True\n elif string == \"false\" or string == \"no\" or string == \"0\": \n return False\n else :\n\traise ValueError(\"Could not Interpret the boolean for %s\" %string)", "def str2bool(self, v):\n \tprint('Entering conversion function')\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")", "def str2bool(s: str):\n\n if s.lower() in [\"true\", \"yes\", \"1\"]:\n return True\n elif s.lower() in [\"false\", \"no\", \"0\"]:\n return False\n else:\n print(\"Fehler: Boolean erwartet! %s ist nicht als Boolean interpretierbar\" % s)\n exit(1)", "def str2bool(string: str) -> bool:\n if string.lower() in [\"yes\", \"true\", \"t\", \"y\", \"1\"]:\n return True\n if string.lower() in [\"no\", \"false\", \"f\", \"n\", \"0\"]:\n return False\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")", "def Bool(arg):\n return arg.lower() in ('y', 'true', 't', '1')", "def is_yes(stri):\n return 'y' in stri.lower()", "def str2bool(input_str):\n if input_str.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif input_str.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean Value Expected')", "def str_to_bool(text):\n return text and text.lower() in [\"true\", \"y\", \"yes\", \"1\"]", "def stringToBoolean(text):\n if text in (\"True\", \"1\"):\n return True\n if text in (\"False\", \"0\"):\n return False\n return text", "def str2bool(S):\n return {\n \"0\": False,\n \"n\": False,\n \"N\": False,\n \"f\": False,\n \"F\": False,\n \"1\": True,\n \"y\": True,\n \"Y\": True,\n \"t\": True,\n \"T\": True,\n }[S[0]]", "def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n raise argparse.ArgumentTypeError('Boolean value expected.')", "def toBool( string ):\r\n return string == 'true'", "def string_to_bool(test_string):\n\treturn bool(test_string in [\"True\", \"true\", \"Yes\", \"yes\", \"Y\", \"y\"])", "def truth(text):\n lowered = str(text).lower()\n if lowered in frozenset(['y', 'yes', 'true']):\n return True\n elif lowered in frozenset(['n', 'no', 'false']):\n return False\n else:\n raise Error('Invalid truth value: %r' % text)", "def has_y(self):\n return any(map(lambda s: s.is_y, self))", "def convertStringToBool(nodeText):\n stringsThatMeanTrue = list(['yes','y','true','t','on'])\n 
val = False\n if nodeText.lower() in stringsThatMeanTrue:\n val = True\n return val", "def str2bool(v):\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")", "def is_y(self, var):\n y_list = ['lat', 'latitude', 'LATITUDE', 'Latitude', 'y']\n if self.get_units(var) == 'degrees_north' or self.get_name(var) in y_list:\n return True\n else:\n return False", "def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "def __str_to_bool(self, s):\n if s == 'True':\n return True\n elif s == 'False':\n return False\n else:\n raise ValueError" ]
[ "0.7319038", "0.69107634", "0.6700293", "0.66846", "0.6606526", "0.6605776", "0.65743244", "0.65112495", "0.65079075", "0.62479234", "0.62007165", "0.61767143", "0.6154451", "0.6152505", "0.61269474", "0.61194664", "0.611237", "0.6106023", "0.6097604", "0.60594904", "0.6041452", "0.6020511", "0.60061723", "0.5993961", "0.59799916", "0.5970218", "0.596983", "0.5967258", "0.5967258", "0.5957045" ]
0.6958512
1
Returns a matrix of map tiles
def createTiles():
    Renderer.Clear()
    map = []
    w, h = len(testmap[0]), len(testmap)
    x, y = 0, 0
    for row in testmap:
        for char in row:
            map.append(makeTile(char, x, y))
            x += 1
        y += 1
        x = 0
    return map, w, h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]", "def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]", "def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list", "def render_tiles(output):\n chunks = [output[i:i + 3] for i in range(0, len(output), 3)]\n max_i = max_j = 0\n for i, j, _ in chunks:\n max_i, max_j = max(i, max_i), max(j, max_j)\n\n matrix = [[None] * (max_j + 1) for _ in range(max_i + 1)]\n\n for i, j, tile_id in chunks:\n matrix[i][j] = draw_tile(tile_id)\n\n for i, row in enumerate(matrix):\n matrix[i] = \" \".join(row)\n return matrix", "def get_tiles(self):\n\n tiles = []\n for x in range(self.position[0],\n self.position[0] + CAR_LENGTH if self.is_horizontal else self.position[0] + CAR_WIDTH):\n for y in range(self.position[1],\n self.position[1] + CAR_WIDTH if self.is_horizontal else self.position[1] + CAR_LENGTH):\n tiles.append((x, y))\n\n return tiles", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)", "def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def build_grid(tiles, tile_size, grid_rows=None, grid_cols=None):\n if grid_rows is None or grid_cols is None:\n grid_rows = int(math.sqrt(len(tiles)))\n grid_cols = int(math.ceil(len(tiles) / grid_rows))\n\n grid = np.zeros(\n (grid_rows * tile_size[1], grid_cols * tile_size[0], 3), np.uint8)\n for tile_id, tile in enumerate(tiles):\n assert(tile.shape[0] == tile_size[1] and tile.shape[1] == tile_size[0])\n yy = int(tile_id / grid_cols)\n xx = tile_id % grid_cols\n grid[(yy * tile_size[1]):((yy + 1) * tile_size[1]),\n (xx * tile_size[0]):((xx + 1) * tile_size[0]), :] = tile\n return grid", "def rellenarMatrix(self):\n for i in range(0, 26):\n 
self.matrixMAPA.append([])\n for j in range(0, 26):\n self.matrixMAPA[i].append((0, str(i)+\"-\"+str(j)))", "def makeMatrix():\n listOfChars = []\n for ascii in range(32, 128):\n listOfChars.append(chr(ascii))\n random.shuffle(listOfChars)\n matrix = Grid(8, 12)\n i = 0\n for row in range(matrix.getHeight()):\n for column in range(matrix.getWidth()):\n matrix[row][column] = listOfChars[i]\n i += 1\n return matrix", "def __build_map(self):\n columns = []\n\n for i in range(self.__dimensions):\n columns.append([])\n\n for i in range(self.__dimensions):\n self.map.append(columns)", "def mask_tile(self):\n if self.size == 1:\n m = np.array([[0]])\n elif self.size ==2:\n m = np.array([[1,2],[4,3]])\n else:\n m = 9 * np.ones((self.size, self.size))\n m[0,0] = 1\n m[0,-1] = 2\n m[-1,-1] = 3\n m[-1,0] = 4 \n m[0,1:-1] = 5 * np.ones(self.size-2)\n m[1:-1,-1] = 6 * np.ones(self.size-2)\n m[-1,1:-1] = 7 * np.ones(self.size-2)\n m[1:-1,0] = 8 * np.ones(self.size-2)\n return m.astype(np.int8)", "def get_tile(lat: float, lon: float, zoom: int) -> List:\n lat_rad = lat * math.pi / 180\n n = math.pow(2, zoom)\n col = n * ((lon + 180) / 360) # Column\n row = n * (1 - (math.log(math.tan(lat_rad) + 1 /\n math.cos(lat_rad)) / math.pi)) / 2 # Row\n\n return [int(col), int(row)]", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def tiles(self, nums, row = 1, spaces = 0):\r\n # We add the (\" \" * 5) to align the rows\r\n # with odd number of values\r\n separator = (\"+---+\" + (\" \" * 5)) * row\r\n space = (\" \" * 5) * spaces\r\n\r\n tile = space + separator + space + \"\\n\"\r\n \r\n tile += space\r\n for i in nums:\r\n # We add the (\" \" * 5) to align the rows\r\n # with odd number of values\r\n tile += f\"| {i} |\" + (\" \" * 5)\r\n tile += space + \"\\n\"\r\n \r\n tile += space + separator + space + \"\\n\"\r\n \r\n return tile", "def parse_map(self, maze, width, height):\r\n tile_width = self.screen_width/width\r\n tile_height = self.screen_height/height\r\n for i in range(0, height):\r\n for j in range(0, width):\r\n if not maze[j].has_key(i):\r\n tile = MazeTile(self)\r\n tile.initialise()\r\n tile.set_size(tile_width, tile_height)\r\n tile.set_position((j*tile_width,i*tile_height))\r\n self.add(tile)\r\n \r\n self.run()", "def tile_set():\n TILES = {\n \"ocean\":\"~\"\n ,\"rock\":\"R\"\n ,\"mountain\":\"M\"\n ,\"player\":\"X\"\n ,\"end\":\"⋆\"\n ,\"npc\":\"I\"\n ,\"cave\":\"C\"\n ,\"dirt\":\"+\"\n ,\"sign\":\"!\"\n }\n\n return TILES", "def make_matrix_coord_map(width, height, serpentine=False, offset=0, rotation=0, y_flip=False):\n result = []\n for y in range(height):\n if not serpentine or y % 2 == 0:\n result.append([(width * y) + x + offset for x in range(width)])\n else:\n result.append([width * (y + 1) - 1 - x + offset for x in range(width)])\n\n result = rotate_and_flip(result, rotation, y_flip)\n\n return result", "def copy_tiles(self):\n \n return self.tiles", "def build_map(n=30,m=30, preset=True, filename='/home/sji367/small_grid.mat', key='new_grid'):\n if preset:\n the_map = []\n row = [0] * n\n for i in range(m):\n the_map.append(list(row))\n \n # fillout the map matrix with a '+' pattern\n for x in range(n / 8, n * 7 / 8):\n the_map[m / 2][x] = 1\n for y in range(m/8, m * 7 / 8):\n the_map[y][n / 2] = 1\n \n # randomly select start and finish locations from a list\n sf = []\n sf.append((0, 0, n - 1, m - 1))\n sf.append((0, m - 1, n - 1, 0))\n sf.append((n / 2 - 1, m / 2 - 1, n / 2 + 1, m / 2 + 1))\n sf.append((n / 2 - 1, m / 2 + 1, n / 2 + 1, m / 2 - 1))\n 
sf.append((n / 2 - 1, 0, n / 2 + 1, m - 1))\n sf.append((n / 2 + 1, m - 1, n / 2 - 1, 0))\n sf.append((0, m / 2 - 1, n - 1, m / 2 + 1))\n sf.append((n - 1, m / 2 + 1, 0, m / 2 - 1))\n (xStart, yStart, xFinish, yFinish) = random.choice(sf)\n else:\n grid = loadmat(filename)\n the_map = grid[key]\n xStart = 19\n yStart = 31\n xFinish = 67\n yFinish = 98\n \n return the_map, xStart, yStart, xFinish, yFinish", "def tile(arrayin, N, M = None):\r\n if M == None :\r\n M = N\r\n Ny, Nx = arrayin.shape\r\n arrayout = np.zeros((Ny * N, Nx * M), dtype = arrayin.dtype) \r\n for i in range(N):\r\n for j in range(M):\r\n arrayout[i * Ny : (i+1) * Nx, j * Ny : (j+1) * Nx] = np.copy(arrayin)\r\n return arrayout", "def fill_tiles(tiles, fill_func):\n return np.array([[fill_func(x) for x in row] for row in tiles])", "def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();", "def get_tile(board):\n t = [[0,0]]\n for i in range(board.shape[0] -1):\n for j in range(board.shape[1] -1):\n if board[i, j] == board[i +1, j]:\n t.append([i +1, j])\n if board[i, j] == board[i, j+1]:\n t.append([i, j+1])\n if board[i, j] == board[i+1, j+1]:\n t.append([i+1, j+1])\n # print(t)\n t = list(np.unique(t, axis=0))\n return t", "def enumerate_tiles(self):\n # Iterates through entire game board.\n for row in range(self.rows):\n for col in range(self.cols):\n\n # Doesn't count mines adjacent to mine tiles.\n if self.board[row][col].category == Tiles.mine:\n continue\n mines = 0\n\n # Calculates number of mines surrounding each tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.board[i][j].category == Tiles.mine):\n mines += 1\n \n # Sets each game board tile's mine proximity number.\n self.board[row][col] = Tiles(row, col, str(mines))", "def make_board():\n return [[0 for i in range(8)] for i in range(8)]", "def map_reshaper(map):\n a = [[map[int(i / 2), int(j / 2)] for j in range(50)] for i in range(50)]\n return np.array(a)" ]
[ "0.707192", "0.7048794", "0.6917319", "0.6899672", "0.67853016", "0.6704365", "0.6704365", "0.6682728", "0.66672593", "0.6522762", "0.6433973", "0.63403666", "0.6328379", "0.63274586", "0.6298421", "0.62948275", "0.62924397", "0.62750506", "0.6272464", "0.62414765", "0.6236406", "0.61894906", "0.6181458", "0.6162787", "0.6147207", "0.61265266", "0.61220556", "0.6107077", "0.61058205", "0.6103039" ]
0.75113404
0
This method parses poetic movements as specified in the movements_to_scrape list, follows each movement link, and yields a request using the parse_movement method
def parse(self, response):
    movements_to_scrape = ["Beat", "Black Arts", "Black Mountain", "Conceptual Poetry", "Concrete Poetry",
                           "Confessional Poetry", "Contemporary", "Dark Room Collective", "Formalism", "Futurism",
                           "Harlem Renaissance", "Jazz Poetry", "Language Poetry", "Modernism", "New Formalism",
                           "New York School", "Objectivists", "San Francisco Renaissance", "Slam/Spoken Word",
                           "Surrealism", "Symbolists"]
    sresponse = scrapy.Selector(response)
    # sites are selectors found in the school movements table
    sites = sresponse.xpath('//div[@class = "school_movements"]//ul/li/a')
    for site in sites:
        if ''.join(site.xpath('text()').extract()) in movements_to_scrape:
            movement_name = site.xpath('text()').extract()
            link = u''.join(site.xpath('@href').extract())
            movement_url = urlparse.urljoin("http://www.poets.org", link)
            yield scrapy.Request(url=urlparse.urljoin("http://www.poets.org", link),
                                 callback=self.parse_movement,
                                 meta={'movement_name': movement_name, 'movement_url': movement_url})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_movement(self, response):\n movement_name = response.meta['movement_name']\n movement_url = response.meta['movement_url']\n\n sresponse = scrapy.Selector(response)\n\n #Because each movement page contains a table that has maximum of ten rows, we need to go to the next page\n #in order to extract all of the poets associated for each movement\n poetnextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table = sresponse.xpath('//tbody/tr')\n for row in table:\n item = PoetItem()\n item['movement_name'] = movement_name\n item['movement_url'] = movement_url\n if len(row.xpath('td/a/text()').extract())>0:\n item['poet_name'] = row.xpath('td/a/text()').extract()\n if len(row.xpath('td/a/@href').extract())>0:\n #the link is for the poet bio page on poetry.org website\n link = u''.join(row.xpath('td/a/@href').extract())\n item['poet_url'] = urlparse.urljoin(\"http://www.poets.org\",link)\n if len(row.xpath('td/span/text()').extract()) > 0:\n item['poet_dob2'] = row.xpath('td/span/text()').extract()\n if len(row.xpath('td/text()').extract())>0:\n #a poet may be tagged/associated with multiple movements\n item['poet_tags'] = row.xpath('td/text()').extract()\n yield scrapy.Request(url =urlparse.urljoin(\"http://www.poets.org\",link), callback=self.parse_poet,\n meta = {'item': item})\n\n #if more poets on next page, use this method again\n if len(poetnextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poetnextpagelink),\n callback=self.parse_movement, meta = {'movement_name': movement_name,\n 'movement_url':movement_url})", "def parse_movements(self, response):\n\n # Gets description of all movements\n movements = response.xpath('//tr[contains(@class, \"description\")]')\n for movement in movements:\n row = movement.xpath(\".//td\")\n for i, cell in enumerate(row):\n if i == 1:\n timestamp = cell.xpath(\"text()\").extract_first().strip()\n movement_date = datetime.strptime(timestamp, \"%d/%m/%Y\")\n elif i == 2:\n description = cell.xpath(\".//b/text()\").extract_first().strip()\n elif i == 3:\n category = cell.xpath(\"text()\").extract_first().strip()\n sub_category = cell.xpath(\".//i/text()\").extract_first().strip()\n elif i == 5:\n # If currency is unknown skips current movement\n try:\n amount = convert_amount(\n cell.xpath(\".//b/text()\").extract_first().strip()\n )\n except CurrencyException as exc:\n msg = \"Skipping movement {} of {}. 
{}\".format(\n description, movement_date, exc\n )\n logger.exception(msg)\n break\n\n # Losses are saved as negative values\n if response.url == self.LOSSES_URL:\n amount = -amount\n\n # Creates new Movement if it doesn't already exists\n Movement.objects.get_or_create(\n date=movement_date,\n description=description,\n category=category,\n sub_category=sub_category,\n amount=amount,\n )\n\n # If last month parsed is current one returns since I might be dead by the night\n today = date.today()\n if (\n response.meta[\"date\"].year >= today.year\n and response.meta[\"date\"].month > today.month\n ):\n return\n\n # A call might fail from time to time since Splash container crashes\n # randomly and needs to restart, if that happens the page can't be\n # scraped so the call must be repeated for that same month\n if response.status == 200:\n next_month = response.meta[\"date\"] + timedelta(weeks=4)\n else:\n next_month = response.meta[\"date\"]\n\n # Creates request to get next month movements\n request = scrapy_splash.SplashRequest(\n response.url,\n callback=self.parse_movements,\n endpoint=\"execute\",\n cache_args=[\"lua_source\"],\n dont_filter=True,\n args={\n \"lua_source\": self.movements_lua,\n \"moneymap_url\": self.MONEYMAP_URL,\n \"meseanno\": response.meta[\"date\"].strftime(\"%m%Y\"),\n \"dopoAggiornamento\": \"false\",\n \"idBrand\": \"\",\n },\n meta={\"date\": next_month},\n )\n return [request]", "def parse(self, response):\n # Collecting all of the links for the spider to enter and extract reviews\n href = response.xpath('//a[@data-clicksource=\"HotelName\"]/@href').extract()\n for hot in href:\n # For each hotel on the page, it will go onto the title link\n yield scrapy.Request(response.urljoin(hot), self.parse_page)\n # This is making sure that we don't go too far with our scrape\n # Recursively calls upon parse to click on the next button on the bottomabs\n # of the page\n try: \n yield response.follow(response.xpath('//link[@rel=\"next\"]/@href').extract_first(),self.parse)\n except: \n print(\"No Page?\")", "def parse(self,response):\n # Collecting all of the links for the spider to enter and extract reviews\n href = response.xpath('//a[@data-clicksource=\"HotelName\"]/@href').extract()\n for hot in href:\n # For each hotel on the page, it will go onto the title link\n yield scrapy.Request(response.urljoin(hot), self.parse_page)\n # This is making sure that we don't go too far with our scrape\n # Recursively calls upon parse to click on the next button on the bottomabs\n # of the page\n \n try: \n yield response.follow(response.xpath('//link[@rel=\"next\"]/@href').extract_first(),self.parse)\n except: \n print(\"No Page?\")\n print(self.count)", "def parse(self, response):\n\n product_page_links = response.css('.detailsLink')\n yield from response.follow_all(product_page_links, self.parse_item)\n\n pagination_links = response.css('span.fleft a')\n yield from response.follow_all(pagination_links, self.parse)", "def scrape_listings():\n scraped_listings = [] # Used to store apartment listings\n links = [] # Used to store links to apartment listings (seperate tag)\n\n # Download \"The Canon\" website\n URL = \"https://www.thecannon.ca/classifieds/housing\"\n headers = program_features.HEADERS\n page = requests.get(URL, headers=headers)\n\n # Parse document\n soup = BeautifulSoup(page.content, \"html.parser\")\n page_listings = list(soup.find_all('td')) # Find all listing information, and store as list\n\n # Used to find URL parameters for each apartment listing\n for link 
in soup.find_all(\"a\"):\n # URL Format Example: \"<a href=\"/page.php?cid=347306&amp;id=26&amp;t=housing\">1219 Gordon St, Guelph</a>\"\n if link.has_attr('href') and (\"t=housing\" in link.attrs['href']):\n links.append(\"https://www.thecannon.ca\" + link.attrs['href'])\n\n # Iterate list \n for i, listing in enumerate(page_listings, 1):\n # Group every 10 elements into a listing object\n if i % 10 == 0:\n index = int(i / 10) - 1 # Calculate index of link that matches the current listing \n\n # Append listing object to array\n scraped_listings.append(\n # Create listing object\n WebListing(\n page_listings[i - 10].get_text().strip(), # Date post was created\n page_listings[i - 9].get_text().strip(), # Date apartment is available\n page_listings[i - 8].get_text().strip(), # Offering type\n page_listings[i - 7].get_text().strip(), # Housing type\n page_listings[i - 6].get_text().strip(), # Address \n page_listings[i - 5].get_text().strip(), # Price\n page_listings[i - 4].get_text().strip(), # Distance \n page_listings[i - 3].get_text().strip(), # Sublet permission\n page_listings[i - 2].get_text().strip(), # Number of rooms\n page_listings[i - 1].get_text().strip(), # Features\n links[index][38:44], # Listing ID (stored in link)\n links[index] # Listing Link\n )\n )\n\n return scraped_listings # Return listings array", "def parse(self, response):\n for link in response.css(\".page-center .list-supporting-info a\")[:10]:\n link_text = \" \".join(link.css(\"*::text\").extract())\n if \"schedule\" in link_text.lower():\n yield response.follow(\n link.attrib[\"href\"], callback=self._parse_meetings, dont_filter=True\n )", "def parse_index(self, response):\n items = response.css('.item')\n for item in items:\n href = item.css('.top a::attr(href)').extract_first()\n detail_url = response.urljoin(href)\n logger.info('detail url %s', detail_url)\n yield PyppeteerRequest(detail_url, callback=self.parse_detail, wait_for='.item .name')\n \n # next page\n match = re.search(r'page/(\\d+)', response.url)\n if not match: return\n page = int(match.group(1)) + 1\n next_url = f'{self.base_url}/page/{page}'\n yield PyppeteerRequest(next_url, callback=self.parse_index, wait_for='.item .name')", "def parse(self, response):\n product_urls = response.css('.lpPLink::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n variety_urls = response.css('.elementContent a::attr(href)').getall()\n for variety_url in variety_urls:\n yield scrapy.Request(response.urljoin(variety_url))\n\n # TODO: mêmes opérations que précédemment, seule la classe change\n variety_urls = response.css('.elementTitle a::attr(href)').getall()\n for variety_url in variety_urls:\n yield scrapy.Request(response.urljoin(variety_url))", "def parse(self, response):\n page_jobs=[]\n\n # Calling abstarct method get_jobs_list() and iterating...\n jobs_div_list=self.get_jobs_list(response)\n for div in jobs_div_list:\n \n # Calling abstarct method get_job_dict()\n job_dict=self.get_job_dict(div)\n\n if not job_dict['url'] or not job_dict['title'] :\n # At least url, title data is loaded from the list of job posting ...\n raise ValueError( \"Could not find valid job information ('url' and 'title') in data:\\n\" + \n str(div.get()) + \"\\nScraped infos:\\n\" + str(job_dict) + \"\\nReport this issue on github!\" )\n \n # Store source as the name of the spider aka website\n job_dict['source']=self.name\n page_jobs.append(job_dict)\n \n \"\"\"\n Load full job page only if:\n - 
it's a new job (not in database)\n - load_full_jobs=Yes\n - the method parse_full_job_page() has been re-wrote by the Scraper subclass\n \"\"\"\n if ( (not self.db or self.db.find_job(job_dict)==None)\n and self.load_full_jobs ):\n if type(self).parse_full_job_page != Scraper.parse_full_job_page:\n # load_full_jobs=Yes and it's supported by scraper\n # Call parse_full_job_page() with job URL\n\n # Handle SeleniumRequest if use_selenium=True\n if self.use_selenium:\n yield SeleniumRequest(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict),\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict))\n else:\n yield Job(job_dict)\n else:\n yield Job(job_dict)\n\n \"\"\" Just printing in one line \"\"\"\n if self.load_full_jobs:\n if type(self).parse_full_job_page == Scraper.parse_full_job_page:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True and load_all_new_pages=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True, some informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraping {} jobs from {}...\".format(len(page_jobs), response.url))\n else:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. load_all_new_pages=False and load_full_jobs=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url))\n else:\n self.log.info(\"Scraped {} jobs from {}. 
load_full_jobs=False, some informations might be missing\".format(len(page_jobs), response.url))\n \n \"\"\"\n If all page jobs are new and \n The method get_next_page_url() has been re-wrote by the Scraper subclass\n Scrape next page\n \"\"\"\n if self.load_all_new_pages==True:\n if self.db and any( [self.db.find_job(job_dict)!=None for job_dict in page_jobs] ):\n # All new job postings loaded\n pass\n else:\n if self.get_next_page_url(response)!=None :\n # Loading next page...\n if self.use_selenium:\n yield SeleniumRequest(\n url=self.get_next_page_url(response),\n callback=self.parse,\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(\n url=self.get_next_page_url(response),\n callback=self.parse)\n else:\n if type(self).get_next_page_url != Scraper.get_next_page_url:\n # Last page loaded\n pass\n else:\n self.log.info(\"Scraper {} does not support load_all_new_pages=True, some new job postings might be missing\".format(self.name))", "def parse(self, response):\n product_urls = response.css('.item .name a::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n next_page_url = response.css('.LinkNext a::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def parse(self, response):\n product_urls = response.css('.product-title a::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n categorie_urls = response.css('.subcategory-link::attr(href)').getall()\n for categorie_url in categorie_urls:\n yield scrapy.Request(response.urljoin(categorie_url))\n\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def parse(self, response):\n for href in response.xpath(\"//h2/a/@href\"):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, self.parse_post_content)\n\n # Check for a next page\n next_page_links = response.xpath(\"//a[@class='right']/@href\")\n if len(next_page_links) > 0:\n next_url = response.urljoin(next_page_links[0].extract())\n yield scrapy.Request(next_url, self.parse)", "def parse_main(self, response):\n\n for i in response.xpath('//div[contains(@class,\"products-list__item\")]'):\n item = {\n \"VENDORID\": 1055,\n \"VENDOR\": 'JC SALES',\n \"ITEMNO\": i.xpath('.//span[contains(text(),\"Item No:\")]/text()').get().replace('Item No:', '').strip(),\n \"DESCRIPTION\": i.xpath('.//div[contains(@class,\"product-card__name\")]//a/text()').get(),\n \"IMAGE_URL\": i.xpath('.//div[contains(@class,\"product-card__image\")]//img[1]/@src').get(),\n \"PAGE_TITLE\": response.css('title::text').get(),\n \"PAGE_URL\": response.request.url\n }\n yield Request(response.urljoin(i.xpath('.//a[contains(@class,\"image__body\")]/@href').get()),\n self.parse_details, meta={'item': item})\n\n next_page = response.xpath('//a[text()=\">\"]/@href').get()\n if next_page is not None:\n next_page = response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse_main)", "def parse_listing(keyword, place):\n url = \"https://www.paginegialle.it/ricerca/{0}/{1}\".format(keyword, place)\n print(\"retrieving \", url)\n\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n 
'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': 'www.paginegialle.it',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n }\n try:\n response = requests.get(url, verify=False, headers=headers)\n print(\"parsing page\")\n if response.status_code == 200:\n parser = html.fromstring(response.text)\n # making links absolute\n base_url = \"https://www.paginegialle.it\"\n parser.make_links_absolute(base_url)\n\n XPATH_LISTINGS = \"//div[@class='pageContentWrapper active']//div[@class='col contentCol']\"\n listings = parser.xpath(XPATH_LISTINGS)\n elif response.status_code == 404:\n print(\"Could not find a location matching\", place)\n # no need to retry for non existing page\n else:\n print(\"Failed to process page exit with no results exit code: 213\")\n return []\n except:\n print(\"Failed to process page exit with no results exit code: 222\")\n return []\n\n XPATH_RESULTS = \"//div[@class=' container containerListato ']//span[@class='searchResNum']//text()\"\n raw_RESULTS = listings[0].xpath(XPATH_RESULTS)\n resultsn = ''.join(raw_RESULTS).strip().replace(\"risultati\",\"\") if raw_RESULTS else None\n print(\"results found for query {0} {1} - {2}\".format(keyword,place,resultsn))\n page_number = int(int(resultsn)/20) #20 is the number of result for single web page\n print(\"number of web page to parse: {0}\".format(page_number))\n\n scraped_results = []\n if page_number == 1 or page_number == 0:\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n return scraped_results\n if page_number > 1: \n for retry in range(page_number):\n if retry == 0:\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = 
\".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n else:\n time.sleep(5)\n try:\n url = \"https://www.paginegialle.it/ricerca/{0}/{1}/p-{2}\".format(keyword,place,retry)\n response = requests.get(url, verify=False, headers=headers)\n print(\"parsing page {0}\".format(retry))\n if response.status_code == 200:\n parser = html.fromstring(response.text)\n # making links absolute\n base_url = \"https://www.paginegialle.it\"\n parser.make_links_absolute(base_url)\n\n XPATH_LISTINGS = \"//div[@class='pageContentWrapper active']//div[@class='col contentCol']\"\n listings = parser.xpath(XPATH_LISTINGS)\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n\n 
elif response.status_code == 404:\n print(\"Could not find a location matching\", place)\n # no need to retry for non existing page\n break\n else:\n print(\"Failed to process page number: {0}\".format(retry))\n return scraped_results\n\n except:\n print(\"Failed to process page number: {0}\".format(retry))\n return scraped_results \n return scraped_results", "def parse(self, response):\n content_type = self.get_content_type(response.headers)\n\n sitescan = response.meta.get('sitescan')\n\n if 'text/html' not in self.get_content_type(response.headers):\n\n # For linked content, find the urlscan it linked from\n urlscan = model.URLScan.objects.get(\n site_scan=sitescan,\n page_url_hash=sha256(response.meta['referrer']).hexdigest())\n else:\n # Only create urlscans for text/html\n urlscan, us_created = model.URLScan.objects.get_or_create(\n\n site_scan=sitescan,\n page_url_hash=sha256(response.url).hexdigest(),\n defaults={'page_url': response.url,\n 'timestamp': self.get_now_time()})\n\n # Continue crawling\n # Parse stylesheet links, scripts, and hyperlinks\n hxs = HtmlXPathSelector(response)\n\n # Extract other target links\n try:\n css_links = hxs.select('//link/@href').extract()\n except TypeError:\n css_links = []\n\n try:\n js_links = hxs.select('//script/@src').extract()\n except TypeError:\n js_links = []\n\n try:\n hyperlinks = hxs.select('//a/@href').extract()\n except TypeError:\n hyperlinks = []\n\n # Using a set removes duplicate links.\n all_links = set(hyperlinks + js_links + css_links)\n\n # Examine links, yield requests if they are valid\n for url in all_links:\n\n if not url.startswith('http://'):\n # ensure that links are to real sites\n if url.startswith('javascript:'):\n continue\n else:\n url = urljoin(response.url, url)\n\n ua = response.meta['user_agent']\n\n request = Request(url)\n request.headers.setdefault('User-Agent', ua.ua_string)\n request.meta['referrer'] = response.url\n request.meta['sitescan'] = sitescan\n request.meta['user_agent'] = ua\n request.meta['content_type'] = None\n\n yield request\n\n # The response contains a user agent, we should yield an item\n item = MarkupItem()\n item['content_type'] = self.get_content_type(response.headers)\n item['filename'] = os.path.basename(urlparse(response.url).path)\n item['headers'] = unicode(response.headers)\n item['meta'] = response.meta\n item['raw_content'] = response.body\n item['sitescan'] = sitescan\n item['urlscan'] = urlscan\n item['url'] = response.url\n item['user_agent'] = response.meta.get('user_agent')\n item['redirected_from'] = response.meta.get('redirected_from',\n u'')\n yield item", "def parse_poet_poems(self, response):\n poet_poems_url = response.meta['poet_poems_url']\n\n sresponse = scrapy.Selector(response)\n\n #like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next\n # page in order to extract all of the poems associated with each poet\n nextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table_poems = sresponse.xpath('//tbody/tr')\n\n #poetry.org does not provide text for all of the poems available, some links are for audio versions only,\n #therefore need to avoid storing poemitems that are not text\n regex = re.compile(r'audio')\n\n for row in table_poems:\n if len(row.xpath('td/a/@href').extract()[0]) > 0 :\n poemlink = u''.join(row.xpath('td/a/@href').extract()[0])\n linktext = str(poemlink)\n if regex.search(linktext) is None:\n if len(row.xpath('td//text()').extract())>0:\n poemitem 
= PoemItem()\n poemitem['poet_poems_url'] = poet_poems_url\n poemitem['poem_yrpub'] = row.xpath('td//text()').extract()[1]\n poemitem['poem_title'] = row.xpath('td//text()').extract()[4]\n poemitem['poem_link'] = urlparse.urljoin(\"http://www.poets.org\",poemlink)\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poemlink),\n callback=self.parse_poet_poem, meta={'poemitem': poemitem})\n\n #if more poems on next page, use this method again\n if len(nextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",nextpagelink),\n callback=self.parse_poet_poems, meta= {'poet_poems_url': poet_poems_url})", "def parse_apartment_urls(self):\n\n # Generate soup for starting page\n soup = generate_soup(self.start_url)\n\n # Empties the urls list, in case it wasn't before\n self.apartment_urls = []\n\n # Get apartments in current page and store\n current_page_apartment_urls = self.list_get_apartment_urls(soup)\n self.apartment_urls = self.apartment_urls + current_page_apartment_urls\n\n # Check if there are more page to pull from\n while self.list_has_next_page(soup):\n soup = self.list_get_next_page(soup)\n\n # Get apartments in current page\n current_page_apartment_urls = self.list_get_apartment_urls(soup)\n self.apartment_urls = self.apartment_urls + current_page_apartment_urls", "def parse(self, response):\n s = Selector(response)\n\n page_nums = s.xpath('//ul[@class=\"paging-container\"]//a[not(@class=\"current\")]/@data-page').extract()\n\n if page_nums:\n last_page = int(page_nums[-1])\n else:\n last_page = 2\n\n for page in range(1, last_page):\n next_url = change_url_params(page_num=str(page), url=response.url)\n yield scrapy.Request(next_url, callback=self.parse_inner_urls)", "def parse(self, response):\n\n links = response.xpath('//td/font/a[contains(@href,\"chart\")]/@href').extract()\n for href in links:\n url = response.urljoin(href)\n yield scrapy.Request(url, callback=self.parse_director_page)\n\n pages = response.xpath('//font[@size=4]/b/a/@href').extract()\n next_page = \"\"\n\n for page in pages:\n page = response.urljoin(page)\n if page not in self.page_seen:\n next_page = page\n self.page_seen.add(page)\n break\n else:\n next\n\n if len(next_page) > 0:\n yield scrapy.Request(next_page, callback=self.parse)", "def parse(self, response):\n\n links_list = self._get_links(response)\n location = self._parse_location(response)\n ids_list = []\n start_time = self._parse_time(response)\n for item in response.css(\"article p\"):\n start = self._parse_start(item, start_time)\n if not start:\n continue\n meeting = Meeting(\n title=\"SSA #73 Chinatown Board\",\n description=\"\",\n classification=BOARD,\n start=start,\n end=None,\n all_day=False,\n time_notes=\"\",\n location=location,\n links=self._parse_links(item, start, links_list),\n source=response.url,\n )\n\n meeting[\"status\"] = self._get_status(meeting)\n meeting[\"id\"] = self._get_id(meeting)\n if meeting[\"id\"] in ids_list:\n continue\n else:\n ids_list.append(meeting[\"id\"])\n\n yield meeting", "def parse(self, response):\n product_urls = response.css('.product-details > a::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def parse(self, response):\n product_urls = response.css('.product__title > a::attr(href)').getall()\n for product_url in 
product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n next_page_url = response.css('.pagination__item--next a::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def parse(self, r):\n blocs = r.xpath(\"//article[@itemtype='https://schema.org/Product']//a/@href\").extract()\n if blocs:\n for product_sheet_link in blocs:\n next_page = r.urljoin(f\"http://qazaimmobilier.la-boite-immo.com{product_sheet_link}\")\n yield scrapy.Request(next_page, callback=self.parse_product)\n\n # paginate\n self.page += 1\n yield scrapy.Request(self.base_url + f\"{self.page}\")", "def parse(self, response):\n page_source = self.upwork_controller.get_source_home()\n\n # Hand-off between Selenium and Scrapy happens here\n sel = Selector(text=page_source)\n # Extract data\n sections = sel.xpath(\"//section/div\")\n\n for section in sections:\n selector = Selector(text=section.get())\n jobtitle = selector.xpath(\"//div/div/div/h4/a/text()\")\n jobdescription = selector.xpath(\"//div/div/div/div/div/div/div/span/span/text()\")\n hourlypay = selector.xpath(\"//div/div/div/div/small/span/strong/text()\")\n proposals = selector.xpath(\"//div/div/div/div/div/span/small/strong/text()\")\n country = selector.xpath(\"//div/div/div/div/small/span/span/span/span/strong[@class='text-muted client-location ng-binding']/text()\")\n\n job = Job(jobtitle=jobtitle.get(),\n jobdescription=jobdescription.get(),\n hourlypay=hourlypay.get(),\n proposals=proposals.get(),\n country=country.get())\n job.serialize()\n yield job.dict()", "def scrape_current_players(positions):\n for i in range(len(positions)):\n for page in range(6):\n position = positions[i]\n url = \"http://www.nfl.com/players/search?category=position&playerType=current&conference=ALL&d-447263-p=%s&filter=%s&conferenceAbbr=null\" % (page+1, position)\n try:\n soup = BeautifulSoup(ul.urlopen(url).read(), \"html.parser\")\n links = soup.findAll('a', href=re.compile('^/player/'))\n for j in range(len(links)):\n nameFirstLast = str(links[j]).split('\"')[2].lstrip('>').rstrip('</a>').split(',')[1].lstrip() + \" \" + str(links[j]).split('\"')[2].lstrip('>').rstrip('</a>').split(',')[0]\n link = \"http://www.nfl.com\" + str(links[j]).split('\"')[1].rstrip('profile') + \"gamelogs?season=\"\n outputLine = abbr[position], ',', nameFirstLast, ',', link, '\\n'\n with open(\"../CSV_data/ActivePlayerList.csv\", \"a\") as text_file:\n text_file.writelines(outputLine)\n text_file.close()\n except IOError, e:\n print 'Failed to open url'\n print '-------------------------------------'\n if hasattr(e, 'code'):\n print 'We failed with error code - %s.' 
% e.code\n elif hasattr(e, 'reason'):\n print \"The error object has the following 'reason' attribute :\"\n print e.reason\n return False", "def parse_all(self):\n\n # Generates a list of apartment urls\n self.parse_apartment_urls()\n\n # Parses each apartment url and stores it in apartment_data\n for apartment_url in self.apartment_urls:\n self.parse_single_page(apartment_url)", "def parse(self, response):\n next_selector = response.xpath('//div//li/a[@id=\"quotes_content_left_lb_NextPage\"]/@href')\n ticker = re.findall('symbol/(.+?)/', response.url)[0]\n\n for url in next_selector.extract():\n yield Request(url, callback = self.parse)\n \n links = response.xpath('//div//span[@class=\"fontS14px\"]/a/@href').extract()\n for link in links:\n # meta is passed along with the response into the spider\n # allowing it to access what ticker it's using\n yield Request(link, callback = self.parse_articles, meta = {'ticker': ticker})", "def parse(self, response): \n # links in the navbar\n categories = response.css('.sub a::attr(href)').getall()\n for categorie in categories:\n yield scrapy.Request(response.urljoin(categorie))\n \n next_page_url = response.css('.pagination_next a::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))\n \n product_urls = response.css('.product-name::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)", "def loop_pages(self, response):\n\n current_page = response.xpath(\"//a[@class='currentPage ']/text()\")\n print(\"current page: {0}\".format(current_page.extract_first()))\n\n next_page_link = response.xpath(\"//a[@class='text' and contains(., 'Next')]\")\n next_page_link = next_page_link.xpath('@href').extract_first()\n\n # urls_stories is a tuple with a url, and a corresponding Story object\n urls_stories = self.get_thread_urls(response)\n\n if self.generate_test is None:\n # generate requests for -- new -- stories\n for (url, story) in urls_stories:\n yield scrapy.Request(url, callback=self.scan_thread, priority=1, meta={\"story_item\": story})\n\n # generate requests for stories that need to be updated.\n for (url, story) in self.update_list:\n yield scrapy.Request(url, callback=self.update_stories, priority=2, meta={\"story_item\": story})\n\n if next_page_link is not None:\n\n # print(\"next page link: {0}\".format(next_page_link))\n next_page_link = response.urljoin(next_page_link)\n yield scrapy.Request(next_page_link, callback=self.loop_pages, priority=0)\n else:\n \"\"\"\n This section activates if self.generate_test is not None.\n A thread url is required to be provided to generate a test scenario out of that\n thread.\n It scans the site looking for this thread, and scrapes it.\n If it doesn't find it, it scans the next page.\n \"\"\"\n print(\"\\n\\tGENERATING TEST SCENARIO\\n\")\n for (url, story) in urls_stories:\n if url == self.test_url:\n yield scrapy.Request(url, callback=self.scan_thread, priority=0, meta={\"story_item\": story})\n return\n\n for (url, story) in self.update_list:\n if url == self.test_url:\n yield scrapy.Request(url, callback=self.scan_thread, priority=0, meta={\"story_item\": story})\n return\n\n next_page_link = response.urljoin(next_page_link)\n yield scrapy.Request(next_page_link, callback=self.loop_pages, priority=0)" ]
[ "0.75489324", "0.71242", "0.5760647", "0.5611293", "0.55545515", "0.55472314", "0.5544202", "0.5456553", "0.5455188", "0.5418424", "0.54031754", "0.5396842", "0.53465706", "0.53382075", "0.5328522", "0.53110784", "0.529639", "0.52911603", "0.52840555", "0.5279618", "0.52429235", "0.5218977", "0.5190188", "0.5185839", "0.51852757", "0.5165137", "0.5158033", "0.51576805", "0.51552755", "0.51506716" ]
0.74549824
1
This method looks at each movement page and creates a new PoetItem for each poet found in the page's table
def parse_movement(self, response): movement_name = response.meta['movement_name'] movement_url = response.meta['movement_url'] sresponse = scrapy.Selector(response) #Because each movement page contains a table that has maximum of ten rows, we need to go to the next page #in order to extract all of the poets associated for each movement poetnextpagelink = u''.join(sresponse.xpath('//a[@title = "Go to next page"]/@href').extract()) table = sresponse.xpath('//tbody/tr') for row in table: item = PoetItem() item['movement_name'] = movement_name item['movement_url'] = movement_url if len(row.xpath('td/a/text()').extract())>0: item['poet_name'] = row.xpath('td/a/text()').extract() if len(row.xpath('td/a/@href').extract())>0: #the link is for the poet bio page on poetry.org website link = u''.join(row.xpath('td/a/@href').extract()) item['poet_url'] = urlparse.urljoin("http://www.poets.org",link) if len(row.xpath('td/span/text()').extract()) > 0: item['poet_dob2'] = row.xpath('td/span/text()').extract() if len(row.xpath('td/text()').extract())>0: #a poet may be tagged/associated with multiple movements item['poet_tags'] = row.xpath('td/text()').extract() yield scrapy.Request(url =urlparse.urljoin("http://www.poets.org",link), callback=self.parse_poet, meta = {'item': item}) #if more poets on next page, use this method again if len(poetnextpagelink) > 0: yield scrapy.Request(url = urlparse.urljoin("http://www.poets.org",poetnextpagelink), callback=self.parse_movement, meta = {'movement_name': movement_name, 'movement_url':movement_url})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_poet(self, response):\n item = response.meta['item']\n\n sresponse = scrapy.Selector(response)\n poetdata = sresponse.xpath('//div[@class=\"view-content\"]')\n\n #TODO: Clear empty strings from poet item fields\n\n item['poet_basicbio'] = poetdata[0].xpath('div/span//text()').extract()\n item['poet_positions'] = poetdata[0].xpath('div//div/text()').extract()\n item['poet_posyears'] = poetdata[0].xpath('div//div/span/text()').extract()\n item['poet_bio'] = sresponse.xpath('//div[@class=\"field-items\"]//p//text()').extract()\n\n #this important link goes to the page of poems for each poet\n poetpoemlink = u''.join(sresponse.xpath('//div[@class=\"view-footer\"]/a/@href').extract())\n poet_poems_url = urlparse.urljoin(\"http://www.poets.org\",poetpoemlink)\n\n item['poet_poems_url'] = poet_poems_url\n\n #PoetItem finishes here\n yield item\n\n #goes to method that parses poems found in the poet_poems_url\n yield scrapy.Request(url=poet_poems_url, callback=self.parse_poet_poems, meta={'poet_poems_url': poet_poems_url })", "def parse_poet_poems(self, response):\n poet_poems_url = response.meta['poet_poems_url']\n\n sresponse = scrapy.Selector(response)\n\n #like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next\n # page in order to extract all of the poems associated with each poet\n nextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table_poems = sresponse.xpath('//tbody/tr')\n\n #poetry.org does not provide text for all of the poems available, some links are for audio versions only,\n #therefore need to avoid storing poemitems that are not text\n regex = re.compile(r'audio')\n\n for row in table_poems:\n if len(row.xpath('td/a/@href').extract()[0]) > 0 :\n poemlink = u''.join(row.xpath('td/a/@href').extract()[0])\n linktext = str(poemlink)\n if regex.search(linktext) is None:\n if len(row.xpath('td//text()').extract())>0:\n poemitem = PoemItem()\n poemitem['poet_poems_url'] = poet_poems_url\n poemitem['poem_yrpub'] = row.xpath('td//text()').extract()[1]\n poemitem['poem_title'] = row.xpath('td//text()').extract()[4]\n poemitem['poem_link'] = urlparse.urljoin(\"http://www.poets.org\",poemlink)\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poemlink),\n callback=self.parse_poet_poem, meta={'poemitem': poemitem})\n\n #if more poems on next page, use this method again\n if len(nextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",nextpagelink),\n callback=self.parse_poet_poems, meta= {'poet_poems_url': poet_poems_url})", "def create_page_objects(self, data):\n for page in data['pages']:\n self.create_page(page)", "def parse_movements(self, response):\n\n # Gets description of all movements\n movements = response.xpath('//tr[contains(@class, \"description\")]')\n for movement in movements:\n row = movement.xpath(\".//td\")\n for i, cell in enumerate(row):\n if i == 1:\n timestamp = cell.xpath(\"text()\").extract_first().strip()\n movement_date = datetime.strptime(timestamp, \"%d/%m/%Y\")\n elif i == 2:\n description = cell.xpath(\".//b/text()\").extract_first().strip()\n elif i == 3:\n category = cell.xpath(\"text()\").extract_first().strip()\n sub_category = cell.xpath(\".//i/text()\").extract_first().strip()\n elif i == 5:\n # If currency is unknown skips current movement\n try:\n amount = convert_amount(\n cell.xpath(\".//b/text()\").extract_first().strip()\n )\n except CurrencyException as exc:\n msg = \"Skipping movement 
{} of {}. {}\".format(\n description, movement_date, exc\n )\n logger.exception(msg)\n break\n\n # Losses are saved as negative values\n if response.url == self.LOSSES_URL:\n amount = -amount\n\n # Creates new Movement if it doesn't already exists\n Movement.objects.get_or_create(\n date=movement_date,\n description=description,\n category=category,\n sub_category=sub_category,\n amount=amount,\n )\n\n # If last month parsed is current one returns since I might be dead by the night\n today = date.today()\n if (\n response.meta[\"date\"].year >= today.year\n and response.meta[\"date\"].month > today.month\n ):\n return\n\n # A call might fail from time to time since Splash container crashes\n # randomly and needs to restart, if that happens the page can't be\n # scraped so the call must be repeated for that same month\n if response.status == 200:\n next_month = response.meta[\"date\"] + timedelta(weeks=4)\n else:\n next_month = response.meta[\"date\"]\n\n # Creates request to get next month movements\n request = scrapy_splash.SplashRequest(\n response.url,\n callback=self.parse_movements,\n endpoint=\"execute\",\n cache_args=[\"lua_source\"],\n dont_filter=True,\n args={\n \"lua_source\": self.movements_lua,\n \"moneymap_url\": self.MONEYMAP_URL,\n \"meseanno\": response.meta[\"date\"].strftime(\"%m%Y\"),\n \"dopoAggiornamento\": \"false\",\n \"idBrand\": \"\",\n },\n meta={\"date\": next_month},\n )\n return [request]", "def process_item(self, item, spider):\n if item is None:\n raise DropItem(\"Something went wrong in parsing data...\")\n try:\n self.curr.execute(\n SqlStatements.insert_new_real_estate(),\n (\n item['listing_type'],\n item['property_type'], \n item['price'], \n item['location_city'], \n item['location_city_district'], \n item['area_property'],\n item['area_land'],\n item['construction_type'],\n item['num_floors_building'],\n item['apartment_floor'],\n item['registered'],\n item['heating_type'],\n item['num_rooms'],\n item['num_bathrooms'],\n item['source']\n )\n )\n self.conn.commit()\n except Exception as e:\n print(e)\n self.conn.rollback()\n return item\n self._log_progress()\n return item", "def loot_logic1(self):\n\n # self.statusflag = 'looting'\n corpse_items = list(self.corpse_cont.GetItems())\n precount = len(corpse_items)\n\n for i in corpse_items:\n # print 'Scanning Corpse @ Item.Id: ', i.Id\n if i.Id in tid.loot_gold:\n bpmain_item = inven.GetItemInSlot(3)\n # i.Move(bpmain_item.Location, System.Byte(i.Count))\n # sleep(0.3)\n self.loot_enq_move(i, bpmain_item.Location.ToLocation())\n print 'Looting: ', tid.loot_list[i.Id]\n\n # Can Also try putting into into last slot of container.\n # toloc = list(bpmain.GetItems())[0].Location... 
change Z to Volume-1\n # (0, +1, ID, containerid+64, +1)\n return\n\n elif i.Id in tid.loot_list: # and not in loot_gold\n if i.Id in tid.loot_rares:\n lootbp = self.loot_bps[self.idx_rare]\n elif i.Id in tid.loot_commons:\n lootbp = self.loot_bps[self.idx_com]\n elif i.Id in tid.loot_stack:\n lootbp = self.loot_bps[self.idx_stk]\n elif i.Id in tid.loot_test: # Test\n lootbp = self.loot_bps[self.idx_rare]\n\n # i.Move(lootbp.Location, System.Byte(i.Count)) # IMPLEMENT PACKET!\n # sleep(0.3)\n self.loot_enq_move(i, lootbp.Location.ToLocation())\n print 'Looting: ', tid.loot_list[i.Id]\n\n # Check for completion:\n postcount = len(list(self.corpse_cont.GetItems()))\n if postcount == precount: # Item did not move\n pass\n elif postcount < precount:\n if i.Id in tid.loot_rares:\n self.ct_rare += 1\n if self.ct_rare == 20:\n self.idx_rare += 1\n print 'changing bp'\n elif i.Id in tid.loot_commons:\n self.ct_com += 1\n if self.ct_com == 20:\n self.idx_com -= 1\n elif i.Id in tid.loot_stack:\n self.ct_stk += 1\n if self.ct_stk == 20:\n self.idx_stk -= 1\n elif i.Id in tid.loot_test: # Test\n self.ct_rare += 1\n if self.ct_rare == 20:\n self.idx_rare += 1\n\n return\n\n elif i == corpse_items[-1]: # At last item, and not in tid.loot_list\n for j in corpse_items:\n if j.Id in tid.loot_subcont:\n # PQI Implementation should not be needed here\n j.OpenAsContainer(System.Byte(self.corpse_cont.Number))\n return\n\n # No subcont\n # PQI Implementation should not be needed here\n self.pqi.tryct[2] = 0\n self.corpse_cont.Close()\n # Consider using an 'islooting' flag\n\n # PQI Implementation should not be needed here\n self.pqi.tryct[2] = 0\n self.corpse_cont.Close() # Should only occur if corpse is empty", "def parse(self, response):\n movements_to_scrape = [\"Beat\",\"Black Arts\",\"Black Mountain\",\"Conceptual Poetry\",\"Concrete Poetry\",\n \"Confessional Poetry\",\"Contemporary\",\"Dark Room Collective\",\"Formalism\",\"Futurism\",\n \"Harlem Renaissance\",\"Jazz Poetry\",\"Language Poetry\",\"Modernism\",\"New Formalism\",\n \"New York School\",\"Objectivists\",\"San Francisco Renaissance\",\"Slam/Spoken Word\",\n \"Surrealism\",\"Symbolists\"]\n\n sresponse = scrapy.Selector(response)\n\n #sites are selectors found in the school movements table\n sites = sresponse.xpath('//div[@class = \"school_movements\"]//ul/li/a')\n for site in sites:\n if ''.join(site.xpath('text()').extract()) in movements_to_scrape:\n movement_name = site.xpath('text()').extract()\n link = u''.join(site.xpath('@href').extract())\n movement_url = urlparse.urljoin(\"http://www.poets.org\",link)\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",link), callback=self.parse_movement,\n meta = {'movement_name': movement_name, 'movement_url':movement_url})", "def box_transform_page(page, direction_list=[]):\n\n if len(page.children) > 1:\n\n # For all children of the page\n for idx in range(0, len(page.children)-1):\n\n # Take two children at a time\n p1 = page.get_child(idx)\n p2 = page.get_child(idx+1)\n\n change_proportion = np.random.randint(\n 10,\n cfg.full_page_movement_proportion_limit\n )\n\n change_proportion /= 100\n\n # Randomly move the line between them up or down one side\n if len(direction_list) < 1:\n direction = np.random.choice([\"rup\", \"lup\"])\n else:\n direction = direction_list[idx]\n\n # If the first panel is horizontal therefore the second is too\n if p1.orientation == \"h\":\n\n # Get the maximum amount the line can move\n change_max = min([(p1.x4y4[1] - p1.x1y1[1]),\n (p2.x4y4[1] - 
p2.x1y1[1])])\n\n change = change_max*change_proportion\n\n # Specify the line to move\n line_top = p2.x1y1\n line_bottom = p2.x2y2\n\n # If the panel has children then recursively\n # find the leaf children and move them to the new line\n if len(p1.children) > 0:\n move_children_to_line(p1,\n (line_top, line_bottom),\n change,\n \"h\",\n direction\n )\n\n # Otherwise move the current panels to line\n else:\n if direction == \"rup\":\n p1.x4y4 = (p1.x4y4[0], p1.x4y4[1] + change)\n p1.refresh_coords()\n else:\n p1.x4y4 = (p1.x4y4[0], p1.x4y4[1] - change)\n p1.refresh_coords()\n\n if len(p2.children) > 0:\n move_children_to_line(p2,\n (line_top, line_bottom),\n change,\n \"h\",\n direction\n )\n else:\n if direction == \"rup\":\n p2.x1y1 = (p2.x1y1[0], p2.x1y1[1] + change)\n p2.refresh_coords()\n else:\n p2.x1y1 = (p2.x1y1[0], p2.x1y1[1] - change)\n p2.refresh_coords()\n\n # If the first panel is vertical therefore the second\n # is too since they are siblings\n else:\n # Get the maximum amount the line can move\n change_max = min([(p1.x2y2[0] - p1.x1y1[0]),\n (p2.x2y2[0] - p2.x1y1[0])])\n\n change = change_max*change_proportion\n\n # Specify the line to move\n line_top = p2.x1y1\n line_bottom = p2.x4y4\n\n # If the panel has children then recursively\n # find the leaf children and move them to the new line\n if len(p1.children) > 0:\n move_children_to_line(p1,\n (line_top, line_bottom),\n change,\n \"v\",\n direction\n )\n\n # Otherwise just move the panel since it's a leaf\n else:\n if direction == \"rup\":\n p1.x2y2 = (p1.x2y2[0] - change, p1.x2y2[1])\n else:\n p1.x2y2 = (p1.x2y2[0] + change, p1.x2y2[1])\n\n if len(p2.children) > 0:\n move_children_to_line(p2,\n (line_top, line_bottom),\n change,\n \"v\",\n direction\n )\n else:\n if direction == \"rup\":\n p2.x1y1 = (p2.x1y1[0] - change, p2.x1y1[1])\n else:\n p2.x1y1 = (p2.x1y1[0] + change, p2.x1y1[1])\n\n return page", "def process_item(self, item, spider):\n writer = csv.writer(self.file, delimiter = '|')\n for apartment in item[\"apartments\"]:\n row = [apartment[\"price\"], apartment[\"size\"], apartment[\"rooms\"], apartment[\"address\"], apartment[\"lat\"],\n apartment[\"lng\"], apartment[\"zone\"], apartment[\"band\"], apartment[\"east\"], apartment[\"north\"],\n apartment[\"date\"]]\n writer.writerow(row)\n self.file.flush()\n print(\"page {} processed.\".format(item[\"page\"]))\n return item", "def place_items(self):\n for item in self.item_kit:\n coords = self.maze.random_coordinates()\n item(coords, self.scale)", "def PosPlanet (self, deltaT):\n\n for planet in self.planets:\n position = planet.position + (planet.velocity * deltaT)\n planet.position = position #Each body's resulting position is updated to the body's information defined in the Particle class.", "def procesPage(self, page):\n item = pywikibot.ItemPage.fromPage(page)\n pywikibot.output('Processing %s' % page)\n if not item.exists():\n pywikibot.output('%s doesn\\'t have a wikidata item :(' % page)\n #TODO FIXME: We should provide an option to create the page\n else:\n pagetext = page.get()\n templates = pywikibot.extract_templates_and_params(pagetext)\n for (template, fielddict) in templates:\n # Clean up template\n template = pywikibot.Page(page.site, template,\n ns=10).title(withNamespace=False)\n # We found the template we were looking for\n if template in self.templateTitles:\n for field, value in fielddict.items():\n field = field.strip()\n value = value.strip()\n # This field contains something useful for us\n if field in self.fields:\n # Check if the 
property isn't already set\n claim = pywikibot.Claim(self.repo, self.fields[field])\n if claim.getID() in item.get().get('claims'):\n pywikibot.output(\n u'A claim for %s already exists. Skipping'\n % claim.getID())\n # TODO FIXME: This is a very crude way of dupe\n # checking\n else:\n if claim.getType() == 'wikibase-item':\n # Try to extract a valid page\n match = re.search(pywikibot.link_regex, value)\n if match:\n try:\n link = pywikibot.Link(match.group(1))\n linkedPage = pywikibot.Page(link)\n if linkedPage.isRedirectPage():\n linkedPage = linkedPage.getRedirectTarget()\n linkedItem = pywikibot.ItemPage.fromPage(linkedPage)\n claim.setTarget(linkedItem)\n except pywikibot.exceptions.NoPage:\n pywikibot.output('[[%s]] doesn\\'t exist so I can\\'t link to it' % (linkedItem.title(),))\n continue\n elif claim.getType() == 'string':\n claim.setTarget(value.strip())\n else:\n pywikibot.output(\"%s is not a supported datatype.\" % claim.getType())\n continue\n\n pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget()))\n item.addClaim(claim)\n # A generator might yield pages from multiple sites\n source = self.getSource(page.site)\n if source:\n claim.addSource(source, bot=True)", "def create_objects(cls, table):\n x = 2\n state = State(table[1][4])\n while x < len(table):\n line = table[x]\n if line[5] == \"powiat\" or line[5] == \"miasto na prawach powiatu\":\n county = County(line[4], line[1])\n state.in_state(county)\n elif line[5] == \"miasto\":\n city = City(line[4], line[1], line[2])\n state.in_state(city)\n elif line[5] == \"gmina miejska\":\n city_community = City_Community(line[4], line[1], line[2])\n state.in_state(city_community)\n elif line[5] == \"gmina wiejska\":\n village_community = Village_Community(line[4], line[1], line[2])\n state.in_state(village_community)\n elif line[5] == \"gmina miejsko-wiejska\":\n city_village_community = City_Village_Community(line[4], line[1], line[2])\n state.in_state(city_village_community)\n elif line[5] == \"obszar wiejski\":\n village_square = Village_square(line[4], line[1], line[2])\n state.in_state(village_square)\n elif line[5] == \"delegatura\":\n delagacy = Delegacy(line[4], line[1], line[2])\n state.in_state(delagacy)\n x+=1\n\n for county in state.in_s:#adding community objects to a proper county\n if type(county) == County:\n for community in state.in_s:\n if community.county_number == county.county_number and type(community) != County:\n county.in_county(community)\n\n return state", "def procesPage(self, page):\n item = pywikibot.ItemPage.fromPage(page)\n pywikibot.output('Processing %s' % page)\n if not item.exists():\n pywikibot.output('%s doesn\\'t have a wikidata item :(' % page)\n #TODO FIXME: We should provide an option to create the page\n else:\n pagetext = page.get()\n templates = pywikibot.extract_templates_and_params(pagetext)\n for (template, fielddict) in templates:\n # We found the template we were looking for\n if template.replace(u'_', u' ')==self.templateTitle:\n for field, value in fielddict.items():\n # This field contains something useful for us\n if field in self.fields:\n # Check if the property isn't already set\n claim = pywikibot.Claim(self.repo, self.fields[field])\n if claim.getID() in item.get().get('claims'):\n pywikibot.output(u'A claim for %s already exists. 
Skipping' % (claim.getID(),))\n #TODO FIXME: This is a very crude way of dupe checking\n else:\n # Try to extract a valid page\n match = re.search(pywikibot.link_regex, value)\n if match:\n try:\n link = pywikibot.Link(match.group(1))\n linkedPage = pywikibot.Page(link)\n if linkedPage.isRedirectPage():\n linkedPage = linkedPage.getRedirectTarget()\n linkedItem = pywikibot.ItemPage.fromPage(linkedPage)\n claim.setTarget(linkedItem)\n pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget().getID()))\n item.addClaim(claim)\n if self.source:\n claim.addSource(self.source, bot=True)\n except pywikibot.exceptions.NoPage:\n pywikibot.output('[[%s]] doesn\\'t exist so I can\\'t link to it' % (linkedItem.title(),))", "def process_item(self, item, spider):\n # remove SQL support\n # use csv to store data\n #check whether table already exsit in pd_dict\n if item[\"table\"] not in self.pd_dict:\n #check whether csv with table name exit\n file = basePath +'/'+ item[\"table\"]+'.csv'\n if os.path.exists(file):\n df = pd.read_csv(file)\n self.pd_dict.update({item[\"table\"]: df})\n else:\n df = pd.DataFrame(columns = ['animatetitle', 'othertitle', 'cross_s','nums', 'last_title'])\n self.pd_dict.update({item[\"table\"]: df})\n\n if item['animatetitle'] not in self.pd_dict[item[\"table\"]]['animatetitle'].values:\n self.pd_dict[item[\"table\"]] = self.pd_dict[item[\"table\"]].append(\n {'animatetitle' : item['animatetitle'], 'othertitle' : item['othertitle'], 'cross_s' : item['cross'],'nums':item['nums'], 'last_title':item['last_title']}, \n ignore_index = True)\n\n return item", "def generate_raport(table):\n\n item_table = store.get_table()\n item_table = store.check_table(item_table)\n raport = []\n\n id_position = 0\n title_position = 1\n price_position = 3\n earnings = 0\n\n for item in item_table:\n price = item[price_position]\n sold_copies = get_sold_copies(table, item[id_position])\n if price and sold_copies:\n earnings = int(price) * int(sold_copies)\n raport_row = [item[title_position], str(earnings)]\n raport.append(raport_row)\n\n return raport", "def process_item(self, item, spider):\n session = self.Session()\n article = Article()\n restaurant = Restaurant()\n\n # populate article\n article.url = item['article_url']\n article.title = item['article_title']\n article.datetime = item['article_datetime']\n \n # populate restaurant\n restaurant.name = item['restaurant_name']\n restaurant.slug = item['restaurant_slug']\n restaurant.address = item['restaurant_address']\n restaurant.googlemaps_url = item['restaurant_googlemaps']\n restaurant.googlemaps_id = parse_googlemaps_id(restaurant.googlemaps_url)\n restaurant.lat = parse_lat(restaurant.googlemaps_url)\n restaurant.lng = parse_lng(restaurant.googlemaps_url)\n\n # determine if new article\n exist_article = session.query(Article).filter_by(url = article.url).first()\n if exist_article: \n article = exist_article\n\n # determine if new restaurant\n exist_restaurant = session.query(Restaurant).filter_by(slug = restaurant.slug).first()\n if exist_restaurant: \n restaurant = exist_restaurant\n if article not in restaurant.articles: \n restaurant.articles.append(article)\n else:\n # geocode for lat lng if necessary\n if restaurant.googlemaps_id: \n restaurant.lat, restaurant.lng, restaurant.address = convert_id(restaurant.googlemaps_id)\n # add article to restaurant.articles\n restaurant.articles.append(article)\n\n try:\n session.add(restaurant)\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n 
session.close()\n\n return item", "def _parse_xml(self):\n self.properties = {}\n pages = self.root.findall('page')\n self.pages = {} \n\n for page_num, page in enumerate(pages): \n\n _, _ , width, height = page.attrib[\"bbox\"].split(\",\")\n width, height = float(width), float(height)\n \n page_object = {\"page\": page_num + 1 , \"width\": width, \"height\": height} \n lines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n print(\"{} Number of Lines in Page {}\".format(len(lines), page_num))\n \n self.bbox = {'x1': [] , 'y1':[], 'x2':[], 'y2':[]}\n textlines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n textlines = sorted(textlines, key= lambda x: -float(x.attrib['bbox'].split(',')[3]))\n \n \n line_objects = []\n for idx, item in enumerate(textlines):\n item_props = self._extract_textline_properties(item)\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3])\n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n\n line_objects.append(item_props)\n page_object[\"lines\"] = line_objects\n \n \n others = [] \n# for key in [\"rect\", \"figure\", \"layout/textgroup\", \"curve\"]: \n for key in [\"curve\", \"rect\", \"figure\"]: \n other_objs = self.root.findall('page[@id=\\'{}\\']/{}'.format(page_num+1, key)) \n for idx, item in enumerate(other_objs):\n \n item_props = {\"type\": key}\n# print(key, ET.tostring(item))\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3]) \n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n others.append(item_props)\n \n page_object[\"others\"] = others\n page = Page(page_object)\n page_object[\"para\"] = page.para\n page_object[\"plines\"] = page.lines\n page_object[\"bigbox\"] = page.bigbox\n page_object[\"components\"] = page.components\n\n self.pages[page_num+1] = page_object", "def process_item(self, item, spider):\n session = self.Session()\n sales = item['sales']\n values = item['values']\n del item['sales']\n del item['values']\n property = Property(**item)\n\n try:\n session.add(property)\n # flush to obtain the id of property to be used as the foreign key\n session.flush()\n\n for sale in sales:\n sale['property_id'] = property.id\n session.add(PropertyTransfer(**sale))\n for value in values:\n value['property_id'] = property.id\n session.add(PropertyValue(**value))\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n return item", "def _generate_pileups(self):\n pass", "def trainer_party(trainer_id):\n\n db.execute('SELECT * FROM pokemon_party WHERE trainer_id= :trainer_id',{'trainer_id': trainer_id})\n pokemon_trainer_list = db.fetchall() \n\n pokemon_party = []\n #If pokemon party_tranier list is empty prompt them to create their own team\n\n for pokemon in pokemon_trainer_list:\n\n db.execute('SELECT * FROM pokemon WHERE pokedex_id= :pokemon_id',{'pokemon_id':pokemon[2] })\n monster = db.fetchone()\n # print(monster)\n #Pokemon Name\n monster_name = monster[1]\n #Pokemon Level\n monster_level = monster[2]\n #First pokemon type\n monster_type1 = db.execute('SELECT type FROM pokemon_type WHERE id= :id', {'id': 
monster[3]}).fetchone()\n #second pokemon type\n monster_type2 = db.execute('SELECT type FROM pokemon_type WHERE id= :id', {'id': monster[4]}).fetchone()\n #pokemon base hp\n monster_hp = monster[5]\n #pokemon base attack\n monster_atk = monster[6]\n #pokemon base defense\n monster_def = monster[7]\n #pokemon base special attack\n monster_spatk = monster[8] \n #pokemon base special defense\n monster_spdef = monster[9]\n #pokemon base speed\n monster_spe = monster[10]\n\n pkmn = Pokemon(monster_name, monster_level, monster_type1[0], monster_type2[0], monster_hp, monster_atk, monster_def, monster_spatk, monster_spdef, monster_spe)\n #assign all weakness and resistance to pokemon after their creation\n pkmn.pokemon_weak_resist(monster_type1[0],monster_type2[0])\n \n pokemon_party.append(pkmn)\n \n return pokemon_party", "def create(self, pile):\n self.pile_list = pile", "def create_planetes():\n planetes = {}\n for i in range(constants.MAXPLANET):\n while True:\n planete = make_planet()\n if (planete.name not in planetes and all(planete.distance(p) >= constants.MIN_DISTANCE for p in planetes.values())):\n PriceSlip(planete)\n planetes[planete.name] = planete\n\n break\n\n # pick one, set to homeworld\n planete = random.choice(list(planetes.values()))\n planete.homeworld = True\n planete.visited = True\n\n return list(planetes.values())", "def _move_particle(self, ip):\n # RANDOM WALK\n # ADVANCE ONLY THE PARTICLES THAT ARE \"ON\" (i.e. ABOVE STTHR).\n #\n particle = self.particles[ip] # get particle\n props = [\"state\", \"type\", \"x\", \"y\", \"ux\", \"vy\", \"factor\", \"tmem\"]\n state, pType, x, y, ux, vy, factor, tmem = particle.get_from_keys(props)\n if state > STTHR and pType == 1:\n DU = -(ux - UXM)*2.0*TFREQ*self.DT + CLANG*self.SQRTDT*normal()\n DV = -(vy - VYM)*2.0*TFREQ*self.DT + CLANG*self.SQRTDT*normal()\n UXP = ux + DU\n VYP = vy + DV\n XP = x + UXP*self.DT*factor\n YP = y + VYP*self.DT*factor\n STP = state*np.exp(-self.DT/tmem)\n particle.update(ux=UXP, vy=VYP, x=XP, y=YP, state=STP)\n elif (state > STTHR) and pType == 2:\n DU = ULAM*normal()\n DV = ULAM*normal()\n XP = x + DU*self.DT\n YP = y + DV*self.DT\n STP = state*np.exp(-self.DT/ TMEMRAD)\n particle.update(x=XP, y=YP, state=STP)\n if x > self.grid.XMAX - self.grid.DX:\n particle.update(x=self.grid.XMAX - self.grid.DX, state=0.)\n elif x < self.grid.XMIN + self.grid.DX:\n particle.update(x=self.grid.XMIN + self.grid.DX, state=0.)\n if y > self.grid.YMAX - self.grid.DY:\n particle.update(y=self.grid.YMAX - self.grid.DY, state=0.)\n elif y < self.grid.YMIN + self.grid.DY:\n particle.update(y=self.grid.YMIN + self.grid.DY, state=0.)", "def move_info(self,req):\n def copy_item(st,dt):\n \"copy info from st to dt\"\n # move plays\n# print \"moving plays for \",st.uid,\" to \",dt.uid\n execute(\"update %s.plays set page=%s where page=%s \" % (self.Config.database,dt.uid,st.uid))\n # move tags \n execute(\"update %s.tags set page=%s where page=%s \" % (self.Config.database,dt.uid,st.uid))\n # copy info\n dt.name=st.name\n dt.when=st.when\n dt.composer=st.composer\n dt.artist=st.artist\n dt.text=st.text\n dt.rating=st.rating\n dt.prefs=st.prefs\n #dt.score=st.score\n dt.flush()\n st.name=st.name+\" (old version)\"\n st.rating= -4 # set to X \n st.flush()\n # move images\n st.get_images() # create st.images\n for i in st.images:\n i.parent=dt.uid\n i.set_lineage(pob=dt)\n i.flush()\n try:\n dob=self.get(safeint(req.to))\n except:\n dob=None\n if (not dob):\n return \"specify destination as ?to=[UID]\"\n elif 
(self.kind!=dob.kind):\n return \"source is a %s but destination is a %s\" % (self.kind,dob.kind) \n if self.parent!=dob.parent:\n return \"source and destination parent mismatch\"\n if self.kind=='album':\n copy_item(self,dob)\n# for st in self.list(parent=self.uid,kind='track',where=\"rating>=0\",orderby=\"uid\"):\n for st in self.list(parent=self.uid,kind='track',orderby=\"uid\"):\n dt=dob.list(parent=dob.uid,kind=\"track\",seq=st.seq) # get corresponding track from dob\n if dt:\n copy_item(st,dt[0])\n elif self.kind=='track':\n copy_item(self,dob)\n else:\n return \"not an album or track...\"\n req.message=\"info copied/moved to %s\" % dob.uid\n return self.view(req)", "def compute_trips(self):\n #_logger.info(TripTemplate.compute_trips.__name__)\n trip_templates = self.search([])\n\n # we delete commercial next week if exist\n for trip_template in trip_templates:\n commercial_trips = trip_template.get_commercial_next_week_trips()\n\n if commercial_trips:\n commercial_trips.unlink()\n\n # we get all the commercial trip_template\n\n #we add the new trip(s)\n for trip_template in trip_templates:\n current_year = trip_template.get_today_year()\n next_week_number = trip_template.get_today_week_number() + 1\n weekday = trip_template.get_trip_day_weekday()\n\n trip_day = trip_template.compute_tripday(current_year, next_week_number, weekday)\n\n customers_ids = [customer.id for customer in trip_template.customer_ids]\n # we create new next week trips\n trip_map = {\n # 'name': f\"TOURNEE DU {trip_day} de {commercial_trip_template.commercial_id.name}\",\n 'name': \"TOURNEE DU {0} de {1}\".format(trip_day, trip_template.commercial_id.name),\n 'trip_day': trip_day,\n 'commercial_id': trip_template.commercial_id.id,\n 'customer_ids': [(6, False, customers_ids)]\n }\n\n record = self.env['sales_trip.trip'].create(trip_map)", "def migration(self):\n\n coordinates = self.get_random_coordinates()\n for coordinate in coordinates:\n if isinstance(self.cells[coordinate], (Jungle, Savannah, Desert)):\n self.cell_move_herbivores(coordinate)\n self.cell_move_carnivores(coordinate)\n\n for coordinate in coordinates:\n if isinstance(self.cells[coordinate], (Jungle, Savannah, Desert)):\n self.cells[coordinate].move_new_animals()", "def parse_poet_poem(self, response):\n poemitem = response.meta['poemitem']\n sresponse = scrapy.Selector(response)\n poemitem['poem_text'] = sresponse.xpath('//div[@property = \"content:encoded\"]//text()').extract()\n poemitem['poem_copyright'] = sresponse.xpath('//div[@class = \"poem-credit\"]//p//text()').extract()\n\n yield poemitem", "def distribute(self, page):\n self.crawl_page(self.keyword, self.since, self.to, page)", "def create_game_order():\n game_match_ups = []\n for match_up in match_ups:\n game_order = GameOrder(\n away_position=AwayPlayPosition.objects.get(name='Player {}'.format(num2words(match_up[0]))),\n home_position=HomePlayPosition.objects.get(name='Player {}'.format(num2words(match_up[1]))),\n name=num2words(len(game_match_ups) + 1),\n )\n game_order.save()\n game_match_ups.append(game_order)\n return game_match_ups" ]
[ "0.6383938", "0.62331283", "0.548556", "0.5451658", "0.52535385", "0.5233169", "0.51950914", "0.5124749", "0.5098735", "0.50874716", "0.5037494", "0.5027248", "0.49692222", "0.4942993", "0.49003536", "0.48962796", "0.48873606", "0.48739573", "0.4872106", "0.48439267", "0.4840501", "0.47641274", "0.47608632", "0.47529632", "0.47334617", "0.47274733", "0.47147793", "0.47033784", "0.47031984", "0.4701653" ]
0.71895266
0
This method scrapes data (bio, URL of all poems) from each poet page to continue creating the poet item
def parse_poet(self, response): item = response.meta['item'] sresponse = scrapy.Selector(response) poetdata = sresponse.xpath('//div[@class="view-content"]') #TODO: Clear empty strings from poet item fields item['poet_basicbio'] = poetdata[0].xpath('div/span//text()').extract() item['poet_positions'] = poetdata[0].xpath('div//div/text()').extract() item['poet_posyears'] = poetdata[0].xpath('div//div/span/text()').extract() item['poet_bio'] = sresponse.xpath('//div[@class="field-items"]//p//text()').extract() #this important link goes to the page of poems for each poet poetpoemlink = u''.join(sresponse.xpath('//div[@class="view-footer"]/a/@href').extract()) poet_poems_url = urlparse.urljoin("http://www.poets.org",poetpoemlink) item['poet_poems_url'] = poet_poems_url #PoetItem finishes here yield item #goes to method that parses poems found in the poet_poems_url yield scrapy.Request(url=poet_poems_url, callback=self.parse_poet_poems, meta={'poet_poems_url': poet_poems_url })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_poet_poems(self, response):\n poet_poems_url = response.meta['poet_poems_url']\n\n sresponse = scrapy.Selector(response)\n\n #like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next\n # page in order to extract all of the poems associated with each poet\n nextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table_poems = sresponse.xpath('//tbody/tr')\n\n #poetry.org does not provide text for all of the poems available, some links are for audio versions only,\n #therefore need to avoid storing poemitems that are not text\n regex = re.compile(r'audio')\n\n for row in table_poems:\n if len(row.xpath('td/a/@href').extract()[0]) > 0 :\n poemlink = u''.join(row.xpath('td/a/@href').extract()[0])\n linktext = str(poemlink)\n if regex.search(linktext) is None:\n if len(row.xpath('td//text()').extract())>0:\n poemitem = PoemItem()\n poemitem['poet_poems_url'] = poet_poems_url\n poemitem['poem_yrpub'] = row.xpath('td//text()').extract()[1]\n poemitem['poem_title'] = row.xpath('td//text()').extract()[4]\n poemitem['poem_link'] = urlparse.urljoin(\"http://www.poets.org\",poemlink)\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poemlink),\n callback=self.parse_poet_poem, meta={'poemitem': poemitem})\n\n #if more poems on next page, use this method again\n if len(nextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",nextpagelink),\n callback=self.parse_poet_poems, meta= {'poet_poems_url': poet_poems_url})", "def parse_poet_poem(self, response):\n poemitem = response.meta['poemitem']\n sresponse = scrapy.Selector(response)\n poemitem['poem_text'] = sresponse.xpath('//div[@property = \"content:encoded\"]//text()').extract()\n poemitem['poem_copyright'] = sresponse.xpath('//div[@class = \"poem-credit\"]//p//text()').extract()\n\n yield poemitem", "def parse_movement(self, response):\n movement_name = response.meta['movement_name']\n movement_url = response.meta['movement_url']\n\n sresponse = scrapy.Selector(response)\n\n #Because each movement page contains a table that has maximum of ten rows, we need to go to the next page\n #in order to extract all of the poets associated for each movement\n poetnextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table = sresponse.xpath('//tbody/tr')\n for row in table:\n item = PoetItem()\n item['movement_name'] = movement_name\n item['movement_url'] = movement_url\n if len(row.xpath('td/a/text()').extract())>0:\n item['poet_name'] = row.xpath('td/a/text()').extract()\n if len(row.xpath('td/a/@href').extract())>0:\n #the link is for the poet bio page on poetry.org website\n link = u''.join(row.xpath('td/a/@href').extract())\n item['poet_url'] = urlparse.urljoin(\"http://www.poets.org\",link)\n if len(row.xpath('td/span/text()').extract()) > 0:\n item['poet_dob2'] = row.xpath('td/span/text()').extract()\n if len(row.xpath('td/text()').extract())>0:\n #a poet may be tagged/associated with multiple movements\n item['poet_tags'] = row.xpath('td/text()').extract()\n yield scrapy.Request(url =urlparse.urljoin(\"http://www.poets.org\",link), callback=self.parse_poet,\n meta = {'item': item})\n\n #if more poets on next page, use this method again\n if len(poetnextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poetnextpagelink),\n callback=self.parse_movement, meta = {'movement_name': movement_name,\n 
'movement_url':movement_url})", "def read_poems(poet, start, end):\r\n\r\n failed = []\r\n\r\n for i in range(start, end + 1):\r\n url = URL + str(i)\r\n try:\r\n info_dict = process_poem(url)\r\n write_file(poet, info_dict)\r\n if info_dict['multipage']:\r\n keep_going = True\r\n pagenum = 2\r\n while keep_going:\r\n try:\r\n tempurl = url + '&lim=20&pageno=' + str(pagenum)\r\n info_dict = process_poem(tempurl)\r\n print('here')\r\n write_file(poet, info_dict)\r\n pagenum = pagenum + 1\r\n except:\r\n keep_going = False\r\n\r\n except:\r\n failed.append(i)\r\n\r\n print('Failed for %d out of %d pages'%( len(failed), end - start + 1 ), failed)", "def retrieving_data():\n for x in range(1):\n page_number=random.randint(1,500)\n page_num=str(page_number)\n url = 'http://www.tastespotting.com/browse/'+page_num\n req = http.request('GET', url)\n data = BeautifulSoup(req.data,'html.parser')\n for each_div in data.find_all(\"div\", { \"class\": \"trendspotted-item\"}):\n for each_recipe in each_div.find_all('a', href=True):\n \"\"\"links starting with /clicks are the links of recipe to their original sites, so just retrieve those links\"\"\"\n if each_recipe['href'].startswith('/click'):\n retrieving_data.recipe_link=each_recipe['href'][16:-12]\n for each_img in each_recipe.find_all('img', alt=True):\n retrieving_data.recipe_image=each_img['src']\n for each_caption in each_div.find(\"p\", { \"class\": \"photo_caption\"}):\n retrieving_data.recipe_title=each_caption", "def process_poem(url):\r\n\r\n response = get(url)\r\n html_soup = BeautifulSoup(response.text, 'html.parser')\r\n beyts = html_soup.find_all('span', class_ = 'verse')\r\n beyts = [beyt.text for beyt in beyts]\r\n info_dict = process_key_items(html_soup)\r\n info_dict['beyts'] = beyts\r\n\r\n return info_dict", "def parse_main(self, response):\n\n for i in response.xpath('//div[contains(@class,\"products-list__item\")]'):\n item = {\n \"VENDORID\": 1055,\n \"VENDOR\": 'JC SALES',\n \"ITEMNO\": i.xpath('.//span[contains(text(),\"Item No:\")]/text()').get().replace('Item No:', '').strip(),\n \"DESCRIPTION\": i.xpath('.//div[contains(@class,\"product-card__name\")]//a/text()').get(),\n \"IMAGE_URL\": i.xpath('.//div[contains(@class,\"product-card__image\")]//img[1]/@src').get(),\n \"PAGE_TITLE\": response.css('title::text').get(),\n \"PAGE_URL\": response.request.url\n }\n yield Request(response.urljoin(i.xpath('.//a[contains(@class,\"image__body\")]/@href').get()),\n self.parse_details, meta={'item': item})\n\n next_page = response.xpath('//a[text()=\">\"]/@href').get()\n if next_page is not None:\n next_page = response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse_main)", "def parse_items(self,response):\n sel = Selector(response)\n item = response.meta['job_item']\n company_item = response.meta['company_item']\n company_item['introduction'] = sel.xpath('//div[@class=\"job-item main-message noborder\"]/div[@class=\"content content-word\"]/text()').extract_first()\n company_item['address'] = sel.xpath('//div[@class=\"company-infor\"]/p/text()').extract_first()\n item['link'] = response.url\n item['requirement'] = sel.xpath('//div[@class=\"content content-word\"][1]/text()').extract_first()\n item['website_id'] = 7\n item['company'] = company_item\n print item\n yield item", "def HTMLparser(self):\n soup = self.getHTML()\n \n # Sort through all the text in the html:\n for text in soup.find_all('p'):\n try:\n paragraphNo = int(text.parent.p['id'][14:])\n \n # Only grab paragraphs in \"On the Social 
Contract\"\n if paragraphNo < self.START_PARAGRAPH or paragraphNo > self.END_PARAGRAPH:\n continue\n \n elif text.string:\n \n # Ignore those \"paragraphs\" in the html that simply outline different chapters/books\n if re.search('^(CHAPTER|BOOK)(.*):', text.string):\n continue\n \n else:\n \n # Want to read in the document by sentence (for RousseauBot to use individually later on)\n tempList = re.split('(?<!etc)\\.\\s(?!.*\\\")|\\!', text.string)\n for sentence in tempList:\n \n # When a \"paragraph\" is just a single sentence, re's .split() returns the sentence and a ''\n # Also, remove overly long quotes - Twitter has char limit\n if sentence != '' and len(sentence.strip()) < self.TWITTER_LIMIT:\n self.quotes.append(sentence.strip())\n \n except KeyError:\n \n # BS throws KeyError when <p>'s id field is blank; ignore - all paragraphs I need has an id\n continue", "def parse_item(self, response):\n self.check_Tor_time()\n print(\"Looking\", response.url)\n # Create the loader using the response\n l = ItemLoader(item=PropertiesItem(), response=response)\n l.default_output_processor = TakeFirst()\n try:\n self.fill_from_Json(l)\n except Exception as e:\n print('exception->', e)\n print('1')\n for node in response.css('div.padding-phone-only > .padding-small-top'):\n try:\n title = node.xpath('div[1]/h6/text()').extract()\n except Exception as e:\n print 1, e\n print('title:', title)\n try:\n val = node.xpath('div[2]/text()').extract()\n except Exception as e:\n print 2, e\n try:\n if \"code\" in title[0]:\n l.add_value('unique_id', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Bedrooms\" in title[0]:\n l.add_value('property_rooms_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Construction\" in title[0]:\n l.add_value('construction_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Modified\" in title[0]:\n l.add_value('on_site_date', node.xpath('div[2]/time/text()').extract()[0],\n MapCompose(\n lambda i: parse(i, fuzzy=True)))\n print(node.xpath('div[2]/time/text()').extract())\n except Exception as e:\n print 3, e\n print('2')\n # Housekeeping fields\n l.add_value('url', response.url)\n # l.add_value('spider', self.name)\n l.add_value('source', self.allowed_domains[0])\n l.add_value('imported_date', datetime.now())\n l.add_value('asset_type', 'realestate')\n l.add_value('transaction_type', 'commercial')\n tp = response.xpath(\n '//*[@id=\\\"breadCrumbs\\\"]/a[1]/text()').extract()[0]\n print('3')\n if \"Sales\" in tp:\n l.replace_value('property_buy_or_rent', \"sale\")\n else:\n l.replace_value('property_buy_or_rent', \"rent\")\n if \"residential\" in tp:\n l.add_value('category_major', \"residential\")\n elif \"commercial\" in tp:\n l.add_value('category_major', \"commercial\")\n else:\n l.add_value('category_major', \"land\")\n # a = l.load_item()\n # print(a)\n # return\n print('4')\n\n print(l)\n return l.load_item()", "def parse_webpage(self, response):\n item = response.meta['item']\n print(\"Request url {}, actual requested url {}\".format(item['url'], response.request.url))\n # website url\n item['website_url'] = response.request.url\n\n item['name'] = self.guess_company_name(response)\n item['domain'] = self.get_domain(response)\n\n # get website title\n item['website_title'] = self.get_webpage_title(response)\n # get description from website\n item['website_desc'] = self.get_webpage_description(response)\n\n # get keywords from website\n item['keywords'] = self.get_webpage_keywords(response)\n\n # try to get email and phones\n 
item['email'] = self.extract_email(response)\n item['phone'] = self.extract_phone(response)\n\n if not item['email']:\n # try to get contact info\n # check if there is kontakt link on the page\n item = self.check_webpage_for_contact_details(item, response, \"impressum\")\n\n if not item['email']:\n try:\n # try Contact\n item = self.check_webpage_for_contact_details(item, response, \"kontakt\")\n\n except Exception as e:\n print(\"Exception\", e)\n\n if item['email']:\n item['email'] = item['email'].replace(\"(at)\", \"@\")\n yield item", "def parse_all(self):\n\n # Generates a list of apartment urls\n self.parse_apartment_urls()\n\n # Parses each apartment url and stores it in apartment_data\n for apartment_url in self.apartment_urls:\n self.parse_single_page(apartment_url)", "def parse_item(self, response):\n NewhouseSpider.crawled_urls.append(response.url)\n item = FocusedScrapyCrawlerItem()\n item['url'] = response.url\n item['link_text'] = response.meta.get('link_text', '') if response.meta else ''\n soup = BeautifulSoup(response.body, 'html.parser')\n\n item['body_p_tags'] = self._getBodyText(soup)\n item['head_title'] = self._getHeadTitle(soup)\n item['last_crawled'] = time.time()\n links = self._getLinks(response, soup)\n\n # get score of the page based upon classifier\n if self.classifier:\n score = self.classifier.score(item['link_text'], item['head_title'], item['body_p_tags'])\n else:\n score = 0.0\n\n item['score'] = score\n yield item\n if score <= 0:\n self.log(\"item={} does not belong to new home so stop crawling\".format(item),\n logging.INFO)\n else:\n for link in links:\n req = Request(link, priority=int(score * 1000000), # after the request is done, run parse_item to train the apprentice\n callback=self.parse_item)\n yield req", "def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()", "def procesPage(self, page):\n item = pywikibot.ItemPage.fromPage(page)\n pywikibot.output('Processing %s' % page)\n if not item.exists():\n pywikibot.output('%s doesn\\'t have a wikidata item :(' % page)\n #TODO FIXME: We should 
provide an option to create the page\n else:\n pagetext = page.get()\n templates = pywikibot.extract_templates_and_params(pagetext)\n for (template, fielddict) in templates:\n # Clean up template\n template = pywikibot.Page(page.site, template,\n ns=10).title(withNamespace=False)\n # We found the template we were looking for\n if template in self.templateTitles:\n for field, value in fielddict.items():\n field = field.strip()\n value = value.strip()\n # This field contains something useful for us\n if field in self.fields:\n # Check if the property isn't already set\n claim = pywikibot.Claim(self.repo, self.fields[field])\n if claim.getID() in item.get().get('claims'):\n pywikibot.output(\n u'A claim for %s already exists. Skipping'\n % claim.getID())\n # TODO FIXME: This is a very crude way of dupe\n # checking\n else:\n if claim.getType() == 'wikibase-item':\n # Try to extract a valid page\n match = re.search(pywikibot.link_regex, value)\n if match:\n try:\n link = pywikibot.Link(match.group(1))\n linkedPage = pywikibot.Page(link)\n if linkedPage.isRedirectPage():\n linkedPage = linkedPage.getRedirectTarget()\n linkedItem = pywikibot.ItemPage.fromPage(linkedPage)\n claim.setTarget(linkedItem)\n except pywikibot.exceptions.NoPage:\n pywikibot.output('[[%s]] doesn\\'t exist so I can\\'t link to it' % (linkedItem.title(),))\n continue\n elif claim.getType() == 'string':\n claim.setTarget(value.strip())\n else:\n pywikibot.output(\"%s is not a supported datatype.\" % claim.getType())\n continue\n\n pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget()))\n item.addClaim(claim)\n # A generator might yield pages from multiple sites\n source = self.getSource(page.site)\n if source:\n claim.addSource(source, bot=True)", "def _scrape(self):", "def create_page_objects(self, data):\n for page in data['pages']:\n self.create_page(page)", "def parse(self, response):\n page_jobs=[]\n\n # Calling abstarct method get_jobs_list() and iterating...\n jobs_div_list=self.get_jobs_list(response)\n for div in jobs_div_list:\n \n # Calling abstarct method get_job_dict()\n job_dict=self.get_job_dict(div)\n\n if not job_dict['url'] or not job_dict['title'] :\n # At least url, title data is loaded from the list of job posting ...\n raise ValueError( \"Could not find valid job information ('url' and 'title') in data:\\n\" + \n str(div.get()) + \"\\nScraped infos:\\n\" + str(job_dict) + \"\\nReport this issue on github!\" )\n \n # Store source as the name of the spider aka website\n job_dict['source']=self.name\n page_jobs.append(job_dict)\n \n \"\"\"\n Load full job page only if:\n - it's a new job (not in database)\n - load_full_jobs=Yes\n - the method parse_full_job_page() has been re-wrote by the Scraper subclass\n \"\"\"\n if ( (not self.db or self.db.find_job(job_dict)==None)\n and self.load_full_jobs ):\n if type(self).parse_full_job_page != Scraper.parse_full_job_page:\n # load_full_jobs=Yes and it's supported by scraper\n # Call parse_full_job_page() with job URL\n\n # Handle SeleniumRequest if use_selenium=True\n if self.use_selenium:\n yield SeleniumRequest(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict),\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict))\n else:\n yield Job(job_dict)\n else:\n yield Job(job_dict)\n\n \"\"\" Just printing in one line \"\"\"\n if self.load_full_jobs:\n if 
type(self).parse_full_job_page == Scraper.parse_full_job_page:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True and load_all_new_pages=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True, some informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraping {} jobs from {}...\".format(len(page_jobs), response.url))\n else:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. load_all_new_pages=False and load_full_jobs=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url))\n else:\n self.log.info(\"Scraped {} jobs from {}. load_full_jobs=False, some informations might be missing\".format(len(page_jobs), response.url))\n \n \"\"\"\n If all page jobs are new and \n The method get_next_page_url() has been re-wrote by the Scraper subclass\n Scrape next page\n \"\"\"\n if self.load_all_new_pages==True:\n if self.db and any( [self.db.find_job(job_dict)!=None for job_dict in page_jobs] ):\n # All new job postings loaded\n pass\n else:\n if self.get_next_page_url(response)!=None :\n # Loading next page...\n if self.use_selenium:\n yield SeleniumRequest(\n url=self.get_next_page_url(response),\n callback=self.parse,\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(\n url=self.get_next_page_url(response),\n callback=self.parse)\n else:\n if type(self).get_next_page_url != Scraper.get_next_page_url:\n # Last page loaded\n pass\n else:\n self.log.info(\"Scraper {} does not support load_all_new_pages=True, some new job postings might be missing\".format(self.name))", "def _parse_page_contents (self, page_soup):\n netflix_page_data = self.extract_inline_netflix_page_data(page_soup=page_soup)\n self.user_data = self._parse_user_data(netflix_page_data=netflix_page_data)\n self.esn = self._parse_esn_data(netflix_page_data=netflix_page_data)\n self.api_data = self._parse_api_base_data(netflix_page_data=netflix_page_data)\n self.profiles = self._parse_profile_data(netflix_page_data=netflix_page_data)\n self.log(msg='Found ESN \"' + self.esn + '\"')\n return netflix_page_data", "def parse_details(self, response):\n items = response.xpath(\"//*[@id='all']//div[@class='prdct-box']\")\n for i in items:\n image_url = response.urljoin(i.xpath(\".//div[@class='prdct-box1']/a[1]/@href\").get())\n description = i.xpath(\".//div[@class='prdct-box2']//a[1]/text()\").get()\n item_no = i.xpath(\".//div[@class='prdct-box2']//text()[3]\").get(default='').strip()\n upc = i.xpath(\".//*[contains(text(),'UPC')]/following-sibling::text()[1]\").extract()[0].strip()\n category = i.xpath(\"//*[@id='all']//*[@class='products']/text()\").get()\n case = i.xpath(\".//*[contains(text(),'Case')]/following-sibling::text()[1]\").extract()[0]\n yield {\n \"VENDORID\":1068,\n \"VENDOR\":'UPD',\n \"ITEMNO\":item_no,\n \"UPC\":upc,\n \"CATEGORY\":category,\n \"DESCRIPTION\":description,\n \"IMAGE_URL\":image_url,\n \"CASEPACK\":case,\n \"PAGE_TITLE\":response.css('title::text').get(),\n \"PAGE_URL\":response.request.url\n }\n\n next_page = response.xpath(\"//p[@class='page-num']//a/@href\").extract()\n if next_page is not None:\n for n in next_page:\n next_page_url = response.urljoin(n)\n yield scrapy.Request(next_page_url, 
callback=self.parse_details)", "def parse(self, response):\n for sel in response.xpath('//*[@id=\"J_goodsList\"]/ul/li[@class=\"gl-item\"]'):\n \"\"\"iterate all items in this page\"\"\"\n sku = sel.xpath('.//@data-sku').extract_first()\n price = float(sel.xpath('.//div/div[3]/strong/i/text()').extract_first())\n name = ''.join(sel.xpath('.//div/div[4]/a/em/descendant-or-self::node()/text()').extract())\n seller = sel.xpath('.//div/div[7]/span/a/text()').extract_first()\n sku_url = \"http:\" + sel.xpath('.//div/div[1]/a/@href').extract_first()\n\n yield Request(sku_url,\n callback=self.parse_item,\n meta = {'sku' : sku,\n 'price' : price,\n 'name' : name,\n 'seller' : seller})\n #make the request of individual page", "def procesPage(self, page):\n item = pywikibot.ItemPage.fromPage(page)\n pywikibot.output('Processing %s' % page)\n if not item.exists():\n pywikibot.output('%s doesn\\'t have a wikidata item :(' % page)\n #TODO FIXME: We should provide an option to create the page\n else:\n pagetext = page.get()\n templates = pywikibot.extract_templates_and_params(pagetext)\n for (template, fielddict) in templates:\n # We found the template we were looking for\n if template.replace(u'_', u' ')==self.templateTitle:\n for field, value in fielddict.items():\n # This field contains something useful for us\n if field in self.fields:\n # Check if the property isn't already set\n claim = pywikibot.Claim(self.repo, self.fields[field])\n if claim.getID() in item.get().get('claims'):\n pywikibot.output(u'A claim for %s already exists. Skipping' % (claim.getID(),))\n #TODO FIXME: This is a very crude way of dupe checking\n else:\n # Try to extract a valid page\n match = re.search(pywikibot.link_regex, value)\n if match:\n try:\n link = pywikibot.Link(match.group(1))\n linkedPage = pywikibot.Page(link)\n if linkedPage.isRedirectPage():\n linkedPage = linkedPage.getRedirectTarget()\n linkedItem = pywikibot.ItemPage.fromPage(linkedPage)\n claim.setTarget(linkedItem)\n pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget().getID()))\n item.addClaim(claim)\n if self.source:\n claim.addSource(self.source, bot=True)\n except pywikibot.exceptions.NoPage:\n pywikibot.output('[[%s]] doesn\\'t exist so I can\\'t link to it' % (linkedItem.title(),))", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def parse(self, response):\n content = response.body\n if not content:\n return\n sel = Selector(response)\n #print sel.xpath('//table[@class=\"board-list tiz\"]/tr').extract()\n for job in sel.xpath('//ul[@class=\"sojob-list\"]/li'):\n #print 'd',job\n info = job.xpath('div[@class=\"sojob-item-main clearfix\"]/div[@class=\"job-info\"]')\n com_info = job.xpath('div[@class=\"sojob-item-main clearfix\"]/div[@class=\"company-info nohover\"]')\n title = info.xpath('h3/a/text()').extract_first().lower()\n if title.find('python') != -1:\n url = info.xpath('h3/a/@href').extract_first()\n request = scrapy.Request(url=url,\n callback=self.parse_items,\n headers=self.spider.headers,\n cookies=self.cookies)\n company_item, job_item = CompanyItem(), JobItem()\n company_item['name'] = com_info.xpath('p[@class=\"company-name\"]/a/text()').extract_first()\n company_item['homepage'] = 
com_info.xpath('p[@class=\"company-name\"]/a/@href').extract_first()\n job_item['pub_time'] = info.xpath('p[@class=\"time-info clearfix\"]/time/text()').extract_first()\n year = str(date.today().year)\n if str(year) not in job_item['pub_time']:\n if job_item['pub_time'] == u'昨天':\n job_item['pub_time'] = (date.today()-timedelta(days=1)).strftime(\"%Y-%m-%d\")\n elif job_item['pub_time'] == u'前天':\n job_item['pub_time'] = (date.today() - timedelta(days=2)).strftime(\"%Y-%m-%d\")\n else:\n job_item['pub_time'] = date.today().strftime(\"%Y-%m-%d\")\n job_item['title'] = title\n job_item['welfare'] = ' '.join(com_info.xpath('p[@class=\"temptation clearfix\"]/span/text()').extract())\n job_item['salary'] = info.xpath('p[@class=\"condition clearfix\"]/span[@class=\"text-warning\"]/text()').extract_first()\n request.meta['company_item'] = company_item\n request.meta['job_item'] = job_item\n yield request", "def _getParentPage(self):\n page = {}\n tag=[]\n \n data= self.soup.findAll('div','span8')\n for d in data:\n tag=d.findAll('div','pd-comment')\n \n try:\n # page['title'] = stripHtml(self.soup.find('div','breadcrumbs')\\\n # .findAll('a')[-1].renderContents())\n for t in tag:\n title=(t.find('h4'))\n page['title'] = title\n log.info(self.log_msg(\"title:%s\"%page['title']))\n except:\n log.exception(self.log_msg(\"Title not fetched\"))\n return False\n \n try:\n#==============================================================================\n# rating_tag = self.soup.find('div','reviews-ratingcombined')\n# page['ef_product_rating_overall'] = float(rating_tag.b.renderContents())\n# for each in rating_tag.findParent('div').findAll('div','reviews-rating'):\n# key = 'ef_product_rating_' + stripHtml(each.label.renderContents\\\n# ()).lower().split('/')[0].replace(' ','_')\n# page[key] = float(each.b.renderContents())\n#==============================================================================\n for r in tag:\n rating_tag=(r.find('div','badge pd-review-score')).replace('Overall','')\n page['rating_tag'] = rating_tag\n \n except:\n log.exception(self.log_msg(\"Specifications not found!!\"))\n \n try:\n self.updateParentExtractedEntities(page) \n if checkSessionInfo(self.genre, self.session_info_out, \\\n self.task.instance_data['uri'],self.task.instance_data.get('update')):\n log.info(self.log_msg('Check Session info return True'))\n return False\n result = updateSessionInfo(self.genre, self.session_info_out,\\\n self.task.instance_data['uri'], get_hash(page) ,'Post',\\\n self.task.instance_data.get('update'))\n if not result['updated']:\n return False\n page['uri'] = self.task.instance_data['uri']\n page['data'] = ''\n page['path'] = [self.task.instance_data['uri']]\n page['parent_path'] = []\n page['uri_domain'] = unicode(urlparse.urlparse(page['uri'])[1])\n page['priority'] = self.task.priority\n page['level'] = self.task.level\n page['last_updated_time'] = page['posted_date'] = page['pickup_date'] = \\\n datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\")\n page['connector_instance_log_id'] = self.task.connector_instance_log_id\n page['connector_instance_id'] = self.task.connector_instance_id\n page['workspace_id'] = self.task.workspace_id\n page['client_id'] = self.task.client_id\n page['client_name'] = self.task.client_name\n page['versioned'] = False\n page['task_log_id'] = self.task.id\n page['entity'] = 'Post'\n page['category'] = self.task.instance_data.get('category','')\n self.pages.append(page)\n log.info(self.log_msg('Parent Page added'))\n return True\n except:\n 
log.exception(self.log_msg(\"Exception while adding parent Page info\"))\n return False", "def grab_mApe_results (searchType) :\n\n mape_main_url = 'https://www.mightyape.co.nz/'\n #Defining the url paths for search types\n mape_mv_category_url = 'movies-tv/movies?q='\n mape_mv_format_search_url = 'movieformat~blu-ray'\n\n #This is the final url string\n searchUrl = ''\n\n #Checking search type\n if searchType is SEARCH_BD_MV_TYPE :\n searchUrl = mape_main_url+mape_mv_category_url+mape_mv_format_search_url\n elif searchType is 'Title' :\n searchUrl = 'https://www.mightyape.co.nz/movies-tv/movies/all?sort=2&q=movieformat~blu-ray'\n\n\n #Using a dictionary to store data, as contains list with objects\n mape_list = {}\n\n page = requests.get(searchUrl)\n tree = html.fromstring(page.content)\n\n data = tree.xpath('//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]/div[@class=\"title\"]/a') #<--- WORKS\n\n data_alt = tree.xpath('//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]')\n\n print('Getting results from url:',searchUrl)\n print('Number of objects=',len(data_alt))\n count = 1\n\n for item in data_alt :\n simple_item = item.xpath('div[@class=\"title\"]/a')\n title = simple_item[0].text\n link = simple_item[0].get('href')\n format = item.xpath('div[@class=\"format\"]/text()')\n rating = item.xpath('div[@class=\"customer-rating\"]/span/span[@class=\"average\"]/text()')\n base_price = item.xpath('div[@class=\"price\"]/s/text()')\n hot_price = item.xpath('div[@class=\"price\"]/span[@class=\"price hot\"]/text()')\n normal_price = item.xpath('div[@class=\"price\"]/span[@class=\"price\"]/text()')\n if len(rating) > 0 :\n #temp_mv = Movie_object(title,format[0],rating[0].strip(), mape_main_url + link,normal_price, base_price, hot_price)\n print(title,format[0],rating[0].strip(), mape_main_url + link,normal_price, base_price, hot_price)\n #mape_list[title] = temp_mv\n else :\n print(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n #temp_mv = Movie_object(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n #mape_list[title] = temp_mv\n\n\n count += 1\n\n return mape_list", "def parse_detail(self, response):\n\n self.logger.log(self.log_lvl, 'scraping data @ {}'.format(response.url))\n\n item_list = list()\n image_urls = list()\n # extract image\n try:\n pattern = re.compile(r\"(.*imagearray:)(.*)(,.*displaymode.*)\", re.MULTILINE | re.DOTALL)\n javascript_containing_images = response.xpath('//script[contains(., \"var mygallery=\")]/text()').extract()[0]\n images = re.match(pattern, javascript_containing_images).group(2)\n image_array = json.loads(images)\n image_urls = [urlparse.urljoin(response.url, itm[1]) for itm in image_array]\n except Exception as e:\n print(\"{} - {}\".format(type(e), str(e)))\n\n tipe_mobil = response.css('#content font.vehicleinfo ~ font.warning::text').extract_first()\n model_mobil = response.css('#content font.vehicleinfo::text').extract_first()\n if tipe_mobil.lower() == model_mobil.lower():\n tipe_mobil = response.meta.get('type', None)\n main_group = response.meta.get('main_group', None)\n assembly_set = response.css('#content font.title b::text').extract_first()\n\n # sparepart items\n for row in response.css('div#content div.content table tr'):\n item = IsuzuSparepartItem()\n\n # source_url\n item['source_url'] = response.url\n\n # car model\n item['merk'] = self.name\n item['tipe_mobil'] = tipe_mobil\n item['model_mobil'] = model_mobil\n\n # images\n 
item['image_urls'] = image_urls\n\n # grouping/assembly\n item['main_group'] = main_group\n item['assembly_set'] = assembly_set\n\n item['key'] = row.css('td.intable:nth-child(1) .detailcontent::text').extract_first()\n item['part_number'] = row.css('td.intable:nth-child(2) .detailcontent::text').extract_first()\n item['itc'] = row.css('td.intable:nth-child(3) .detailcontent::text').extract_first()\n item['description'] = row.css('td.intable:nth-child(4) .detailcontent::text').extract_first()\n item['qty'] = row.css('td.intable:nth-child(5) .detailcontent::text').extract_first()\n item['app_date'] = row.css('td.intable:nth-child(6) .detailcontent::text').extract_first()\n item['lr'] = row.css('td.intable:nth-child(7) .detailcontent::text').extract_first()\n item['model'] = row.css('td.intable:nth-child(8) .detailcontent::text').extract_first()\n item['remarks'] = row.css('td.intable:nth-child(9) .detailcontent::text').extract_first()\n\n item_list.append(item)\n\n return item_list", "def parse(self, response):\n self.driver.get(response.url)\n product_category=response.meta[\"category_text\"]\n products=response.xpath(\"//*[(@class='list-item')]\")\n \n # item containers for storing product\n items = CrawlingECommerceItem()\n \n # iterating over search results\n # for product in products:\n # # Defining the XPaths\n # XPATH_PRODUCT_LINK=\".//*[contains(concat( ' ', @class, ' ' ), concat( ' ', 'goods-tit', ' ' ))]//a\"\n # XPATH_PRODUCT_NAME=\".//div[@class='goods-introudce']//a/@href\"\n # XPATH_PRODUCT_PRICE=\".//div[@class='catalog-detail']//div[@class='detail-right']//p/text()\"\n # XPATH_PRODUCT_IMAGE_LINK=\".//img\"\n\n # raw_product_name=product.xpath(XPATH_PRODUCT_NAME).get()\n # raw_product_price=product.xpath(XPATH_PRODUCT_PRICE).get()\n # raw_product_image_link=product.xpath(XPATH_PRODUCT_IMAGE_LINK).extract()\n # raw_product_link=product.xpath(XPATH_PRODUCT_LINK).get()\n\n # # cleaning the data\n # product_name=''.join(raw_product_name).strip(\n # ) if raw_product_name else None\n # product_price=''.join(raw_product_price).strip(\n # ) if raw_product_price else None\n # product_image_link=''.join(raw_product_image_link).strip(\n # ) if raw_product_image_link else None\n # product_link=''.join(raw_product_link).strip(\n # ) if raw_product_link else None\n\n # # storing item\n # yield CrawlingECommerceItem (\n # product_name=product_name,\n # product_price=product_price,\n # product_url=product_link,\n # product_category=product_category,\n # image_urls=raw_product_image_link\n # )\n\n # # yield items\n \n # XPATH_PRAGINATION_LINK=\"//*[(@class='next right')]/a/@href\"\n\n yield response.follow(str(response.request.url), callback = self.parse, meta = {\"category_text\": product_category})", "def scrap_data_companies(self):\n list_job_offers = self.driver.find_elements_by_class_name(\n \"jobContainer\")\n jobs = []\n if len(list_job_offers) == 0:\n print(\"There is nothing to scrap for \", conf.URL_TO_SCRAPE,\n \"that was requested\")\n return\n\n for i, elt in enumerate(list_job_offers):\n\n self.remove_sign_up_prompt()\n self.remove_recommended_jobs()\n html_job_container = elt.get_attribute('innerHTML')\n time.sleep(2)\n name_company = get_name_company(elt.text)\n city_job = get_city_job(html_job_container)\n job_id = get_job_id(html_job_container)\n position_job = get_position(html_job_container)\n job_description = get_summary_job(position_job)\n\n if job_id is not None and name_company is not None:\n company = Company.Company(name_company)\n company_and_id_job = name_company 
+ \"-\" + job_id\n self.current_path = os.path.join(self.date_path,\n company_and_id_job)\n os.mkdir(self.current_path)\n\n if i != 0:\n click_on_job_offer(\n elt) # link since we are already seeing it\n\n self.scrape_data_company(elt, company)\n company_id = company.insert_to_db(self.db_connection)\n job = JobOffer.JobOffer(job_id, company=company, city=city_job,\n position=position_job,\n description=job_description)\n job.insert_to_db(company_id, self.db_connection)\n jobs.append(job)\n print(job)\n else:\n logger.error(\"Job Id not found\")\n JobOffer.print_jobs(jobs)", "def mine(self):\n collections = []\n # Getting HTML snapshot with selenium, storing a soup object in .data\n self.scrape()\n # Returns only the parts of the soup that surround each collection\n collection_elements = self.get_collection_elements()\n # Turns each soup element into a CollectionElement object\n collections = self.get_info_from_collections(collection_elements)\n # NOTE THE RETURN VALUE IS MERELY TO PASS TESTING< MUST BE CHANGED\n return self.data" ]
[ "0.7948013", "0.68461853", "0.67603666", "0.66625357", "0.61643696", "0.6150909", "0.5885224", "0.5870858", "0.58600146", "0.578315", "0.5738709", "0.57362324", "0.57036006", "0.5684646", "0.5677119", "0.5675798", "0.56603056", "0.565232", "0.5634309", "0.56316173", "0.56219465", "0.55369085", "0.5535251", "0.55215037", "0.55155706", "0.5512849", "0.55126154", "0.55071676", "0.5492794", "0.54858273" ]
0.82065254
0
This method parses the poems found on the page of all poems available for a specific poet. The poet poems URL is the foreign key to the poets collection
def parse_poet_poems(self, response): poet_poems_url = response.meta['poet_poems_url'] sresponse = scrapy.Selector(response) #like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next # page in order to extract all of the poems associated with each poet nextpagelink = u''.join(sresponse.xpath('//a[@title = "Go to next page"]/@href').extract()) table_poems = sresponse.xpath('//tbody/tr') #poetry.org does not provide text for all of the poems available, some links are for audio versions only, #therefore need to avoid storing poemitems that are not text regex = re.compile(r'audio') for row in table_poems: if len(row.xpath('td/a/@href').extract()[0]) > 0 : poemlink = u''.join(row.xpath('td/a/@href').extract()[0]) linktext = str(poemlink) if regex.search(linktext) is None: if len(row.xpath('td//text()').extract())>0: poemitem = PoemItem() poemitem['poet_poems_url'] = poet_poems_url poemitem['poem_yrpub'] = row.xpath('td//text()').extract()[1] poemitem['poem_title'] = row.xpath('td//text()').extract()[4] poemitem['poem_link'] = urlparse.urljoin("http://www.poets.org",poemlink) yield scrapy.Request(url = urlparse.urljoin("http://www.poets.org",poemlink), callback=self.parse_poet_poem, meta={'poemitem': poemitem}) #if more poems on next page, use this method again if len(nextpagelink) > 0: yield scrapy.Request(url = urlparse.urljoin("http://www.poets.org",nextpagelink), callback=self.parse_poet_poems, meta= {'poet_poems_url': poet_poems_url})
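The method above populates a PoemItem whose class definition is not included in this record. Purely as a reading aid, a minimal sketch of what that item class could look like is given below, reconstructed only from the fields assigned in parse_poet_poems and parse_poet_poem; the module name and exact field set are assumptions, not taken from the source project. Note also that the code appears to rely on the Python 2 style urlparse module for urljoin (or an equivalent alias); under Python 3 the import would be from urllib.parse.

    import scrapy

    # Hypothetical items.py for the spider above, inferred from field usage only.
    class PoemItem(scrapy.Item):
        poet_poems_url = scrapy.Field()   # URL of the poet's poems page; acts as the foreign key to the poet record
        poem_yrpub = scrapy.Field()       # publication year text taken from the table row
        poem_title = scrapy.Field()
        poem_link = scrapy.Field()        # absolute poem URL built with urljoin
        poem_text = scrapy.Field()        # filled in later by parse_poet_poem
        poem_copyright = scrapy.Field()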
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_poet(self, response):\n item = response.meta['item']\n\n sresponse = scrapy.Selector(response)\n poetdata = sresponse.xpath('//div[@class=\"view-content\"]')\n\n #TODO: Clear empty strings from poet item fields\n\n item['poet_basicbio'] = poetdata[0].xpath('div/span//text()').extract()\n item['poet_positions'] = poetdata[0].xpath('div//div/text()').extract()\n item['poet_posyears'] = poetdata[0].xpath('div//div/span/text()').extract()\n item['poet_bio'] = sresponse.xpath('//div[@class=\"field-items\"]//p//text()').extract()\n\n #this important link goes to the page of poems for each poet\n poetpoemlink = u''.join(sresponse.xpath('//div[@class=\"view-footer\"]/a/@href').extract())\n poet_poems_url = urlparse.urljoin(\"http://www.poets.org\",poetpoemlink)\n\n item['poet_poems_url'] = poet_poems_url\n\n #PoetItem finishes here\n yield item\n\n #goes to method that parses poems found in the poet_poems_url\n yield scrapy.Request(url=poet_poems_url, callback=self.parse_poet_poems, meta={'poet_poems_url': poet_poems_url })", "def process_poem(url):\r\n\r\n response = get(url)\r\n html_soup = BeautifulSoup(response.text, 'html.parser')\r\n beyts = html_soup.find_all('span', class_ = 'verse')\r\n beyts = [beyt.text for beyt in beyts]\r\n info_dict = process_key_items(html_soup)\r\n info_dict['beyts'] = beyts\r\n\r\n return info_dict", "def read_poems(poet, start, end):\r\n\r\n failed = []\r\n\r\n for i in range(start, end + 1):\r\n url = URL + str(i)\r\n try:\r\n info_dict = process_poem(url)\r\n write_file(poet, info_dict)\r\n if info_dict['multipage']:\r\n keep_going = True\r\n pagenum = 2\r\n while keep_going:\r\n try:\r\n tempurl = url + '&lim=20&pageno=' + str(pagenum)\r\n info_dict = process_poem(tempurl)\r\n print('here')\r\n write_file(poet, info_dict)\r\n pagenum = pagenum + 1\r\n except:\r\n keep_going = False\r\n\r\n except:\r\n failed.append(i)\r\n\r\n print('Failed for %d out of %d pages'%( len(failed), end - start + 1 ), failed)", "def parse_poet_poem(self, response):\n poemitem = response.meta['poemitem']\n sresponse = scrapy.Selector(response)\n poemitem['poem_text'] = sresponse.xpath('//div[@property = \"content:encoded\"]//text()').extract()\n poemitem['poem_copyright'] = sresponse.xpath('//div[@class = \"poem-credit\"]//p//text()').extract()\n\n yield poemitem", "def parse_movement(self, response):\n movement_name = response.meta['movement_name']\n movement_url = response.meta['movement_url']\n\n sresponse = scrapy.Selector(response)\n\n #Because each movement page contains a table that has maximum of ten rows, we need to go to the next page\n #in order to extract all of the poets associated for each movement\n poetnextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table = sresponse.xpath('//tbody/tr')\n for row in table:\n item = PoetItem()\n item['movement_name'] = movement_name\n item['movement_url'] = movement_url\n if len(row.xpath('td/a/text()').extract())>0:\n item['poet_name'] = row.xpath('td/a/text()').extract()\n if len(row.xpath('td/a/@href').extract())>0:\n #the link is for the poet bio page on poetry.org website\n link = u''.join(row.xpath('td/a/@href').extract())\n item['poet_url'] = urlparse.urljoin(\"http://www.poets.org\",link)\n if len(row.xpath('td/span/text()').extract()) > 0:\n item['poet_dob2'] = row.xpath('td/span/text()').extract()\n if len(row.xpath('td/text()').extract())>0:\n #a poet may be tagged/associated with multiple movements\n item['poet_tags'] = row.xpath('td/text()').extract()\n yield 
scrapy.Request(url =urlparse.urljoin(\"http://www.poets.org\",link), callback=self.parse_poet,\n meta = {'item': item})\n\n #if more poets on next page, use this method again\n if len(poetnextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poetnextpagelink),\n callback=self.parse_movement, meta = {'movement_name': movement_name,\n 'movement_url':movement_url})", "def parse_apartment_urls(self):\n\n # Generate soup for starting page\n soup = generate_soup(self.start_url)\n\n # Empties the urls list, in case it wasn't before\n self.apartment_urls = []\n\n # Get apartments in current page and store\n current_page_apartment_urls = self.list_get_apartment_urls(soup)\n self.apartment_urls = self.apartment_urls + current_page_apartment_urls\n\n # Check if there are more page to pull from\n while self.list_has_next_page(soup):\n soup = self.list_get_next_page(soup)\n\n # Get apartments in current page\n current_page_apartment_urls = self.list_get_apartment_urls(soup)\n self.apartment_urls = self.apartment_urls + current_page_apartment_urls", "def parse_all(self):\n\n # Generates a list of apartment urls\n self.parse_apartment_urls()\n\n # Parses each apartment url and stores it in apartment_data\n for apartment_url in self.apartment_urls:\n self.parse_single_page(apartment_url)", "def test_poets_get(self):\n pass", "def shelterGetPets(url, URL_JSON_KEY, shelter_id):\n \n method = \"shelter.getPets?\"\n count = \"&count=100\"\n url+= method + URL_JSON_KEY + shelter_id + count\n petJson = urlopen(url)\n petInfo = load(reader(petJson))\n return petInfo", "def _get_apt_urls_per_page(self, soup):\n\n # identify the tag that contains apt URL\n apartments = soup.find_all('div', class_='listing-item__tab-content')\n apt_urls = [apt.find('a')['href'] for apt in apartments]\n # formulate a complete apartment URL\n apt_urls = [f'{CONST.ELLIMAN_HEADER}{url}' for url in apt_urls]\n return apt_urls", "def pizza():\n url = str(request.args.get('url'))\n html = urlopen(url).read()\n pizzas = parseHtml(html)\n return pizzas", "def parsing_all_page(url):\n html_doc = get_html(url)\n# html_doc = get_html_local()\n page_count = get_html_count(html_doc)\n print 'All have find pages %d' % page_count\n\n projects = []\n\n for page in range(1, page_count + 1):\n print 'Parsing %d%%' % (page*100/page_count)\n\n url = BASE_URL + '?page=%d' % page\n projects.extend(process_page(url))\n\n return projects", "def HTMLparser(self):\n soup = self.getHTML()\n \n # Sort through all the text in the html:\n for text in soup.find_all('p'):\n try:\n paragraphNo = int(text.parent.p['id'][14:])\n \n # Only grab paragraphs in \"On the Social Contract\"\n if paragraphNo < self.START_PARAGRAPH or paragraphNo > self.END_PARAGRAPH:\n continue\n \n elif text.string:\n \n # Ignore those \"paragraphs\" in the html that simply outline different chapters/books\n if re.search('^(CHAPTER|BOOK)(.*):', text.string):\n continue\n \n else:\n \n # Want to read in the document by sentence (for RousseauBot to use individually later on)\n tempList = re.split('(?<!etc)\\.\\s(?!.*\\\")|\\!', text.string)\n for sentence in tempList:\n \n # When a \"paragraph\" is just a single sentence, re's .split() returns the sentence and a ''\n # Also, remove overly long quotes - Twitter has char limit\n if sentence != '' and len(sentence.strip()) < self.TWITTER_LIMIT:\n self.quotes.append(sentence.strip())\n \n except KeyError:\n \n # BS throws KeyError when <p>'s id field is blank; ignore - all paragraphs I need has an id\n 
continue", "def parse_listing(keyword,place):\n\turl = \"https://www.yellowpages.com/search?search_terms={0}&geo_location_terms={1}\".format(keyword,place)\n\tprint(\"retrieving \",url)\n\n\theaders = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n\t\t\t\t'Accept-Encoding':'gzip, deflate, br',\n\t\t\t\t'Accept-Language':'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n\t\t\t\t'Cache-Control':'max-age=0',\n\t\t\t\t'Connection':'keep-alive',\n\t\t\t\t'Host':'www.yellowpages.com',\n\t\t\t\t'Upgrade-Insecure-Requests':'1',\n\t\t\t\t'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n\t\t\t}\n\t# Adding retries\n\tfor retry in range(10):\n\t\ttry:\n\t\t\tresponse = requests.get(url,verify=False, headers = headers )\n\t\t\tprint(\"parsing page\")\n\t\t\tif response.status_code==200:\n\t\t\t\tparser = html.fromstring(response.text)\n\t\t\t\t#making links absolute\n\t\t\t\tbase_url = \"https://www.yellowpages.com\"\n\t\t\t\tparser.make_links_absolute(base_url)\n\n\t\t\t\tXPATH_LISTINGS = \"//div[@class='search-results organic']//div[@class='v-card']\"\n\t\t\t\tlistings = parser.xpath(XPATH_LISTINGS)\n\t\t\t\tscraped_results = []\n\n\t\t\t\tfor results in listings:\n\t\t\t\t\tXPATH_BUSINESS_NAME = \".//a[@class='business-name']//text()\"\n\n\t\t\t\t\tXPATH_WEBSITE = \".//div[@class='info']//div[contains(@class,'info-section')]//div[@class='links']//a[contains(@class,'website')]/@href\"\n\n\t\t\t\t\traw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n\n\t\t\t\t\traw_website = results.xpath(XPATH_WEBSITE)\n\n\n\t\t\t\t\tbusiness_name = ''.join(raw_business_name).strip() if raw_business_name else None\n\n\t\t\t\t\twebsite = ''.join(raw_website).strip() if raw_website else None\n\n\n\n\n\n\t\t\t\t\tbusiness_details = {\n\t\t\t\t\t\t\t\t\t\t'business_name':business_name,\n\n\t\t\t\t\t\t\t\t\t\t'website':website\n\n\t\t\t\t\t}\n\t\t\t\t\tscraped_results.append(business_details)\n\t\t\t\t\tprint(scraped_results)\n\t\t\t\treturn scraped_results\n\n\t\t\telif response.status_code==404:\n\t\t\t\tprint(\"Could not find a location matching\",place)\n\t\t\t\t#no need to retry for non existing page\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"Failed to process page\")\n\t\t\t\treturn []\n\n\t\texcept:\n\t\t\tprint(\"Failed to process page\")\n\t\t\treturn []", "def sentientPlanets():\n\n url = \"https://swapi-api.hbtn.io/api/species/\"\n planets = []\n while url is not None:\n r = requests.get(url)\n results = r.json()[\"results\"]\n for specie in results:\n if (specie[\"designation\"] == \"sentient\" or\n specie[\"classification\"] == \"sentient\"):\n\n planet_url = specie[\"homeworld\"]\n if planet_url is not None:\n p = requests.get(planet_url).json()\n planets.append(p[\"name\"])\n url = r.json()[\"next\"]\n return planets", "def parse(self, response):\n product_urls = response.css('.lpPLink::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n variety_urls = response.css('.elementContent a::attr(href)').getall()\n for variety_url in variety_urls:\n yield scrapy.Request(response.urljoin(variety_url))\n\n # TODO: mêmes opérations que précédemment, seule la classe change\n variety_urls = response.css('.elementTitle a::attr(href)').getall()\n for variety_url in variety_urls:\n yield scrapy.Request(response.urljoin(variety_url))", "def parse_single_page(self, url):\n\n logging.info(\"Parsing %s\", url)\n\n # 
Generate a soup instance for this url\n soup = generate_soup(self.base_url_apartments + url)\n\n # Dictionary to store data in\n apartment_dict = {\n 'url': url,\n 'name': 0,\n 'address': 0,\n 'bedrooms': 0,\n 'bathrooms': 0,\n 'price': 0,\n 'leasing_period': 0,\n 'description': 0,\n 'amenities': 0,\n 'image_urls': 0,\n 'floorplan_url': 0,\n 'lat': 0,\n 'lng': 0\n }\n\n # Parse the page for the relevant information\n self.get_apartment_name(soup, apartment_dict)\n self.get_apartment_address(soup, apartment_dict)\n self.get_apartment_stats(soup, apartment_dict)\n self.get_apartment_description(soup, apartment_dict)\n self.get_apartment_amenities(soup, apartment_dict)\n self.get_apartment_images(soup, apartment_dict)\n self.get_apartment_floorplan(soup, apartment_dict)\n self.get_apartment_latlng(soup, apartment_dict)\n\n # Check if we failed to find any of the parameters\n skip=False\n for key, value in apartment_dict.iteritems():\n if value == 0:\n logging.warn(\"Failed parsing %s\", key)\n if key == 'lat' or key == 'lng':\n skip = True\n\n print(apartment_dict)\n # Store apartment data in list\n if skip is False:\n self.apartment_data.append(apartment_dict)", "def parse_page(url):\n\n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'html.parser')\n\n listings = []\n\n # Loop throuhg all prices\n for offer in soup.findAll(\"div\", {\"class\": \"regular-ad\"}): # Scan regular-ad class to avoid featured ads realted to Kijiji Ads\n \n current_listing_dict = {}\n\n # Parse title\n title_list = offer.find_all(href=True)[0].text.split(\" \")\n title = [i for i in title_list if i]\n title = \" \".join(title).rstrip().strip(\"\\n\").strip(\" \")\n\n # Append title to dict\n current_listing_dict['title'] = title\n\n # Parse price\n price = \"\".join(offer.findAll(\"div\", {\"class\": \"price\"})[0].text.split(\" \")).rstrip().strip('\\n')\n\n if '$' in price:\n price = price.split('$')[-1].replace(',','')\n\n # Append price to dict\n current_listing_dict['price'] = price\n \n # Parse link\n link = offer.find_all(href=True)[0]['href']\n\n # Append link to dict\n current_listing_dict['link'] = link\n\n # Append to global listings list\n listings.append(current_listing_dict)\n\n return listings", "def get_posts(self, url=None):\n if not url:\n url = self.base_url\n\n self.log.debug(\"Getting URL: %s\", url)\n page_data = json.loads(urlopen(url).read().decode(\"utf-8\"))\n\n for post in page_data.get(\"data\", []):\n if \"message\" not in post:\n continue\n\n for word in self.keywords:\n if word in post[\"message\"]:\n self.log.debug(\"Emitting post: %s\", post[\"id\"])\n yield post\n break\n\n paging = page_data.get(\"paging\", {})\n\n if \"next\" in paging:\n for post in self.get_posts(paging[\"next\"]):\n yield post\n\n return", "def parse(self, url):\n pass", "def _get_apt_urls_per_page(self,\n pg_num,\n sales_type,\n htype=['house', \n 'multi-family']):\n\n if sales_type.lower() == 'buy':\n # only buy section cares about house type\n webpage = self._get_buy_webpage(pg_num, htype)\n\n if sales_type.lower() == 'rent':\n webpage = self._get_rent_webpage(pg_num)\n\n if sales_type.lower() == 'sold':\n webpage = self._get_sold_webpage(pg_num)\n\n browser = self._browser\n browser.get(webpage)\n time.sleep(3)\n try:\n robot_check = browser.find_element_by_xpath(\"//div[@class='content center']\")\n if 'I am not a robot' in robot_check.text:\n self._recaptcha(browser)\n except:\n pass\n \n # main content tag, need to be constantly updated\n apt_class = 
'PropertyCard__PropertyCardContainer-sc-1ush98q-2 gKJaNz Box-sc-8ox7qa-0 jIGxjA'\n apt_tags = browser.find_elements_by_xpath(\"//div[@class='PropertyCard__PropertyCardContainer-sc-1ush98q-2 gKJaNz Box-sc-8ox7qa-0 jIGxjA']\")\n\n # scrape all the apartment URLs\n apt_link_tags = [tag.find_element_by_tag_name('a') for tag in apt_tags]\n apt_urls = [f\"{tag.get_attribute('href')}\" for tag in apt_link_tags]\n return apt_urls", "def get_pagetree(url):\n response = requests.get(url)\n pagetree = html.fromstring(response.text)\n return pagetree", "def parse_listing(keyword, place):\n url = \"https://www.paginegialle.it/ricerca/{0}/{1}\".format(keyword, place)\n print(\"retrieving \", url)\n\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': 'www.paginegialle.it',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n }\n try:\n response = requests.get(url, verify=False, headers=headers)\n print(\"parsing page\")\n if response.status_code == 200:\n parser = html.fromstring(response.text)\n # making links absolute\n base_url = \"https://www.paginegialle.it\"\n parser.make_links_absolute(base_url)\n\n XPATH_LISTINGS = \"//div[@class='pageContentWrapper active']//div[@class='col contentCol']\"\n listings = parser.xpath(XPATH_LISTINGS)\n elif response.status_code == 404:\n print(\"Could not find a location matching\", place)\n # no need to retry for non existing page\n else:\n print(\"Failed to process page exit with no results exit code: 213\")\n return []\n except:\n print(\"Failed to process page exit with no results exit code: 222\")\n return []\n\n XPATH_RESULTS = \"//div[@class=' container containerListato ']//span[@class='searchResNum']//text()\"\n raw_RESULTS = listings[0].xpath(XPATH_RESULTS)\n resultsn = ''.join(raw_RESULTS).strip().replace(\"risultati\",\"\") if raw_RESULTS else None\n print(\"results found for query {0} {1} - {2}\".format(keyword,place,resultsn))\n page_number = int(int(resultsn)/20) #20 is the number of result for single web page\n print(\"number of web page to parse: {0}\".format(page_number))\n\n scraped_results = []\n if page_number == 1 or page_number == 0:\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = 
[raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n return scraped_results\n if page_number > 1: \n for retry in range(page_number):\n if retry == 0:\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n else:\n time.sleep(5)\n try:\n url = \"https://www.paginegialle.it/ricerca/{0}/{1}/p-{2}\".format(keyword,place,retry)\n response = requests.get(url, verify=False, headers=headers)\n print(\"parsing page {0}\".format(retry))\n if response.status_code == 200:\n parser = html.fromstring(response.text)\n # making links absolute\n base_url = \"https://www.paginegialle.it\"\n parser.make_links_absolute(base_url)\n\n XPATH_LISTINGS = \"//div[@class='pageContentWrapper active']//div[@class='col contentCol']\"\n listings = parser.xpath(XPATH_LISTINGS)\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = 
results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n\n elif response.status_code == 404:\n print(\"Could not find a location matching\", place)\n # no need to retry for non existing page\n break\n else:\n print(\"Failed to process page number: {0}\".format(retry))\n return scraped_results\n\n except:\n print(\"Failed to process page number: {0}\".format(retry))\n return scraped_results \n return scraped_results", "def parse_url(self, url: str):\n time.sleep(0.1)\n resp = requests.get(url, timeout=5).content.decode('windows-1250')\n selector = Selector(text=resp)\n name_addresses = []\n if not self.is_right_page(selector):\n return []\n\n company = self.parse_business_name(selector)\n name_addresses += self.parse_management_body(selector)\n name_addresses += self.parse_partners(selector)\n\n ret = []\n for name_address in name_addresses:\n name_address = [re.sub(r'[\",;]', '', n).strip() for n in name_address]\n print(\"Found name: \", name_address)\n is_russian = self.RUSSIA in name_address[1]\n ret.append([re.sub(r'[\",;]', '', company).strip()] + name_address + [is_russian])\n return ret", "def summarize_page(url, sent_count=default_sents, kp_count=default_kp):\n import bs4\n import requests\n\n try:\n data = requests.get(url).text\n soup = bs4.BeautifulSoup(data, \"html.parser\")\n # Find the tag with most paragraph tags as direct children\n body = max(soup.find_all(),\n key=lambda tag: len(tag.find_all('p', recursive=False)))\n\n paragraphs = map(lambda p: p.text, body('p'))\n text = '\\n'.join(paragraphs)\n return summarize(text, sent_count, kp_count)\n except Exception as e:\n return \"Something went wrong: {}\".format(str(e)), []", "def _get_apt_urls_per_page(self, pg_num):\n\n # get the URL for the specific page given its page number \n pg_url = self._get_page_url(pg_num)\n response = requests.get(pg_url)\n # scrape the HTML web content from rent.com\n results = response.content \n # a list that contains all the apartment URLs\n if not response.status_code == 404:\n soup = BeautifulSoup(results, 'lxml')\n apts = soup.find_all('a', attrs={'data-tid': 'property-title'})\n apt_urls = [apt['href'] for apt in apts]\n\n return apt_urls", "def parse_page(self, page):\n if self.domain == extract_domain(page[\"url\"]) and page[\"valid_content_type\"]:\n parent = page[\"url\"]\n parser = Parser(self.config)\n links = parser.feed_me(page[\"data\"])\n new_links = [x for x in links if x not in self.visited]\n full_links = [parse.urljoin(parent, l) for l in new_links]\n for l in full_links:\n if l not in self.visited:\n li = {\"parent\": parent, \"url\": l}\n self.TO_PROCESS.put(li)", "def _visit_paragraph(self,elem):\n 
# only add this p if we don't already have a descriptor for the site\n if self._curr_url not in self._url_paragraphs:\n try:\n paragraph_text = self._text_of_para(elem).strip()\n paragraph_text = strip_tags(paragraph_text)\n paragraph_text = (paragraph_text[:1001] + '...') if len(paragraph_text) > 1000 else paragraph_text\n self._url_paragraphs[self._curr_url] = paragraph_text\n print \"description of url:\" + repr(paragraph_text)\n except:\n print \"Failed to get paragraph text\"", "def get_webpage(url):\n response = requests.get(url)\n data = response.text\n soup = BeautifulSoup(data, features=\"html.parser\")\n\n for tag in INVALID_TAGS:\n for match in soup.findAll(tag):\n match.replaceWithChildren()\n\n paragraphs = [str.join('', paragraph.children) for paragraph in soup.findAll('p')]\n return paragraphs", "def retrieving_data():\n for x in range(1):\n page_number=random.randint(1,500)\n page_num=str(page_number)\n url = 'http://www.tastespotting.com/browse/'+page_num\n req = http.request('GET', url)\n data = BeautifulSoup(req.data,'html.parser')\n for each_div in data.find_all(\"div\", { \"class\": \"trendspotted-item\"}):\n for each_recipe in each_div.find_all('a', href=True):\n \"\"\"links starting with /clicks are the links of recipe to their original sites, so just retrieve those links\"\"\"\n if each_recipe['href'].startswith('/click'):\n retrieving_data.recipe_link=each_recipe['href'][16:-12]\n for each_img in each_recipe.find_all('img', alt=True):\n retrieving_data.recipe_image=each_img['src']\n for each_caption in each_div.find(\"p\", { \"class\": \"photo_caption\"}):\n retrieving_data.recipe_title=each_caption" ]
[ "0.76509404", "0.6316497", "0.6232622", "0.60066766", "0.59613264", "0.55365926", "0.5433117", "0.5388524", "0.53653985", "0.5345765", "0.5338251", "0.5269565", "0.52671176", "0.5202305", "0.51900584", "0.51822567", "0.51615363", "0.5134702", "0.5116707", "0.5097585", "0.509344", "0.50851935", "0.5079829", "0.50709045", "0.5043738", "0.50319874", "0.50241995", "0.49855113", "0.49777198", "0.4975725" ]
0.80227727
0
This method parses each poem on the poem pages and finally yields the PoemItem objects
def parse_poet_poem(self, response): poemitem = response.meta['poemitem'] sresponse = scrapy.Selector(response) poemitem['poem_text'] = sresponse.xpath('//div[@property = "content:encoded"]//text()').extract() poemitem['poem_copyright'] = sresponse.xpath('//div[@class = "poem-credit"]//p//text()').extract() yield poemitem
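Both XPath extracts in the method above return lists of text nodes rather than single strings. As a usage illustration only, a small item pipeline that joins those lists into cleaned strings might look like the sketch below; the pipeline class name and the idea of registering it under ITEM_PIPELINES are illustrative assumptions rather than part of the scraped project.

    # Hypothetical post-processing pipeline: joins the list-of-strings fields
    # produced by .extract() into single newline-separated strings.
    class JoinTextPipeline:
        def process_item(self, item, spider):
            for field in ('poem_text', 'poem_copyright'):
                value = item.get(field)
                if isinstance(value, list):
                    item[field] = '\n'.join(part.strip() for part in value if part.strip())
            return item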
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_poet_poems(self, response):\n poet_poems_url = response.meta['poet_poems_url']\n\n sresponse = scrapy.Selector(response)\n\n #like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next\n # page in order to extract all of the poems associated with each poet\n nextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table_poems = sresponse.xpath('//tbody/tr')\n\n #poetry.org does not provide text for all of the poems available, some links are for audio versions only,\n #therefore need to avoid storing poemitems that are not text\n regex = re.compile(r'audio')\n\n for row in table_poems:\n if len(row.xpath('td/a/@href').extract()[0]) > 0 :\n poemlink = u''.join(row.xpath('td/a/@href').extract()[0])\n linktext = str(poemlink)\n if regex.search(linktext) is None:\n if len(row.xpath('td//text()').extract())>0:\n poemitem = PoemItem()\n poemitem['poet_poems_url'] = poet_poems_url\n poemitem['poem_yrpub'] = row.xpath('td//text()').extract()[1]\n poemitem['poem_title'] = row.xpath('td//text()').extract()[4]\n poemitem['poem_link'] = urlparse.urljoin(\"http://www.poets.org\",poemlink)\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poemlink),\n callback=self.parse_poet_poem, meta={'poemitem': poemitem})\n\n #if more poems on next page, use this method again\n if len(nextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",nextpagelink),\n callback=self.parse_poet_poems, meta= {'poet_poems_url': poet_poems_url})", "def parse_poet(self, response):\n item = response.meta['item']\n\n sresponse = scrapy.Selector(response)\n poetdata = sresponse.xpath('//div[@class=\"view-content\"]')\n\n #TODO: Clear empty strings from poet item fields\n\n item['poet_basicbio'] = poetdata[0].xpath('div/span//text()').extract()\n item['poet_positions'] = poetdata[0].xpath('div//div/text()').extract()\n item['poet_posyears'] = poetdata[0].xpath('div//div/span/text()').extract()\n item['poet_bio'] = sresponse.xpath('//div[@class=\"field-items\"]//p//text()').extract()\n\n #this important link goes to the page of poems for each poet\n poetpoemlink = u''.join(sresponse.xpath('//div[@class=\"view-footer\"]/a/@href').extract())\n poet_poems_url = urlparse.urljoin(\"http://www.poets.org\",poetpoemlink)\n\n item['poet_poems_url'] = poet_poems_url\n\n #PoetItem finishes here\n yield item\n\n #goes to method that parses poems found in the poet_poems_url\n yield scrapy.Request(url=poet_poems_url, callback=self.parse_poet_poems, meta={'poet_poems_url': poet_poems_url })", "def read_poems(poet, start, end):\r\n\r\n failed = []\r\n\r\n for i in range(start, end + 1):\r\n url = URL + str(i)\r\n try:\r\n info_dict = process_poem(url)\r\n write_file(poet, info_dict)\r\n if info_dict['multipage']:\r\n keep_going = True\r\n pagenum = 2\r\n while keep_going:\r\n try:\r\n tempurl = url + '&lim=20&pageno=' + str(pagenum)\r\n info_dict = process_poem(tempurl)\r\n print('here')\r\n write_file(poet, info_dict)\r\n pagenum = pagenum + 1\r\n except:\r\n keep_going = False\r\n\r\n except:\r\n failed.append(i)\r\n\r\n print('Failed for %d out of %d pages'%( len(failed), end - start + 1 ), failed)", "def _paragraphs_raw(self):\n for par in self.parsed.find_all(\"p\")[self.PAR_START:]:\n yield par", "def process_poem(url):\r\n\r\n response = get(url)\r\n html_soup = BeautifulSoup(response.text, 'html.parser')\r\n beyts = html_soup.find_all('span', class_ = 'verse')\r\n beyts = [beyt.text for beyt 
in beyts]\r\n info_dict = process_key_items(html_soup)\r\n info_dict['beyts'] = beyts\r\n\r\n return info_dict", "def extract_linked_items(pages):\n for page in pages:\n for iterate in iterate_on_items(page):\n yield((iterate[1:])[:-1])", "def parse_all(self):\n\n # Generates a list of apartment urls\n self.parse_apartment_urls()\n\n # Parses each apartment url and stores it in apartment_data\n for apartment_url in self.apartment_urls:\n self.parse_single_page(apartment_url)", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def iter_items(items):\n for it in items:\n text = nlp(it[\"text\"].lower())\n\n # Stop word removal\n token_list = [\n token.lemma_\n for token in text\n if not token.is_stop and not token.is_punct\n ]\n\n it[\"text\"] = \" \".join(token_list)\n\n children_items = it.get(\"content\", {}).get(\"items\")\n if children_items:\n iter_items(children_items)", "def iterate_on_items(pagecode):\n parser = etree.HTMLParser()\n \n tree = etree.parse(StringIO(pagecode), parser)\n\n # xpath = \"/html/body/div[3]/div[3]/div[3]/ul/li[83]/a/span/span[2]\"\n span_class = \"wb-itemlink-id\"\n request = tree.xpath('//span[@class=\"{}\"]'.format(span_class))\n for span in request:\n yield span.text", "def parse_proposal_page(self, page_name):\n self.page_name = page_name\n text = self.get_page_over_api(PROPERTY_PROPOSAL_PREFIX+page_name)\n wikicode = mwparserfromhell.parse(cleanup_text(text.encode('utf-8')))\n\n for node in wikicode.filter(forcetype=(Template,Heading)):\n if isinstance(node, Heading):\n self.latest_labels = self.parse_translatable(node.title)\n elif isinstance(node, Template):\n template = node\n if (unicode(template.name).strip() == 'Property proposal' and\n template.get('status').value.strip() == 'ready'):\n self.parse_proposal_template(template)\n self.users = self.extract_users(wikicode)\n break\n self.orig_wikicode = wikicode", "def parse(self, response):\n\n #下面这种写法使用生成器方式比较好\n \"\"\" items = []\n for i in response.css('div.quote'):\n item = ScrapequoteItem()\n item['tag'] = i.css('span.text[itemprop]::text').get()\n item['author'] = i.css('small.author::text').get()\n items.append(item)\n return items \"\"\"\n\n for i in response.css('div.quote'):\n item = ScrapequoteItem()\n item['tag'] = i.css('span.text[itemprop]::text').get()\n item['author'] = i.css('small.author::text').get()\n yield item\n\n #以下循环获取其他页面\n next_page = response.css('li.next a::attr(href)').get()\n if next_page is not None:\n yield response.follow(next_page, callback=self.parse) #返回一个Request instance", "def _parse_xml(self):\n self.properties = {}\n pages = self.root.findall('page')\n self.pages = {} \n\n for page_num, page in enumerate(pages): \n\n _, _ , width, height = page.attrib[\"bbox\"].split(\",\")\n width, height = float(width), float(height)\n \n page_object = {\"page\": page_num + 1 , \"width\": width, \"height\": height} \n lines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n print(\"{} Number of Lines in Page {}\".format(len(lines), page_num))\n \n self.bbox = {'x1': [] , 'y1':[], 'x2':[], 'y2':[]}\n textlines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n textlines = sorted(textlines, key= lambda x: -float(x.attrib['bbox'].split(',')[3]))\n \n \n line_objects = []\n for idx, item in enumerate(textlines):\n item_props = self._extract_textline_properties(item)\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = 
Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3])\n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n\n line_objects.append(item_props)\n page_object[\"lines\"] = line_objects\n \n \n others = [] \n# for key in [\"rect\", \"figure\", \"layout/textgroup\", \"curve\"]: \n for key in [\"curve\", \"rect\", \"figure\"]: \n other_objs = self.root.findall('page[@id=\\'{}\\']/{}'.format(page_num+1, key)) \n for idx, item in enumerate(other_objs):\n \n item_props = {\"type\": key}\n# print(key, ET.tostring(item))\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3]) \n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n others.append(item_props)\n \n page_object[\"others\"] = others\n page = Page(page_object)\n page_object[\"para\"] = page.para\n page_object[\"plines\"] = page.lines\n page_object[\"bigbox\"] = page.bigbox\n page_object[\"components\"] = page.components\n\n self.pages[page_num+1] = page_object", "def parse_items(self):", "def parse(self, response):\n item = NewsScraperItem()\n containers = response.xpath(\"//div[contains(@class,'largeTitle')]/article[contains(@class,\"\n \"'articleItem')]/div[contains(@class,'textDiv')]\")\n for info in containers:\n\n try:\n date = info.xpath(\".//div[contains(@class,'articleDetails')]/span[contains(@class,'date')]/text()\").extract_first()\n date = re.sub(r'\\xa0-\\xa0', '', date)\n # Convert 'minutes ago' to datetime\n date = datetime.now() - timedelta(minutes=int(re.sub(r'[^0-9]', '', date))) # Regex = Where not numeric\n item['date'] = date.strftime(\"%Y/%m/%d %H:%M:%S\")\n earn_id = re.search(r'[0-9]{4,}', info.xpath(\".//a/@onclick\").extract_first())\n item['id'] = earn_id.group()\n item['title'] = info.xpath(\".//a/text()\").extract_first()\n item['author'] = info.xpath(\".//div[contains(@class,'articleDetails')]/span/text()\").extract_first()\n item['text'] = info.xpath(\".//p/text()\").extract_first()\n item['link'] = info.xpath(\".//a/@href\").extract_first()\n yield item\n\n except:\n print(\"Unusual format detected\")\n logging.warning(\"Item skipped due to unusual format\")", "def HTMLparser(self):\n soup = self.getHTML()\n \n # Sort through all the text in the html:\n for text in soup.find_all('p'):\n try:\n paragraphNo = int(text.parent.p['id'][14:])\n \n # Only grab paragraphs in \"On the Social Contract\"\n if paragraphNo < self.START_PARAGRAPH or paragraphNo > self.END_PARAGRAPH:\n continue\n \n elif text.string:\n \n # Ignore those \"paragraphs\" in the html that simply outline different chapters/books\n if re.search('^(CHAPTER|BOOK)(.*):', text.string):\n continue\n \n else:\n \n # Want to read in the document by sentence (for RousseauBot to use individually later on)\n tempList = re.split('(?<!etc)\\.\\s(?!.*\\\")|\\!', text.string)\n for sentence in tempList:\n \n # When a \"paragraph\" is just a single sentence, re's .split() returns the sentence and a ''\n # Also, remove overly long quotes - Twitter has char limit\n if sentence != '' and len(sentence.strip()) < self.TWITTER_LIMIT:\n self.quotes.append(sentence.strip())\n \n except KeyError:\n \n # BS throws KeyError when <p>'s id field is blank; ignore - all paragraphs I need has an id\n continue", "def __iter__(self):\n while self.has_next_page():\n 
response = self.get_next_page_response()\n for item in self.get_items_from_response(response):\n yield item", "def parse_items(self,response):\n sel = Selector(response)\n item = response.meta['job_item']\n company_item = response.meta['company_item']\n company_item['introduction'] = sel.xpath('//div[@class=\"job-item main-message noborder\"]/div[@class=\"content content-word\"]/text()').extract_first()\n company_item['address'] = sel.xpath('//div[@class=\"company-infor\"]/p/text()').extract_first()\n item['link'] = response.url\n item['requirement'] = sel.xpath('//div[@class=\"content content-word\"][1]/text()').extract_first()\n item['website_id'] = 7\n item['company'] = company_item\n print item\n yield item", "def parse_movement(self, response):\n movement_name = response.meta['movement_name']\n movement_url = response.meta['movement_url']\n\n sresponse = scrapy.Selector(response)\n\n #Because each movement page contains a table that has maximum of ten rows, we need to go to the next page\n #in order to extract all of the poets associated for each movement\n poetnextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table = sresponse.xpath('//tbody/tr')\n for row in table:\n item = PoetItem()\n item['movement_name'] = movement_name\n item['movement_url'] = movement_url\n if len(row.xpath('td/a/text()').extract())>0:\n item['poet_name'] = row.xpath('td/a/text()').extract()\n if len(row.xpath('td/a/@href').extract())>0:\n #the link is for the poet bio page on poetry.org website\n link = u''.join(row.xpath('td/a/@href').extract())\n item['poet_url'] = urlparse.urljoin(\"http://www.poets.org\",link)\n if len(row.xpath('td/span/text()').extract()) > 0:\n item['poet_dob2'] = row.xpath('td/span/text()').extract()\n if len(row.xpath('td/text()').extract())>0:\n #a poet may be tagged/associated with multiple movements\n item['poet_tags'] = row.xpath('td/text()').extract()\n yield scrapy.Request(url =urlparse.urljoin(\"http://www.poets.org\",link), callback=self.parse_poet,\n meta = {'item': item})\n\n #if more poets on next page, use this method again\n if len(poetnextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poetnextpagelink),\n callback=self.parse_movement, meta = {'movement_name': movement_name,\n 'movement_url':movement_url})", "def parse_main(self, response):\n\n for i in response.xpath('//div[contains(@class,\"products-list__item\")]'):\n item = {\n \"VENDORID\": 1055,\n \"VENDOR\": 'JC SALES',\n \"ITEMNO\": i.xpath('.//span[contains(text(),\"Item No:\")]/text()').get().replace('Item No:', '').strip(),\n \"DESCRIPTION\": i.xpath('.//div[contains(@class,\"product-card__name\")]//a/text()').get(),\n \"IMAGE_URL\": i.xpath('.//div[contains(@class,\"product-card__image\")]//img[1]/@src').get(),\n \"PAGE_TITLE\": response.css('title::text').get(),\n \"PAGE_URL\": response.request.url\n }\n yield Request(response.urljoin(i.xpath('.//a[contains(@class,\"image__body\")]/@href').get()),\n self.parse_details, meta={'item': item})\n\n next_page = response.xpath('//a[text()=\">\"]/@href').get()\n if next_page is not None:\n next_page = response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse_main)", "def parse(self, response):\n\n product_page_links = response.css('.detailsLink')\n yield from response.follow_all(product_page_links, self.parse_item)\n\n pagination_links = response.css('span.fleft a')\n yield from response.follow_all(pagination_links, self.parse)", "def parse_page(self, 
page):\n if self.domain == extract_domain(page[\"url\"]) and page[\"valid_content_type\"]:\n parent = page[\"url\"]\n parser = Parser(self.config)\n links = parser.feed_me(page[\"data\"])\n new_links = [x for x in links if x not in self.visited]\n full_links = [parse.urljoin(parent, l) for l in new_links]\n for l in full_links:\n if l not in self.visited:\n li = {\"parent\": parent, \"url\": l}\n self.TO_PROCESS.put(li)", "def get_posts(self, url=None):\n if not url:\n url = self.base_url\n\n self.log.debug(\"Getting URL: %s\", url)\n page_data = json.loads(urlopen(url).read().decode(\"utf-8\"))\n\n for post in page_data.get(\"data\", []):\n if \"message\" not in post:\n continue\n\n for word in self.keywords:\n if word in post[\"message\"]:\n self.log.debug(\"Emitting post: %s\", post[\"id\"])\n yield post\n break\n\n paging = page_data.get(\"paging\", {})\n\n if \"next\" in paging:\n for post in self.get_posts(paging[\"next\"]):\n yield post\n\n return", "def iteratePageItems(self, page, func=dict):\n\n for item in page.items:\n yield func(**item)\n\n if page.nextPageUrl:\n res = self.getRequest(page.nextPageUrl)\n nextPage = vsdModels.Pagination(**res)\n for nextItem in self.iteratePageItems(nextPage, func=func):\n yield nextItem", "def parse(self, response):\n products = response.xpath('//a[@class=\"tile\"]')\n for product in products:\n href = product.xpath('@href').extract_first()\n yield response.follow(href, callback=self.parse_product)\n\n # Follow next page if it exists\n next_page = response.xpath('//span[@class=\"pager_next\"]/a')\n if next_page:\n href = next_page.xpath('@href').extract_first()\n yield response.follow(href)", "def _convert(self):\n root = cElementTree.fromstring(self.html)\n for el in root.getiterator():\n if el in self.visited:\n continue\n self.visited.update([el])\n if el.tag == 'p':\n parser = ParagraphParser(el)\n self.document_state.append(parser.tag)\n self.visited.update(el.getiterator())", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def get_items(self):\n export_file = self.cmdline_args.file # see setup_parser\n for a in get_articles(export_file):\n yield node(\n heading=dt_heading(\n a.added,\n # 'pocket' permalink is pretty convenient to jump straight into Pocket app\n link(title='pocket', url=a.pocket_link) + ' · ' + link(title=a.title, url=a.url),\n ),\n children=[node( # comments are displayed as org-mode child entries\n heading=dt_heading(hl.created, hl.text)\n ) for hl in a.highlights]\n )", "def parse(self, response):\n result = ujson.loads(response.text)\n cards = result.get('data', {}).get('cards')\n if cards:\n current_page = response.meta['page']\n if current_page >= self.upper_bound:\n page_num = current_page + 1\n yield FormRequest(\n method='GET',\n url=self.url,\n formdata={'containerid': response.meta['id'],\n 'page': str(page_num)},\n meta={'page': page_num,\n 'id': response.meta['id']},\n callback=self.parse)\n\n card_groups = genCardGroup(cards)\n mblogs = genMblog(card_groups)\n yield from (KeywordItem(idstr=mblog.get('idstr'),\n mblog=mblog,\n query=self.query,\n weibo_type=self.weibo_type)\n for mblog in mblogs)", "def read_page(bs, adj):\n paragraphs = bs.find('div',{'id':'bodyContent'}).find_all('p')\n for p in paragraphs:\n EntryParser.count_name(p.text, adj)\n return adj" ]
[ "0.7213079", "0.71406233", "0.6475927", "0.59090114", "0.5766759", "0.5761037", "0.5740958", "0.57350755", "0.57176733", "0.56667167", "0.5645379", "0.56218153", "0.56191784", "0.5588704", "0.5571916", "0.5538668", "0.5532088", "0.55089444", "0.55086166", "0.54917306", "0.5487516", "0.5476489", "0.5444266", "0.54334474", "0.5432304", "0.54275066", "0.5423822", "0.5412203", "0.5409", "0.54026294" ]
0.7146734
1
Creates a coroutine that does nothing for when no sleep is needed.
async def no_sleep_coro(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def awaitable(obj):\n yield from asyncio.sleep(0)\n return obj", "def run_no_args(self):\n while True:\n if self.cancelled:\n return\n self.func()\n time.sleep(self.sleep_time / 1000.00)", "def without_wait(self):\n return self.temp_implicit_wait(0)", "def test_no_sideeffects(self):\n c = EventLoop(\n lambda: None,\n lambda f, g: 1 / 0,\n lambda *args: 1 / 0,\n watchdog_thread=object(),\n reapAllProcesses=lambda: 1 / 0)\n del c", "def sleep(duration):\n f = Future()\n IOLoop.current().call_later(duration, lambda: f.set_result(None))\n return f", "def sleep(secs: float) -> Coroutine[None, None, None]:\n return time_sleep_coro(secs)", "def coroutine(func):\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n cr.send(None)\n return cr\n return start", "def functionThatShouldNotTimeout():\n return None", "def fake_sleep(time_to_sleep):\n if time_to_sleep:\n global sleep_allowance\n sleep_allowance -= 1\n if not other_threads_are_active():\n if sleep_allowance < -1:\n raise RuntimeError(\"Sleeping for no reason.\")\n else:\n return # Forgive the thread for calling this for one time.\n sleep_allowance = allowable_empty_sleeps\n\n cr = Coroutine.get_current()\n for ft in fake_threads:\n if ft['greenlet'].id == cr.id:\n ft['next_sleep_time'] = time_to_sleep\n\n cr.sleep()", "def NoDelay(self) -> bool:", "def NoDelay(self) -> bool:", "def handle_sleep(_):\n loop.sleep()", "async def awaitable(obj):\n await asyncio.sleep(0)\n return obj", "def run_nop(self):\n pass", "def yield_(cls):\n cls.sleep(0.0)\n #note that we don't use stackless.schedule() here anymore. This would still hog the CPU, never getting\n #never getting into the libevent loop again. by using sleep we prevent this", "def anti_idle_timer_handler(self):\n if not self.connected(): return\n self.send_nop()\n self.start_anti_idle_timer()", "async def restless_sleep(duration):\n\n while duration > 0:\n await asyncio.sleep(1)\n\n # Poll for program running state\n if Program.is_running():\n duration = duration - 1\n continue\n\n # Otherwise, program is done running, raise an exception to be caught\n raise ProgramShutdownError", "def basic_noisy_selfloop_subsystem():\n net = basic_noisy_selfloop_network()\n state = basic_state()\n return Subsystem(net, state)", "def blip(wait=0.01):\n yield from asyncio.sleep(wait)\n return True", "def set_sleep_time_on_iter_none(self, sleep_time_s: Union[int, float] = 0.1):\n self._sleep_time_on_iter_none_s = sleep_time_s", "async def async_generator() -> Generator[float, None, None]:\n for _ in range(10):\n await asyncio.sleep(1)\n yield random.random() * 10", "async def sleep_fake(*args, **kwargs):\n sleep_sync_mock(*args, **kwargs)", "def noop():", "def test_no_listeners(testloop, testchannel):\n\n async def run():\n \"\"\"run\"\"\"\n async for i in aiter(range(10)):\n await testchannel.send(i)\n await asyncio.sleep(0)\n\n with testchannel.open():\n testchannel.start(asyncfunc=False)\n testloop.run_until_complete(run())", "async def async_generator() -> Generator[float, None, None]:\n\n for i in range(10):\n yield random.random()\n await asyncio.sleep(1)", "def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)", "def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)", "async def async_generator() -> Generator[float, None, None]:\n for i in range(10):\n yield (random.uniform(0, 10))\n await asyncio.sleep(1)", "def nothing():\n pass", "def fake_spawn(time_from_now_in_seconds, func, *args, **kw):\n def thread_start():\n # 
fake_sleep(time_from_now_in_seconds)\n return func(*args, **kw)\n\n cr = Coroutine(thread_start)\n fake_threads.append({'sleep': time_from_now_in_seconds,\n 'greenlet': cr,\n 'name': str(func)})" ]
[ "0.62436175", "0.6190582", "0.61719257", "0.6137821", "0.5900524", "0.58804405", "0.5874823", "0.58503664", "0.58102584", "0.57648695", "0.57648695", "0.5745751", "0.5739838", "0.57116777", "0.5652093", "0.5645594", "0.5622938", "0.56145954", "0.5602458", "0.55889374", "0.5586225", "0.558502", "0.55810255", "0.55748135", "0.5566441", "0.5547029", "0.5547029", "0.5529265", "0.5519507", "0.5495111" ]
0.8199025
0
A replacement sleep for Windows. Note that unlike `time.sleep` this may sleep for slightly less than the specified time. This is generally not an issue for Textual's use case. In order to create a timer that _can_ be cancelled on Windows, we need to create a timer and a separate event, and then we wait for either of the two things. When Textual wants to quit, we set the cancel event.
def sleep(secs: float) -> Coroutine[None, None, None]: # Subtract a millisecond to account for overhead sleep_for = max(0, secs - 0.001) if sleep_for < 0.0005: # Less than 0.5ms and its not worth doing the sleep return no_sleep_coro() timer = kernel32.CreateWaitableTimerExW( None, None, CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, TIMER_ALL_ACCESS, ) if not timer: return time_sleep_coro(sleep_for) if not kernel32.SetWaitableTimer( timer, ctypes.byref(LARGE_INTEGER(int(sleep_for * -10_000_000))), 0, None, None, 0, ): kernel32.CloseHandle(timer) return time_sleep_coro(sleep_for) cancel_event = kernel32.CreateEventExW(None, None, 0, TIMER_ALL_ACCESS) if not cancel_event: kernel32.CloseHandle(timer) return time_sleep_coro(sleep_for) def cancel_inner(): """Sets the cancel event so we know we can stop waiting for the timer.""" kernel32.SetEvent(cancel_event) async def cancel(): """Cancels the timer by setting the cancel event.""" await asyncio.get_running_loop().run_in_executor(None, cancel_inner) def wait_inner(): """Function responsible for waiting for the timer or the cancel event.""" if ( kernel32.WaitForMultipleObjects( 2, ctypes.pointer((HANDLE * 2)(cancel_event, timer)), False, INFINITE, ) == WAIT_FAILED ): time_sleep(sleep_for) async def wait(): """Wraps the actual sleeping so we can detect if the thread was cancelled.""" try: await asyncio.get_running_loop().run_in_executor(None, wait_inner) except asyncio.CancelledError: await cancel() raise finally: kernel32.CloseHandle(timer) kernel32.CloseHandle(cancel_event) return wait()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sleep(self, sleep_time: float = 10) -> None:\n sleep_until_interrupt(sleep_time, lambda: self.stopped, interval=0.5)", "def sleep(sleep_time=0.250):\n time.sleep(sleep_time)", "def sleep(secs=1.0):\n time.sleep(secs)", "def set_sleep_timer(self, option, time):\n params = [\n ('option', option),\n ('sleeptime', int(time)),\n ]\n\n self.get(COMMAND_UIC, 'SetSleepTimer', params)", "def sleep(seconds):\n\n return Sleep(seconds)", "def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)", "def sleep(seconds):\n\n # Check seconds to ensure it is a valid type.\n if type(seconds) not in [long, float, int]:\n raise RepyArgumentError(\"Invalid type \" + str(type(seconds)))\n\n # Using getruntime() in lieu of time.time() because we want elapsed time \n # regardless of the oddities of NTP\n start = nonportable.getruntime()\n sleeptime = seconds\n\n # Return no earlier than the finish time\n finish = start + seconds\n\n while sleeptime > 0.0:\n time.sleep(sleeptime)\n\n # If sleeptime > 0.0 then I woke up early...\n sleeptime = finish - nonportable.getruntime()", "def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # stops working (returns too early). The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()", "def sleep(min_seconds=1, max_seconds=10):\n time.sleep(randint(min_seconds, max_seconds))", "def sleep(seconds):\r\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "async def _sleep(self, sleep_time: float = 10) -> None:\n async def _interrupt() -> bool:\n return self.stopped\n await async_sleep_until_interrupt(sleep_time, _interrupt, interval=0.5)", "def pause(seconds: float) -> None:\n time.sleep(cast(float, seconds))", "def sleep(self, seconds):\n\n # We schedule an alarm signal for x=seconds out in the future.\n # noinspection PyUnusedLocal\n def handle_alarm(signal_num, frame):\n pass\n\n signal.signal(signal.SIGALRM, handle_alarm)\n signal.alarm(seconds)\n\n # Wait for either the alarm to go off or for us to receive a SIGINT.\n signal.pause()\n\n # Remove the alarm if it is still pending.\n signal.alarm(0)", "def thread_sleep(seconds, event):\n for i in range(seconds):\n if event and event.is_set():\n return 1\n sleep(1)\n return 0", "def deepsleep(time_ms: int = None) -> None:", "def pause(seconds):\n time.sleep(seconds);", "def wait_inner():\n if (\n kernel32.WaitForMultipleObjects(\n 2,\n ctypes.pointer((HANDLE * 2)(cancel_event, timer)),\n False,\n INFINITE,\n )\n == WAIT_FAILED\n ):\n time_sleep(sleep_for)", "def lightleep(time_ms: int = None) -> None:", "def sleep(self, timeout):\n try:\n self._wait_in_process_loop(lambda: (False,None),timeout=timeout)\n except threadprop.TimeoutThreadError:\n pass", "def pytest_timeout_cancel_timer(item):\n tle.lib.cancel()\n return True", "def pause(*args, seconds: int=0, **kwargs)->None:\n pass", "def sleep(self):\n for i in range(10):\n if cancelled: return False\n time.sleep(1)\n return True", "def TimeDelay (self, delay, cancel = None):\n if self.Disposed:\n return RaisedFuture (FutureCanceled ('Core is stopped'))\n\n return self.timer.Await (time () + delay, cancel)", "def timeout_syscall(seconds):\n def timeout_handler(signum, frame):\n raise InterruptedError\n\n original_handler = signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(seconds)\n try:\n yield\n finally:\n signal.alarm(0)\n signal.signal(signal.SIGALRM, original_handler)", "def 
sleep_sim_time(world, seconds, state_break=[False]):\n start = world.last_time if world.last_time else Time()\n remain = seconds\n\n while remain > 0 and not state_break[0]:\n yield From(trollius.sleep(0.1))\n now = world.last_time if world.last_time else Time()\n remain = seconds - float(now - start)", "def sleep(self):\n if self._stop is not None:\n timeLeft = max(self._stop - time.time(), 0) \n sleep = min(self._sleep, timeLeft)\n else:\n sleep = self._sleep\n time.sleep(sleep)", "def sleep(seconds: typing.Union[float, int]):\n if seconds == 0:\n yield\n elif seconds == inf:\n yield from sleepinf()\n else:\n end = monotonic() + seconds\n while end >= monotonic():\n yield", "def sleep(interval):\n time.sleep(interval) # pragma: no cover" ]
[ "0.6478523", "0.6467439", "0.62972337", "0.62152714", "0.61905533", "0.61398786", "0.6105847", "0.61010325", "0.6100269", "0.609553", "0.6091412", "0.6091412", "0.6072209", "0.60244656", "0.6000396", "0.59438556", "0.59020305", "0.5901713", "0.5888825", "0.58390737", "0.58360493", "0.5828961", "0.580311", "0.5788876", "0.5776818", "0.5738386", "0.5647423", "0.5628671", "0.5627849", "0.5608824" ]
0.6513097
0
Sets the cancel event so we know we can stop waiting for the timer.
def cancel_inner(): kernel32.SetEvent(cancel_event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cancel(self):\n self.waiter.set_result_if_pending(None)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cancel(self):\n self.waiter.set_result_if_pending(True)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cancel(self):\n self.stop()\n self.make_callback('canceled')", "def cancel(self):\n self.waiter.set_result_if_pending([])\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cancel(self):\n self.on_cancel()", "def cancel_time(self, cancel_time):\n\n self._cancel_time = cancel_time", "def OnCancel(self, event):\n pass", "def OnCancel(self, event):\n pass", "def cancel(self):\n self.cancelled.set()", "def cancel(self):\n self.__canceled = True", "def cancel(self):\n self.cancelled = True", "def cancel(self):\n self.cancelled = True", "def cancel_callback(self):\n pass", "def on_cancel(self):\n self.state = CANCELED\n self._reject()", "def cancel(self):\n if self._timer:\n self._timer.cancel()\n self._timer = None\n else:\n raise Exception('Cannot cancel timer. No timer started.')", "def cancel_wait(self):\n self.lib.CancelWait()", "def cancel():", "def _cancel(self):\n now = LOOP_TIME()\n next_ = self.last + USER_CHUNK_TIMEOUT\n if next_ > now:\n self.timer = KOKORO.call_at(next_, type(self)._cancel, self)\n else:\n self.timer = None\n self.waiter.set_result_if_pending(False)", "async def wait_for_cancel(self):\n await self._cancel", "def on_cancel(self) -> None:\n pass", "def on_cancel(self) -> None:\n pass", "def cancel(self):\n pass", "def cancel(self):\n GameLoop.getInstance()._cancelation_token = True", "def timer_canceled(self, timer):\n try:\n try:\n timer.impltimer.stop()\n del timer.impltimer\n except (AttributeError, TypeError):\n pass\n finally:\n super(Hub, self).timer_canceled(timer)", "def _async_cancel_timer(self) -> None:\n if self._timer:\n self._timer.cancel()\n self._timer = None", "def cancel(self):", "def cancel(self):", "def cancel(self):", "def do_cancel(self):\r\n self.write({'cancelled': True})", "def cancel(self):\n if self._timerID is not None:\n self._tkWdg.after_cancel(self._timerID)\n self._timerID = None\n return True\n return False" ]
[ "0.7676816", "0.7467314", "0.7397475", "0.733986", "0.7244259", "0.71759206", "0.71301645", "0.71301645", "0.7041629", "0.70007837", "0.6995933", "0.6995933", "0.69819576", "0.6949455", "0.69289273", "0.68336475", "0.68163085", "0.67847276", "0.6758899", "0.67556834", "0.67556834", "0.6751718", "0.66788155", "0.6637695", "0.6616702", "0.66130143", "0.66130143", "0.66130143", "0.6585344", "0.65556973" ]
0.7688794
0
Function responsible for waiting for the timer or the cancel event.
def wait_inner(): if ( kernel32.WaitForMultipleObjects( 2, ctypes.pointer((HANDLE * 2)(cancel_event, timer)), False, INFINITE, ) == WAIT_FAILED ): time_sleep(sleep_for)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait(self, timeoout=None, state=\"C-completed\"):", "async def wait_for_cancel(self):\n await self._cancel", "def _wait_for_completion(self):\n if self.do_timing:\n self.timer.start(\"Running.\")\n\n while self.state != State.COMPLETED:\n self._update_state()\n\n if self.do_timing:\n self.timer.stop()", "def _cancel(self):\n self.waiter.set_result_if_pending(None)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cancel(self):\n self.waiter.set_result_if_pending(True)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "async def wait():\n try:\n await asyncio.get_running_loop().run_in_executor(None, wait_inner)\n except asyncio.CancelledError:\n await cancel()\n raise\n finally:\n kernel32.CloseHandle(timer)\n kernel32.CloseHandle(cancel_event)", "def cancel_wait(self):\n self.lib.CancelWait()", "def cancel(self):\n self.waiter.set_result_if_pending([])\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def do_wait(self):\n pass", "def wait(self):\n self.event.wait()", "def test_wait_cancel(self):\n skill = create_skill()\n\n converser = Thread(target=create_converse_responder('cancel', skill))\n converser.start()\n validator = mock.Mock()\n validator.return_value = False\n on_fail = mock.Mock()\n\n def is_cancel(utterance):\n return utterance == 'cancel'\n\n response = skill._wait_response(is_cancel, validator, on_fail, 1)\n self.assertEqual(response, None)\n converser.join()", "def wait(self, ms=None):\r\n util.raiseNotDefined()", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")", "def wait(self) -> None:\n\n self.event_.wait()", "def wait():\n pass", "def wait(self):\n while not self.done:\n self.device._handle_events(1000)", "def wait_for_event(event):\r\n return event.accept()", "def wait(self):\n pass", "def wait(self):\n pass", "def wait_or_fail(self, evt, timeout=2.0, msg=''):\n res = evt.wait(timeout)\n if not res:\n self.fail(msg)", "def wait(self, time):\n self._wait = Event()\n return not self._wait.wait(time)", "def wait_for_event_timeout(event):\n received = event.wait(2)\n name = threading.current_thread().getName()\n print \"Waited with timeout, got {}, name {}\".format(received, name)", "def __call__(self, event):\n self.waiter.set_result_if_pending(event.users)\n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()\n \n return True", "def wait_for_completion(self, timeout=10):\n cur_status = self.runtime_status()\n while cur_status not in ['FAILED', 'KILLED', 'FINISHED']:\n time.sleep(0.2)\n timeout -= 0.2\n cur_status = self.runtime_status()\n if timeout < 0:\n break\n\n return timeout > 0", "def _wait_on_condition(self, timeout):\n self.__condition.wait(timeout)", "def wait(self, timeout):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def wait_until_done(self, timeout=10.0):\r\n cfunc = lib_importer.windll.DAQmxWaitUntilTaskDone\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [lib_importer.task_handle, ctypes.c_double]\r\n\r\n error_code = cfunc(self._handle, timeout)\r\n check_for_error(error_code)", "def answer_waiting_call(self) -> None:", "def wait_until(self, check, timeout=None):\n self._wait_in_process_loop(lambda: (check(),None),timeout=timeout)", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass" ]
[ "0.7174734", "0.71252257", "0.68507195", "0.68419737", "0.67769885", "0.67605805", "0.67037636", "0.66891533", "0.66845816", "0.66050535", "0.65404123", "0.65313905", "0.64974064", "0.6473606", "0.64717025", "0.64504385", "0.6397383", "0.63804114", "0.63804114", "0.63766485", "0.6376169", "0.636437", "0.63166964", "0.62892705", "0.6285999", "0.62808526", "0.626137", "0.6256492", "0.62544084", "0.6253747" ]
0.76650566
0
Check {sysproid}.{appname}.{cell} is running.
def _test_app_running(self, running_set, sysproid, cell, appname): full_app_name = '%s.%s.%s' % (sysproid, appname, cell) self.assertIn(full_app_name, running_set)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_running(program):\n \n #cmd = [\"xdotool\", \"search\", \"--name\", program]\n cmd = [\"xdotool\", \"search\", \"--name\", \"--class\", \"--classname\", program]\n try:\n subprocess.check_output(cmd)\n return True\n except:\n return False", "def is_sm_running() -> bool:\n initd = '/etc/init.d'\n print(\"Checking SUSE Manager running...\")\n\n # Get tomcat\n tomcat = \"\"\n for cmd in os.listdir(initd):\n if cmd.startswith('tomcat'):\n tomcat = initd + \"/\" + cmd\n break\n\n return os.popen(tomcat + \" status 2>&1\").read().strip().find('dead') == -1", "def is_running(program):\n return program in get_running()", "def checkRunning(procname):\n return procdata.checkRunning(procname)", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def _is_running(self, package_name):\n cmd = r' |echo $(grep -E {package_name})'.format(package_name=package_name)\n if self.device.sdk_version > 25:\n cmd = r'ps -A' + cmd\n else:\n cmd = r'ps' + cmd\n processes = self.adb_client.shell(cmd).splitlines()\n for ps in processes:\n if ps:\n ps = ps.split()\n return ps[1]\n return None", "def is_running(proc_name:str) -> bool:\r\n with Popen(\"tasklist /NH /FO TABLE\", shell=False, stdout=PIPE) as proc:\r\n rprocs = proc.stdout.read().decode(\"utf-8\")\r\n plist = rprocs.split(\"\\r\\n\")\r\n return(any(i.lower().startswith(proc_name.lower()) for i in plist))", "def is_running(self) -> bool:\n return False", "def is_running(self):\n qstat = self._grep_qstat('running')\n if qstat:\n return True\n return False", "def test():\n\n zkclient = context.GLOBAL.zk.conn\n cell_name = context.GLOBAL.cell\n admin_cell = admin.Cell(context.GLOBAL.ldap.conn)\n\n # get cell attribute from ldap object\n cell = admin_cell.get(cell_name)\n sysproid = cell['username']\n\n running = zkclient.get_children(z.RUNNING)\n # prefilter treadmill apps to improve efficiency\n running_set = set([name.split('#')[0] for name in running])\n\n class SystemAppTest(unittest.TestCase):\n \"\"\"System apps checkout.\"\"\"\n\n for appname in ['app-dns', 'cellapi', 'adminapi', 'stateapi', 'wsapi']:\n\n @chk.T(SystemAppTest, running_set=running_set, sysproid=sysproid,\n cell=cell_name, appname=appname)\n def _test_app_running(self, running_set, sysproid, cell, appname):\n \"\"\"Check {sysproid}.{appname}.{cell} is running.\"\"\"\n full_app_name = '%s.%s.%s' % (sysproid, appname, cell)\n self.assertIn(full_app_name, running_set)\n\n return SystemAppTest", "def _is_running(self):\n return self._run_state.is_running()", "def _isrunning(self):\n return self.dp.state()==PyTango.DevState.RUNNING", "def check_running(self, fail_on_error=True):\n status = True\n state = self.check_mount_state(self.running_hosts)\n if state[\"unmounted\"] or state[\"nodirectory\"]:\n self.log.error(\n \"Error: dfuse not running on %s\",\n str(state[\"unmounted\"].union(state[\"nodirectory\"])))\n status = False\n if fail_on_error:\n raise CommandFailure(\"dfuse not running\")\n return status", "def check_ups(self, ups): #pylint: disable=no-self-use\n try:\n args = [\n MONITOR_COMMAND,\n f\"{ups}@localhost\"\n ]\n #pylint: disable=unused-variable\n results = subprocess.run(args, check=True, capture_output=True)\n return True\n except subprocess.CalledProcessError:\n return False", "def spotify_running():\n stdout = (\n subprocess.check_output([\"osascript\", \"-e\", 'application \"Spotify\" is running'])\n .decode(\"utf-8\")\n .strip()\n )\n\n return stdout == \"true\"", "def is_ghidra_running() -> bool:\n 
if os.name == \"nt\":\n find_ghidra = \"WMIC path win32_process get Commandline\"\n else:\n find_ghidra = \"ps -ax\"\n out = subprocess.check_output(find_ghidra.split())\n logger.debug(\"Running %s\", find_ghidra)\n if b\"ghidrarun\" in out.lower():\n return True\n return False", "def is_geth_running(self) -> bool:\r\n command = 'docker exec -t %s geth attach ipc://root/abc/geth.ipc --exec \"admin.nodeInfo\"' % self.name\r\n result = self.ip.exec_command(command)\r\n return False if result.split(':')[0] == 'Fatal' else True", "def checkProcess(self):\n process = subprocess.Popen(\"ps -A | grep g13d\", stdout=subprocess.PIPE, shell=True)\n out, err = process.communicate()\n if out != '':\n self.ui.but_activate.setEnabled(False)\n self.ui.lab_active.setText(\"Running ok\")\n self.ui.lab_active.setStyleSheet(\"QLabel { background-color : none; color : green; }\");\n else:\n self.ui.but_activate.setEnabled(True)\n self.ui.lab_active.setText(\"Not Started\")\n self.ui.lab_active.setStyleSheet(\"QLabel { background-color : none; color : red; }\");", "def check_running(self, fail_on_error=True):\n self._update_mount_state()\n if self._mount_state[\"unmounted\"] or self._mount_state[\"nodirectory\"]:\n self.log.error(\n \"dfuse not running on %s\",\n str(self._mount_state[\"unmounted\"].union(self._mount_state[\"nodirectory\"])))\n if fail_on_error:\n raise CommandFailure(\"dfuse not running\")\n return False\n if self._mount_state[\"rogue\"]:\n self.log.error(\"rogue dfuse processes on %s\", str(self._mount_state[\"rogue\"]))\n if fail_on_error:\n raise CommandFailure(\"rogue dfuse processes detected\")\n return False\n return True", "def is_running(self):\n\t\treturn self in _running", "def check_running_from_automator():\n if (\n platform.system() == \"Darwin\"\n and os.environ.get(\"XPC_SERVICE_NAME\") == \"com.apple.automator.xpc.runner\"\n ):\n return True\n return False", "def check_if_notebook_has_run(cell):\n for cell in json_data['cells']:\n if cell['cell_type']=='code' and cell['execution_count'] != None:\n return True\n return False", "def check_for_ds9(ds9_name):\n ds9 = False\n if platform.system().lower() == 'darwin':\n processes = os.popen('ps aux | grep {}'.format(ds9_name)).readlines()\n else:\n processes = os.popen('ps aux --cols 1024 | grep {}'.format(ds9_name)).readlines()\n for process in processes:\n if \"ds9\" in process and \" -title {}\".format(ds9_name) in process:\n print('Hurray! Found DS9 window process')\n print('Waiting 20s to be sure ds9 is open...')\n time.sleep(20)\n ds9 = True\n break\n else:\n print('Boo! 
Where the hell is the DS9 window?')\n return ds9", "def is_instance_running(self):\n try:\n self.instance.wait(timeout=1)\n except psutil.TimeoutExpired:\n pass\n return self.instance.is_running()", "async def is_running(self, **kwargs: Any) -> bool:\n return True", "def _ServerIsRunning( self ):\n return utils.ProcessIsRunning( self._gocode_handle )", "async def is_running(self, **kwargs: Any) -> bool:\n ...", "def is_running(self,timeout=0):\n\n # wait for them to start\n import time\n st = time.time()\n still_waiting = 1\n while still_waiting:\n try:\n # Send a simple command to all workers\n # and wait till they handle it successfully\n self.exec_code(\"1==1\")\n except ClusterError:\n still_waiting = 1\n elapsed = time.time() - st\n if elapsed > timeout:\n # We've run out of time.\n return 0\n else:\n still_waiting = 0\n wait_time = time.time() - st\n # should we somehow dessiminate worker topology (ids)\n # to all machines here?\n return 1", "def is_running(self):\n # type: () -> bool\n return self._run_state.is_running()", "def is_running(self):\n\n command = ('ssh {0} -q -o StrictHostKeyChecking=no -o '\n 'UserKnownHostsFile=/dev/null \"pgrep httperf \"').format(self.client)\n\n if subprocess.call(command, stdout=subprocess.PIPE, shell=True) == 0:\n return True\n else:\n return False" ]
[ "0.66739565", "0.6461373", "0.6416072", "0.63748014", "0.6368051", "0.6345569", "0.6295888", "0.62739915", "0.62605065", "0.6186457", "0.61560464", "0.609521", "0.60186034", "0.6016657", "0.6009209", "0.6008331", "0.6001014", "0.59857357", "0.5974706", "0.595932", "0.5954081", "0.5951451", "0.5938165", "0.5918368", "0.59107757", "0.5901071", "0.5879929", "0.58788615", "0.5860575", "0.58308816" ]
0.6727489
0
This method is called during a move's `action_done`. It'll actually move a quant from the source location to the destination location, and unreserve if needed in the source location. This method is intended to be called on all the move lines of a move. This method is not intended to be called when editing a `done` move (that's what the override of `write` here is for).
def _action_done(self): # First, we loop over all the move lines to do a preliminary check: `qty_done` should not # be negative and, according to the presence of a picking type or a linked inventory # adjustment, enforce some rules on the `lot_id` field. If `qty_done` is null, we unlink # the line. It is mandatory in order to free the reservation and correctly apply # `action_done` on the next move lines. ml_to_delete = self.env['stock.move.line'] for ml in self: # Check here if `ml.qty_done` respects the rounding of `ml.product_uom_id`. uom_qty = float_round(ml.qty_done, precision_rounding=ml.product_uom_id.rounding, rounding_method='HALF-UP') precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure') qty_done = float_round(ml.qty_done, precision_digits=precision_digits, rounding_method='HALF-UP') if float_compare(uom_qty, qty_done, precision_digits=precision_digits) != 0: raise UserError(_('The quantity done for the product "%s" doesn\'t respect the rounding precision \ defined on the unit of measure "%s". Please change the quantity done or the \ rounding precision of your unit of measure.') % (ml.product_id.display_name, ml.product_uom_id.name)) qty_done_float_compared = float_compare(ml.qty_done, 0, precision_rounding=ml.product_uom_id.rounding) if qty_done_float_compared > 0: if ml.product_id.tracking != 'none': picking_type_id = ml.move_id.picking_type_id if picking_type_id: if picking_type_id.use_create_lots: # If a picking type is linked, we may have to create a production lot on # the fly before assigning it to the move line if the user checked both # `use_create_lots` and `use_existing_lots`. if ml.lot_name and ml.date_reference and not ml.lot_id: lot = self.env['stock.production.lot'].create( {'name': ml.lot_name, 'product_id': ml.product_id.id, 'date_refer': ml.date_reference} ) ml.write({'lot_id': lot.id}) data_dates = ml.lot_id._get_dattes(ml.product_id.id,ml.date_reference) for field, value in data_dates.items(): setattr(ml.lot_id, field, value) elif not picking_type_id.use_create_lots and not picking_type_id.use_existing_lots: # If the user disabled both `use_create_lots` and `use_existing_lots` # checkboxes on the picking type, he's allowed to enter tracked # products without a `lot_id`. continue elif ml.move_id.inventory_id: # If an inventory adjustment is linked, the user is allowed to enter # tracked products without a `lot_id`. continue if not ml.lot_id: raise UserError(_('You need to supply a lot/serial number for %s.') % ml.product_id.name) elif qty_done_float_compared < 0: raise UserError(_('No negative quantities allowed')) else: ml_to_delete |= ml ml_to_delete.unlink() # Now, we can actually move the quant. 
done_ml = self.env['stock.move.line'] for ml in self - ml_to_delete: if ml.product_id.type == 'product': Quant = self.env['stock.quant'] rounding = ml.product_uom_id.rounding # if this move line is force assigned, unreserve elsewhere if needed if not ml.location_id.should_bypass_reservation() and float_compare(ml.qty_done, ml.product_qty, precision_rounding=rounding) > 0: extra_qty = ml.qty_done - ml.product_qty ml._free_reservation(ml.product_id, ml.location_id, extra_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, ml_to_ignore=done_ml) # unreserve what's been reserved if not ml.location_id.should_bypass_reservation() and ml.product_id.type == 'product' and ml.product_qty: try: Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, strict=True) except UserError: Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True) # move what's been actually done quantity = ml.product_uom_id._compute_quantity(ml.qty_done, ml.move_id.product_id.uom_id, rounding_method='HALF-UP') available_qty, in_date = Quant._update_available_quantity(ml.product_id, ml.location_id, -quantity, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id) if available_qty < 0 and ml.lot_id: # see if we can compensate the negative quants with some untracked quants untracked_qty = Quant._get_available_quantity(ml.product_id, ml.location_id, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True) if untracked_qty: taken_from_untracked_qty = min(untracked_qty, abs(quantity)) Quant._update_available_quantity(ml.product_id, ml.location_id, -taken_from_untracked_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id) Quant._update_available_quantity(ml.product_id, ml.location_id, taken_from_untracked_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id) Quant._update_available_quantity(ml.product_id, ml.location_dest_id, quantity, lot_id=ml.lot_id, package_id=ml.result_package_id, owner_id=ml.owner_id, in_date=in_date) done_ml |= ml # Reset the reserved quantity as we just moved it to the destination location. (self - ml_to_delete).with_context(bypass_reservation_update=True).write({ 'product_uom_qty': 0.00, 'date': fields.Datetime.now(), })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loot_enq_move(self, item, toloc):\n itemloc = item.Location.ToLocation()\n self.pqi.enq(2, ['move', [[itemloc.X, itemloc.Y, itemloc.Z],\n item.Id, itemloc.Z,\n [toloc.X, toloc.Y, toloc.Z],\n item.Count\n ]])", "def _move_item(self, src, dst):\n \"Does nothing\"", "def _move_cleanup(self, ok, elog, start_pos, goal_pos):\n if not ok:\n self.stop()\n if elog:\n self._record_elog_move(start_pos, goal_pos)\n return ok", "def quants_move(self, quants, move, location_to, location_from=False, lot_id=False, owner_id=False,\n src_package_id=False, dest_package_id=False, entire_pack=False):\n # TDE CLEANME: use ids + quantities dict\n if location_to.usage == 'view':\n raise UserError(_('You cannot move to a location of type view %s.') % (location_to.name))\n\n quants_reconcile_sudo = self.env['stock.quant'].sudo()\n quants_move_sudo = self.env['stock.quant'].sudo()\n check_lot = False\n for quant, qty in quants:\n if not quant:\n # If quant is None, we will create a quant to move (and potentially a negative counterpart too)\n quant = self._quant_create_from_move(\n qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id,\n dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to)\n if move.picking_id.min_date:\n quant.write({'in_date': move.picking_id.min_date})\n check_lot = True\n else:\n \n quant._quant_split(qty)\n #_logger.info(quant)\n quants_move_sudo |= quant\n quants_reconcile_sudo |= quant\n\n if quants_move_sudo:\n moves_recompute = quants_move_sudo.filtered(lambda self: self.reservation_id != move).mapped(\n 'reservation_id')\n quants_move_sudo._quant_update_from_move(move, location_to, dest_package_id, lot_id=lot_id,\n entire_pack=entire_pack)\n moves_recompute.recalculate_move_state()\n\n if location_to.usage == 'internal':\n # Do manual search for quant to avoid full table scan (order by id)\n self._cr.execute(\"\"\"\n SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND\n ((stock_location.parent_left >= %s AND stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1\n \"\"\", (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id))\n if self._cr.fetchone():\n quants_reconcile_sudo._quant_reconcile_negative(move)\n\n # In case of serial tracking, check if the product does not exist somewhere internally already\n # Checking that a positive quant already exists in an internal location is too restrictive.\n # Indeed, if a warehouse is configured with several steps (e.g. 
\"Pick + Pack + Ship\") and\n # one step is forced (creates a quant of qty = -1.0), it is not possible afterwards to\n # correct the inventory unless the product leaves the stock.\n picking_type = move.picking_id and move.picking_id.picking_type_id or False\n if check_lot and lot_id and move.product_id.tracking == 'serial' and (\n not picking_type or (picking_type.use_create_lots or picking_type.use_existing_lots)):\n other_quants = self.search([('product_id', '=', move.product_id.id), ('lot_id', '=', lot_id),\n ('qty', '>', 0.0), ('location_id.usage', '=', 'internal')])\n if other_quants:\n # We raise an error if:\n # - the total quantity is strictly larger than 1.0\n # - there are more than one negative quant, to avoid situations where the user would\n # force the quantity at several steps of the process\n if sum(other_quants.mapped('qty')) > 1.0 or len([q for q in other_quants.mapped('qty') if q < 0]) > 1:\n lot_name = self.env['stock.production.lot'].browse(lot_id).name\n raise UserError(_('The serial number %s is already in stock.') % lot_name + _(\n \"Otherwise make sure the right stock/owner is set.\"))", "def move(self, from_location, to_location, item, quantity, batch_number, reason=None):\n # dont use a transaction here it is already called inside a transaction block only\n logger.info('ReleaseDiscard item move initiated')\n try:\n shipment = self.Shipment()\n from_location = from_location\n to_location = to_location\n item = item\n quantity = quantity\n batch_number = batch_number\n shipment.from_location = from_location\n shipment.to_location = to_location\n mv = self.ShipmentMoves()\n mv.from_location = from_location\n mv.to_location = to_location\n mv.product = item\n mv.quantity = float(quantity)\n mv.batch_number = batch_number\n mv.uom = item.template.default_uom\n if reason:\n mv.reason_for_discard = reason\n shipment.moves = (mv,)\n shipment.save()\n shipment.wait((shipment,))\n shipment.done((shipment,))\n shipment.assign_wizard((shipment,))\n state = shipment.assign_try((shipment,))\n if not state:\n shipment.assign_force((shipment,))\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def move(self):\n pass", "def move(self, move):\n raise NotImplementedError()", "def _update_finished_moves(self, production, _, __):\n modification = {}\n for move in production.move_finished_ids:\n if move.state in ('done', 'cancel'):\n continue\n modification[move] = ( production.product_qty + move.quantity_done - production.qty_produced, move.product_uom_qty )\n move.write({'product_uom_qty': production.product_qty + move.quantity_done - production.qty_produced})\n return modification", "def step(self, move):", "def do_transfer_stock_move(self,stock_moves):\n\t\tstock_moves.stock_move_action_done()\n\t\treturn True", "def process_move(self, retrieval, destination):\n self.board.attempt_move([retrieval[0], retrieval[1], destination[0]])", "def move_raw(self, pos):\n return self.put_par(\"raw_drive\", pos)", "def stock_move_action_done(self):\n\t\tself.filtered(lambda move: move.state == 'draft').action_confirm()\n\n\t\tUom = self.env['product.uom']\n\t\tQuant = self.env['stock.quant']\n\n\t\tpickings = self.env['stock.picking']\n\t\tprocurements = self.env['procurement.order']\n\t\toperations = self.env['stock.pack.operation']\n\n\t\tremaining_move_qty = {}\n\n\t\tfor move in self:\n\t\t\tif move.picking_id:\n\t\t\t\tpickings |= move.picking_id\n\t\t\tremaining_move_qty[move.id] = move.product_qty\n\t\t\tfor link in 
move.linked_move_operation_ids:\n\t\t\t\toperations |= link.operation_id\n\t\t\t\tpickings |= link.operation_id.picking_id\n\n\t\t# Sort operations according to entire packages first, then package + lot, package only, lot only\n\t\toperations = operations.sorted(\n\t\t\tkey=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (\n\t\t\tx.pack_lot_ids and -1 or 0))\n\n\t\tfor operation in operations:\n\n\t\t\t# product given: result put immediately in the result package (if False: without package)\n\t\t\t# but if pack moved entirely, quants should not be written anything for the destination package\n\t\t\tquant_dest_package_id = operation.product_id and operation.result_package_id.id or False\n\t\t\tentire_pack = not operation.product_id and True or False\n\n\t\t\t# compute quantities for each lot + check quantities match\n\t\t\tlot_quantities = dict((pack_lot.lot_id.id, operation.product_uom_id._compute_quantity(pack_lot.qty,\n\t\t\t operation.product_id.uom_id)\n\t\t\t ) for pack_lot in operation.pack_lot_ids)\n\n\t\t\tqty = operation.product_qty\n\t\t\tif operation.product_uom_id and operation.product_uom_id != operation.product_id.uom_id:\n\t\t\t\tqty = operation.product_uom_id._compute_quantity(qty, operation.product_id.uom_id)\n\t\t\tif operation.pack_lot_ids and float_compare(sum(lot_quantities.values()), qty,\n\t\t\t precision_rounding=operation.product_id.uom_id.rounding) != 0.0:\n\t\t\t\traise UserError(_(\n\t\t\t\t\t'You have a difference between the quantity on the operation and the quantities specified for the lots. '))\n\n\t\t\tquants_taken = []\n\t\t\tfalse_quants = []\n\t\t\tlot_move_qty = {}\n\n\t\t\tprout_move_qty = {}\n\t\t\tfor link in operation.linked_move_operation_ids:\n\t\t\t\tprout_move_qty[link.move_id] = prout_move_qty.get(link.move_id, 0.0) + link.qty\n\n\t\t\t# Process every move only once for every pack operation\n\t\t\tfor move in prout_move_qty.keys():\n\t\t\t\t# TDE FIXME: do in batch ?\n\t\t\t\tmove.check_tracking(operation)\n\n\t\t\t\t# TDE FIXME: I bet the message error is wrong\n\t\t\t\t# if not remaining_move_qty.get(move.id):\n\t\t\t\t# \traise UserError(_(\n\t\t\t\t# \t\t\"The roundings of your unit of measure %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. 
\") % (\n\t\t\t\t# \t move.product_uom.name, move.product_id.uom_id.name))\n\n\t\t\t\tif not operation.pack_lot_ids:\n\t\t\t\t\tpreferred_domain_list = [[('reservation_id', '=', move.id)], [('reservation_id', '=', False)],\n\t\t\t\t\t ['&', ('reservation_id', '!=', move.id),\n\t\t\t\t\t ('reservation_id', '!=', False)]]\n\t\t\t\t\tquants = Quant.quants_get_preferred_domain(\n\t\t\t\t\t\tprout_move_qty[move], move, ops=operation, domain=[('qty', '>', 0)],\n\t\t\t\t\t\tpreferred_domain_list=preferred_domain_list)\n\t\t\t\t\tQuant.quants_move(quants, move, operation.location_dest_id, location_from=operation.location_id,\n\t\t\t\t\t lot_id=False, owner_id=operation.owner_id.id,\n\t\t\t\t\t src_package_id=operation.package_id.id,\n\t\t\t\t\t dest_package_id=quant_dest_package_id, entire_pack=entire_pack)\n\t\t\t\telse:\n\t\t\t\t\t# Check what you can do with reserved quants already\n\t\t\t\t\tqty_on_link = prout_move_qty[move]\n\t\t\t\t\trounding = operation.product_id.uom_id.rounding\n\t\t\t\t\tfor reserved_quant in move.reserved_quant_ids:\n\t\t\t\t\t\tif (reserved_quant.owner_id.id != operation.owner_id.id) or (\n\t\t\t\t\t\t\treserved_quant.location_id.id != operation.location_id.id) or \\\n\t\t\t\t\t\t\t\t(reserved_quant.package_id.id != operation.package_id.id):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif not reserved_quant.lot_id:\n\t\t\t\t\t\t\tfalse_quants += [reserved_quant]\n\t\t\t\t\t\telif float_compare(lot_quantities.get(reserved_quant.lot_id.id, 0), 0,\n\t\t\t\t\t\t precision_rounding=rounding) > 0:\n\t\t\t\t\t\t\tif float_compare(lot_quantities[reserved_quant.lot_id.id], reserved_quant.qty,\n\t\t\t\t\t\t\t precision_rounding=rounding) >= 0:\n\t\t\t\t\t\t\t\tlot_quantities[reserved_quant.lot_id.id] -= reserved_quant.qty\n\t\t\t\t\t\t\t\tquants_taken += [(reserved_quant, reserved_quant.qty)]\n\t\t\t\t\t\t\t\tqty_on_link -= reserved_quant.qty\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tquants_taken += [(reserved_quant, lot_quantities[reserved_quant.lot_id.id])]\n\t\t\t\t\t\t\t\tlot_quantities[reserved_quant.lot_id.id] = 0\n\t\t\t\t\t\t\t\tqty_on_link -= lot_quantities[reserved_quant.lot_id.id]\n\t\t\t\t\tlot_move_qty[move.id] = qty_on_link\n\n\t\t\t\tremaining_move_qty[move.id] -= prout_move_qty[move]\n\n\t\t\t# Handle lots separately\n\t\t\tif operation.pack_lot_ids:\n\t\t\t\t# TDE FIXME: fix call to move_quants_by_lot to ease understanding\n\t\t\t\tself._move_quants_by_lot(operation, lot_quantities, quants_taken, false_quants, lot_move_qty,\n\t\t\t\t quant_dest_package_id)\n\n\t\t\t# Handle pack in pack\n\t\t\tif not operation.product_id and operation.package_id and operation.result_package_id.id != operation.package_id.parent_id.id:\n\t\t\t\toperation.package_id.sudo().write({'parent_id': operation.result_package_id.id})\n\n\t\t# Check for remaining qtys and unreserve/check move_dest_id in\n\t\tmove_dest_ids = set()\n\t\tfor move in self:\n\t\t\tif float_compare(remaining_move_qty[move.id], 0,\n\t\t\t precision_rounding=move.product_id.uom_id.rounding) > 0: # In case no pack operations in picking\n\t\t\t\tmove.check_tracking(False) # TDE: do in batch ? redone ? 
check this\n\n\t\t\t\tpreferred_domain_list = [[('reservation_id', '=', move.id)], [('reservation_id', '=', False)],\n\t\t\t\t ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]]\n\t\t\t\tquants = Quant.quants_get_preferred_domain(\n\t\t\t\t\tremaining_move_qty[move.id], move, domain=[('qty', '>', 0)],\n\t\t\t\t\tpreferred_domain_list=preferred_domain_list)\n\t\t\t\tQuant.quants_move(\n\t\t\t\t\tquants, move, move.location_dest_id,\n\t\t\t\t\tlot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id)\n\n\t\t\t# If the move has a destination, add it to the list to reserve\n\t\t\tif move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'):\n\t\t\t\tmove_dest_ids.add(move.move_dest_id.id)\n\n\t\t\tif move.procurement_id:\n\t\t\t\tprocurements |= move.procurement_id\n\n\t\t\t# unreserve the quants and make them available for other operations/moves\n\t\t\tmove.quants_unreserve()\n\n\t\t# Check the packages have been placed in the correct locations\n\t\tself.mapped('quant_ids').filtered(lambda quant: quant.package_id and quant.qty > 0).mapped(\n\t\t\t'package_id')._check_location_constraint()\n\n\t\t# set the move as done\n\t\tself.write({'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})\n\t\tprocurements.check()\n\t\t# assign destination moves\n\t\tif move_dest_ids:\n\t\t\t# TDE FIXME: record setise me\n\t\t\tself.browse(list(move_dest_ids)).action_assign_stock_move()\n\n\t\tpickings.filtered(lambda picking: picking.state == 'done' and not picking.date_done).write(\n\t\t\t{'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})\n\n\t\treturn True", "def move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move", "def _move(self, pos):\n self.put_par(\"drive\", pos)", "def move(self, agent, action):\n\t\tpass", "def action_assign_stock_move(self, no_prepare=False):\n\t\t# TDE FIXME: remove decorator once everything is migrated\n\t\t# TDE FIXME: clean me, please\n\t\tmain_domain = {}\n\n\t\tQuant = self.env['stock.quant']\n\t\tUom = self.env['product.uom']\n\t\tmoves_to_assign = self.env['stock.move']\n\t\tmoves_to_do = self.env['stock.move']\n\t\toperations = self.env['stock.pack.operation']\n\t\tancestors_list = {}\n\n\t\t# work only on in progress moves\n\t\tmoves = self.filtered(lambda move: move.state in ['confirmed', 'waiting', 'assigned'])\n\t\tmoves.filtered(lambda move: move.reserved_quant_ids).do_unreserve()\n\t\tfor move in moves:\n\t\t\tif move.location_id.usage in ('supplier', 'inventory', 'production'):\n\t\t\t\tmoves_to_assign |= move\n\t\t\t\t# TDE FIXME: what ?\n\t\t\t\t# in case the move is returned, we want to try to find quants before forcing the assignment\n\t\t\t\tif not move.origin_returned_move_id:\n\t\t\t\t\tcontinue\n\t\t\t# if the move is preceeded, restrict the choice of quants in the ones moved previously in original move\n\t\t\tancestors = move.find_move_ancestors()\n\t\t\tif move.product_id.type == 'consu' and not ancestors:\n\t\t\t\tmoves_to_assign |= move\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tmoves_to_do |= move\n\n\t\t\t\t# we always search for yet unassigned quants\n\t\t\t\tmain_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]\n\n\t\t\t\tancestors_list[move.id] = True if ancestors else False\n\t\t\t\tif move.state == 'waiting' and not ancestors:\n\t\t\t\t\t# if the waiting move hasn't yet any ancestor (PO/MO not confirmed yet), don't find any quant 
available in stock\n\t\t\t\t\tmain_domain[move.id] += [('id', '=', False)]\n\t\t\t\telif ancestors:\n\t\t\t\t\tmain_domain[move.id] += [('history_ids', 'in', ancestors.ids)]\n\n\t\t\t\t# if the move is returned from another, restrict the choice of quants to the ones that follow the returned move\n\t\t\t\tif move.origin_returned_move_id:\n\t\t\t\t\tmain_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]\n\t\t\t\tfor link in move.linked_move_operation_ids:\n\t\t\t\t\toperations |= link.operation_id\n\n\t\t# Check all ops and sort them: we want to process first the packages, then operations with lot then the rest\n\t\toperations = operations.sorted(\n\t\t\tkey=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (\n\t\t\t\tx.pack_lot_ids and -1 or 0))\n\t\tfor ops in operations:\n\t\t\t# TDE FIXME: this code seems to be in action_done, isn't it ?\n\t\t\t# first try to find quants based on specific domains given by linked operations for the case where we want to rereserve according to existing pack operations\n\t\t\tif not (ops.product_id and ops.pack_lot_ids):\n\t\t\t\tfor record in ops.linked_move_operation_ids:\n\t\t\t\t\tmove = record.move_id\n\t\t\t\t\tif move.id in main_domain:\n\t\t\t\t\t\tqty = record.qty\n\t\t\t\t\t\tdomain = main_domain[move.id]\n\t\t\t\t\t\tif qty:\n\t\t\t\t\t\t\tquants = Quant.quants_get_preferred_domain(qty, move, ops=ops, domain=domain,\n\t\t\t\t\t\t\t preferred_domain_list=[])\n\t\t\t\t\t\t\tQuant.quants_reserve(quants, move, record)\n\t\t\telse:\n\t\t\t\tlot_qty = {}\n\t\t\t\trounding = ops.product_id.uom_id.rounding\n\t\t\t\tfor pack_lot in ops.pack_lot_ids:\n\t\t\t\t\tlot_qty[pack_lot.lot_id.id] = ops.product_uom_id._compute_quantity(pack_lot.qty,\n\t\t\t\t\t ops.product_id.uom_id)\n\t\t\t\tfor record in ops.linked_move_operation_ids:\n\t\t\t\t\tmove_qty = record.qty\n\t\t\t\t\tmove = record.move_id\n\t\t\t\t\tdomain = main_domain[move.id]\n\t\t\t\t\tfor lot in lot_qty:\n\t\t\t\t\t\tif float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0 and float_compare(move_qty,\n\t\t\t\t\t\t 0,\n\t\t\t\t\t\t precision_rounding=rounding) > 0:\n\t\t\t\t\t\t\tqty = min(lot_qty[lot], move_qty)\n\t\t\t\t\t\t\tquants = Quant.quants_get_preferred_domain(qty, move, ops=ops, lot_id=lot, domain=domain,\n\t\t\t\t\t\t\t preferred_domain_list=[])\n\t\t\t\t\t\t\tQuant.quants_reserve(quants, move, record)\n\t\t\t\t\t\t\tlot_qty[lot] -= qty\n\t\t\t\t\t\t\tmove_qty -= qty\n\n\t\t# Sort moves to reserve first the ones with ancestors, in case the same product is listed in\n\t\t# different stock moves.\n\t\tfor move in sorted(moves_to_do, key=lambda x: -1 if ancestors_list.get(x.id) else 0):\n\t\t\t# then if the move isn't totally assigned, try to find quants without any specific domain\n\t\t\tif move.state != 'assigned' and not self.env.context.get('reserve_only_ops'):\n\t\t\t\tqty_already_assigned = move.reserved_availability\n\t\t\t\tqty = move.product_qty - qty_already_assigned\n\n\t\t\t\tquants = Quant.quants_get_preferred_domain(qty, move, domain=main_domain[move.id],\n\t\t\t\t preferred_domain_list=[])\n\t\t\t\tQuant.quants_reserve(quants, move)\n\n\t\t# force assignation of consumable products and incoming from supplier/inventory/production\n\t\t# Do not take force_assign as it would create pack operations\n\t\tif moves_to_assign:\n\t\t\tmoves_to_assign.write({'state': 'assigned'})\n\t\tif not no_prepare:\n\t\t\tself.check_recompute_pack_op()", "def move(self):\n raise NotImplementedError", "def move(t, 
length):\n pu(t)\n\t\n fd(t, length)\n pd(t)", "def move(self, dst, src): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def move(self, *args, **kw):\n return self.execute_action('move', *args, **kw)", "def updateDest(self):\n\n\t\t# if end is reached stop calling\n\t\tif self.i == self.numSteps:\n\t\t\treturn False\n\n\t\t# controller\n\t\tpoint = self.control.nextUpPD(self.i)\n\t\tcommand_string = 'id1 mav.waypoint_actuator setdest [%s, %s, %s, %s, 0.2] \\n' % (\n\t\t\tpoint[0], point[1], point[2], point[3])\n\t\tcomm.write(bytes(command_string, 'utf8'))\n\n\t\tself.i = self.i + 1\n\t\treturn GLib.SOURCE_CONTINUE", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def action_done(self):\n result = super(StockPicking, self).action_done()\n if self.picking_type_code == 'outgoing' and self.group_id.sale_id:\n for line in self.move_line_ids.filtered(\n lambda x: x.lot_id and x.lot_id.plan_type == 'sim'):\n line.lot_id.write({'partner_id': self.partner_id.id})\n\n return result", "def action_scrap(self, cr, uid, ids, quantity, location_id,\n restrict_lot_id=False, restrict_partner_id=False,\n context=None):\n res = super(stock_move, self).\\\n action_scrap(cr, uid, ids, quantity,\n location_id=location_id,\n restrict_lot_id=restrict_lot_id,\n restrict_partner_id=restrict_partner_id,\n context=context)[0]\n if context.get('reason'):\n reason = context.get('reason')\n notes_reason = context.get('notes_reason') or False\n if reason:\n if int(reason) == -1:\n reason = '-1'\n else:\n reason = int(reason)\n self.write(cr, uid, res, {\n 'reason': reason,\n 'notes_reason': notes_reason,\n }, context=context)\n return res", "def move(self, state):\n raise NotImplementedError(\"Need to implement this method\")", "def move(self, t, s):\n raise NotImplementedError", "def move(self, source, dest, dry_run=False, after=False, force=False,\n include=None, exclude=None):\n eh = SimpleErrorHandler()\n self._client.execute('move', source, dest, n=dry_run, A=after,\n f=force, I=include, X=exclude, eh=eh)\n\n return bool(eh)", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def handleMove(self):\n pass" ]
[ "0.6281393", "0.6151487", "0.6090694", "0.60468554", "0.6036133", "0.5997963", "0.5967796", "0.59452647", "0.59211004", "0.58850086", "0.5854653", "0.5854297", "0.58422697", "0.5832115", "0.5814126", "0.5808458", "0.5793776", "0.5744125", "0.57013524", "0.569954", "0.5652009", "0.564682", "0.56467336", "0.56413704", "0.5597113", "0.55539864", "0.55493224", "0.5542008", "0.552051", "0.55061084" ]
0.6330825
0
The response iterable as writeonly stream.
def stream(self): return ResponseStream(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def response_as_stream(self) -> Any:\n raise NotImplementedError # pragma: no cover", "def stream(self, write, request):\n raise NotImplementedError(\"%s.stream\" % reflect.qual(self.__class__))", "def getOutputStream(self):\r\n self._setHeaders()\r\n return self._response.getOutputStream()", "def response_handling(self) -> global___Snippet.StreamingResponseHandling:", "def response_handling(self) -> global___Snippet.StreamingResponseHandling:", "def writeResponse(response):", "def get_response(self):\n res = IODWriteRes()\n for field in [\"seqNum\", \"ARUUID\", \"API\", \"slotNumber\",\n \"subslotNumber\", \"index\"]:\n res.setfieldval(field, self.getfieldval(field))\n return res", "def get_response(self):\n res = IODWriteMultipleRes()\n for field in [\"seqNum\", \"ARUUID\", \"API\", \"slotNumber\",\n \"subslotNumber\", \"index\"]:\n res.setfieldval(field, self.getfieldval(field))\n\n # append all block response\n res_blocks = []\n for block in self.getfieldval(\"blocks\"):\n res_blocks.append(block.get_response())\n res.setfieldval(\"blocks\", res_blocks)\n return res", "def _get_streaming_response(self):\n\n def get_json_string(item):\n \"\"\"Returns the ``item`` Instance instance as a JSON string.\"\"\"\n return json.dumps(item.json if isinstance(item, Instance) else item)\n\n if self.kwargs.get(\"format\") == \"xml\":\n response = StreamingHttpResponse(\n renderers.InstanceXMLRenderer().stream_data(\n self.object_list, self.get_serializer\n ),\n content_type=\"application/xml\",\n )\n else:\n response = StreamingHttpResponse(\n json_stream(self.object_list, get_json_string),\n content_type=\"application/json\",\n )\n\n # calculate etag value and add it to response headers\n if hasattr(self, \"etag_hash\"):\n self.set_etag_header(None, self.etag_hash)\n\n # set headers on streaming response\n for k, v in self.headers.items():\n response[k] = v\n\n return response", "def simple_sink(riter):\n for r in riter:\n pass", "def write(self, ostream):\n tstream = utils.BytearrayStream()\n\n if self._unique_identifier:\n self._unique_identifier.write(tstream)\n\n for attribute_name in self._attribute_names:\n attribute_name.write(tstream)\n\n self.length = tstream.length()\n super(GetAttributeListResponsePayload, self).write(ostream)\n ostream.write(tstream.buffer)", "def server_streaming(self) -> global___Snippet.ServerStreaming:", "def writelines(self, iterable):\n for line in iterable:\n for stream in self.streams:\n stream.write(line)", "def __iter__(self):\n return self.stream_chunker", "def stream(self):\n d = self.dictionary()\n # binary data comes after dict\n self.maybe_spaces_or_comments()\n return self._stream(d)", "def on_writable(self):\n if self.down is True:\n return\n\n if self.response is None:\n return\n\n if self.response.is_header_sent() is False:\n self.response.response_header(self)\n elif self.response.is_body_sent() is False:\n self.response.response_body(self)\n else:\n self.mark_down()", "def outstream(self):\r\n #noinspection PyUnresolvedReferences\r\n return self._outstream", "def write(self, s):\r\n for result in self.writeAsync(s):\r\n pass", "def to_response(self):\n raise NotImplementedError(\"Must define to_response on `%s`\" % self.__class__.__name__)", "def client_streaming(self) -> global___Snippet.ClientStreaming:", "def export(self, stream):\n pass", "def write_all(self):\r\n pass", "def get_response_content_iterator(self):\n return self.__response.iter_lines()", "def iter_any(self) -> AsyncStreamIterator[bytes]:\n ...", "def write(self):\n pass", "def 
write(self):\n pass", "def pipe_to_file(response, path):\n # TODO: Indicate progress.\n with open(path, 'wb') as file:\n while True:\n chunk = response.read(4096)\n if not chunk:\n break\n file.write(chunk)", "def write(self, ostream):\n tstream = utils.BytearrayStream()\n\n if self._unique_identifier:\n self._unique_identifier.write(tstream)\n\n self.length = tstream.length()\n super(GetAttributeListRequestPayload, self).write(ostream)\n ostream.write(tstream.buffer)", "def write(self):\n raise NotImplementedError", "def read_all(self):\n data = \"\".join(self.content_iter)\n self.content_iter = iter([data])\n return data" ]
[ "0.7330909", "0.6544413", "0.64807034", "0.6401678", "0.6401678", "0.6224532", "0.59967285", "0.59855986", "0.5966709", "0.594459", "0.58853096", "0.5849615", "0.58353543", "0.57926196", "0.5761642", "0.5744629", "0.5694836", "0.5675186", "0.56467724", "0.5643159", "0.5639434", "0.563548", "0.56140065", "0.5597266", "0.55715567", "0.55715567", "0.55609596", "0.5559645", "0.55239767", "0.5516839" ]
0.6642246
1
Returns any live games currently happening with the API
def _get_live_games(self): response = requests.get(self._get_score_url()) if response.status_code == 200: return [g for g in response.json()['games'] if g['status']['state'] == self.desired_game_state]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def fetch_games(self):\n return await self.http.get_game_list()", "def get_games_from_database (self):\n r = requests.get (self.url_endpoint)\n if (r.status_code != 200):\n print (\"Failed to get games:\\n\", r.text)\n return r\n \n games = json.loads (r.text)['games']\n return_list = []\n for game in games:\n return_list.append (game['game_state'])\n return return_list", "def get_live_league_games(self):\n url = self.__build_url(urls.GET_LIVE_LEAGUE_GAMES)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)", "def retrieveGames():\n result = cs411_game.getGames()\n return prepJSON(result)", "def get_all_games():\n games = brain.get_all_games()\n return games", "def get_free_games(self) -> List[Game]:", "def get_games():\n\n return jsonify({\"games\": list(map(make_public_game, games))})", "def get_games():\n feed = feedparser.parse(FEED_URL)\n\n games = []\n for entry in feed['entries']:\n game = Game(entry.title, entry.link)\n games.append(game)\n\n return games", "def get_video_games(self, **kwargs):\n return self.get('video_games.json', **kwargs)", "def get_games():\r\n feed = feedparser.parse(FEED_URL)\r\n games = []\r\n for entry in feed.entries:\r\n games.append(Game(title = entry['title']\r\n , link = entry['link']\r\n ))\r\n return games", "async def check_games(self, ctx):\n print(self.data)\n print(self.games_info)", "def get_owned_games(api_key='', steam_id=''):\n api_url = ['https://api.steampowered.com/'\n 'IPlayerService/GetOwnedGames/v0001/'\n '?include_played_free_games=1&format=json',\n '&key=', api_key,\n '&steamid=', steam_id]\n url = ''.join([url_str for url_str in api_url])\n try:\n request = urllib2.urlopen(url)\n except urllib2.URLError, e:\n if hasattr(e, 'reason'):\n print >> sys.stderr, 'We failed to reach the server.'\n print >> sys.stderr, 'Reason: ', e.reason\n elif hasattr(e, 'code'):\n print >> sys.stderr, 'The server couldn\\'t fulfill the request.'\n print >> sys.stderr, 'Error code: ', e.code\n sys.exit(1)\n response = json.load(request)\n return response['response']", "def get_games_data(games):\n\n games_response = requests.get(\n url=f'{settings.GAME_SETUP_URL}/games-data/',\n params={'game_id': games},\n timeout=5 # in sec\n )\n if games_response.status_code == 200:\n return games_response.json().get('games')\n return {}", "def find_games(days_ahead=0):\n headers = {\n 'Host': 'stats.nba.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Referer': 'https://stats.nba.com/',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'x-nba-stats-origin': 'stats',\n 'x-nba-stats-token': 'true'\n }\n board = scoreboardv2.ScoreboardV2(day_offset=days_ahead, headers=headers).get_data_frames()[0]\n board.replace(id_to_abrv, inplace=True)\n return board[['GAME_DATE_EST', 'GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]", "def get_user_games(self, request):\n return games_ctrl.get_user_games(request.user_name)", "def get_top_live_games(self, partner='', **kwargs):\n if 'partner' not in kwargs:\n kwargs['partner'] = partner\n url = self.__build_url(urls.GET_TOP_LIVE_GAME, **kwargs)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)", 
"def get_games():\n page: int = int(flask.request.args.get(\"page\", 1))\n size: int = int(flask.request.args.get(\"size\", 10))\n\n request = GetPageRequest(page, size)\n response = minesweeper_service.get_game_page(request)\n return flask.jsonify(response)", "def get_user_games(self, request):\n user = User.query(User.name == request.user_name).get()\n if not user:\n raise endpoints.NotFoundException(\n 'A User with that name does not exist!')\n games = Game.query(Game.user == user.key)\n games = games.filter(Game.game_over == False)\n if games.count() > 0:\n return GameForms(items=[game.to_form(\"{}'s active games.\".format(\n request.user_name)) for game in games])\n else:\n raise endpoints.NotFoundException('This user has no active games!')", "def get_user_games(self, req):\n return models.BattleShip.getUserGames(req.user_name)", "def own_games(self):\r\n return sorted(self.games + self.finals, key=lambda g: (g.datetime, g.pitch.rank))", "def get_games_in_progress(self):\n gip_url = 'scores/json/AreAnyGamesInProgress?key=<key>'\n contents = urllib.request.urlopen(self._base_url + gip_url.replace('<key>', self._ak, 1))\n return contents.getcode(), contents.read().decode(\"utf-8\")", "def get_winners_of_game(self):\n return self.game_winners", "def get_all_games(season):\n url = BASE_URL.format(season)\n json_data = requests.get(url, headers=HEADERS).json()\n all_games = json_data[\"resultSets\"][0][\"rowSet\"]\n return all_games", "def get_games(msg: telebot.types.Message):\n games = Game.select()\n m = ''\n for game in games:\n m += f'{game.id}: {jsonpickle.encode(game)}\\n'\n\n bot.send_message(\n msg.from_user.id,\n m\n )", "def get_games(self, start_game_id, end_game_id):\n games = []\n \n num_games = end_game_id - start_game_id + 1\n \n for game_id in range(start_game_id, end_game_id + 1):\n try:\n game = self.get_game(game_id)\n games.append(game)\n except:\n print ('game_id =', game_id, 'failed')\n \n time.sleep(0.4)\n \n update_progress(game_id - start_game_id + 1, num_games)\n \n return games", "def get_bga_game_list():\n result = requests.get(\"https://www.boardgamegeek.com/xmlapi2/geeklist/252354\")\n return result.text", "def get_all_game_players(self):\n return GamePlayer.objects.filter(game=self)", "def game_list_full(self, uid=0):\n games = session.query(Game).all()\n return games", "def get_player_games(self, year, use_local=True):", "def get_games(url):\n \n import urllib\n import urllib2\n import re\n\n response = urllib2.urlopen(url + 'matches')\n html = response.read()\n\n games_html = re.findall('<tr class=\"match-row custom\">(.*?)</tr', html, flags=re.MULTILINE|re.DOTALL)\n\n games = []\n\n for game_html in games_html:\n game_match = re.search('.*?<td>(.*?)</td>.*?<td class=\"align-center\">(.*?)</td>.*?<span class=\"match-win\">(.*?)</span>.*?<td class=\"align-right\">[\\r\\n\\t]*(.*?)[\\r\\n\\t]*</td>', game_html, flags=re.MULTILINE|re.DOTALL)\n \n game = {}\n \n if game_match:\n game['map'] = game_match.group(1)\n game['type'] = game_match.group(2)\n game['outcome'] = game_match.group(3)\n game['date'] = game_match.group(4)\n games.append(game)\n \n return games" ]
[ "0.7730199", "0.7471135", "0.7325539", "0.7287605", "0.7253883", "0.71185577", "0.71165675", "0.69969124", "0.68289065", "0.6684004", "0.6620107", "0.65803075", "0.6573545", "0.6527177", "0.6521868", "0.6480549", "0.6474347", "0.6449517", "0.6438656", "0.64343774", "0.6428631", "0.6399015", "0.63887006", "0.6338779", "0.6326189", "0.6324196", "0.63036096", "0.629195", "0.62694305", "0.62680554" ]
0.84799916
0
Gets the current team's score from the API
def _get_current_teams_score(self): for game in self._get_live_games(): teams_playing = [x['abbreviation'] for index, x in game['teams'].items()] if self.team in teams_playing: # Our team is playing in this game, get the score return int(game['scores'][self.team])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self):\n return self.client.call('GET', self.name + 'score')", "def getScore(data):\n return score", "def get_score(self):\n return self.score", "def get_scores(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def extract_score(self, json):\n\t\ttry:\n\t\t\treturn int(json['player_score'])\n\t\texcept KeyError:\n\t\t\treturn 0", "def getScore(self):\r\n return self._score", "def perform_get_score(responder, options):\n match = options['<match-id>']\n tla = options['<tla>']\n score = scores.get_match_score(match, tla)\n\n if options.get(yaml_opt, False):\n responder(yaml.dump({'score': score}))\n else:\n responder('Team {0} scored {1} in match {2}'.format(tla, score, match))", "def get_info_from_api(team_name):\n if \"-\" in team_name:\n team_name = team_name.replace(\"-\", \"+\")\n if \"brighton\" in team_name: # some teams has different names than in sofa-score\n team_name = \"brighton\"\n if \"leicester\" in team_name:\n team_name = \"leicester\"\n if \"norwich\" in team_name:\n team_name = \"norwich\"\n if \"mallorca\" in team_name:\n team_name = \"mallorca\"\n if \"parma\" in team_name:\n team_name = \"parma+calcio\"\n if \"bayern\" in team_name:\n team_name = \"bayern\"\n if \"koln\" in team_name:\n team_name = \"fc+koln\"\n if \"union+berlin\" in team_name:\n team_name = \"union+berlin\"\n if \"fsv+mainz\" in team_name:\n team_name = \"mainz\"\n if \"hoffenheim\" in team_name:\n team_name = \"hoffenheim\"\n if \"mgladbach\" in team_name:\n team_name = \"borussia+monchengladbach\"\n if \"schalke\" in team_name:\n team_name = \"schalke\"\n if \"leverkusen\" in team_name:\n team_name = \"leverkusen\"\n if \"paderborn\" in team_name:\n team_name = \"paderborn\"\n print(team_name)\n response = requests.get(cfg.API_URL + team_name)\n team_data = json.loads(response.text)\n return team_data['teams'][0]", "def getScore(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\r\n score = self.latest_score()\r\n return {'score': score if score is not None else 0,\r\n 'total': self._max_score}", "def get_score(self):\r\n return self.lcp.get_score()", "def get_score(self):\n\n sql = \"SELECT score FROM Users WHERE username = '\" + self.username + \"'\"\n self.cursor.execute(sql)\n return self.cursor.fetchall()[0][0]", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def get_score(self, player):\n if player in self.player_scores:\n return self.player_scores[player]\n else:\n raise Exception(\"Player not in score list\")", "def get_score(self):\n return self.__score", "def get_list_team_scores(self):\n scores = defaultdict(lambda: {\n \"scored_xg\": [],\n \"conceded_xg\": [],\n \"home_adv\": 0,\n \"expected_points\": 0\n })\n\n for g in self.games:\n scores[g.HomeTeam][\"scored_xg\"].append(g.FTHG)\n scores[g.HomeTeam][\"conceded_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"scored_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"conceded_xg\"].append(g.FTHG)\n\n for team in scores.keys():\n scores[team][\"expected_points\"] = (self.get_table(metric='points')[team] /\n len(scores[team][\"scored_xg\"]))\n\n return scores", "def test_lti20_get_with_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n SCORE = 0.55 # pylint: disable=invalid-name\r\n COMMENT = u\"ಠ益ಠ\" # 
pylint: disable=invalid-name\r\n self.xmodule.module_score = SCORE\r\n self.xmodule.score_comment = COMMENT\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\",\r\n \"resultScore\": SCORE,\r\n \"comment\": COMMENT})", "def get_score(self):\n\n return self._score", "def get_scores(self, tournament: Tournament):\n self.model.eval()\n # collate_fn = lambda x: collate_teams(x, tournament.max_members)\n dl_rank = DataLoader(tournament.ranking, num_workers=self.jobs, batch_size=self.bs, shuffle=False)\n iterator = tqdm(dl_rank, position=0, desc=f'{tournament.tournament_id} ranking', disable=True)\n scores = []\n for i, team in enumerate(iterator):\n score = self.model.get_team_score(team.to(self.device))\n scores.append(score.cpu().numpy())\n\n scores = np.concatenate(scores)\n return scores.flatten()", "def matchscore(self):\n print(self.team1.name + \" \" + str(self.team1score) + \" - \" + str(self.team2score) + \" \" + self.team2.name)", "def fetch_teams_stats():\n teams_scraper = TeamStatsScraper(API_URL, API_HEADERS)\n result = teams_scraper.save_objects()\n return result", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 15.0)\r\n self.assertEqual(score_dict['total'], 5.0)", "def get_team_round_score(self, team: int, match_round: int) -> int:\n assert self.rounds, \"There are no rounds or the requested round doesn't exist\"\n\n team_score = 0\n\n if team == 1:\n for player in self.rounds[match_round - 1].team1.players:\n team_score += player.scores\n else:\n for player in self.rounds[match_round - 1].team2.players:\n team_score += player.scores\n\n return team_score", "def get_current_score(self):\n\n # Return the player's current turn score\n return self._current_score" ]
[ "0.7527059", "0.72310585", "0.6813954", "0.681293", "0.675793", "0.675793", "0.675793", "0.66784096", "0.66253316", "0.6584562", "0.6568987", "0.6539687", "0.6478145", "0.6478145", "0.6478145", "0.64599675", "0.6432476", "0.63932824", "0.63802594", "0.6361858", "0.6353685", "0.63448775", "0.6309738", "0.62991405", "0.62593514", "0.62393326", "0.62389714", "0.62332237", "0.6215339", "0.6207062" ]
0.7543717
0
A callback for when the score has changed
def _score_has_changed(self): print('The score for {} has changed'.format(self.team)) self.relay_controller.activate_solenoid()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_score():\n pass", "def set_score(self, change):\n self._score = self._score + change", "def change_score(self, change: float=1):\n self._score += change", "def updateScore(self, score):\n self.__score += score", "def change_score(self, change: float = 1):\n self._score += change", "def updateScore(score):\n return score + 1", "def increase_score(self):\n self.score += 1", "def score(self):", "def l_point(self):\n self.l_score += 1\n self.update_scoreboard()", "def update_score(self, board):\n self._score += 1", "def update_score(self, engine, *args):\n #pdb.set_trace()\n self.score_label.text = \"Gold: {}/{}\".format(str(engine.score),\n str(engine.win_score))", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def update(self, game):\n super().update(game)\n self.nn_def.set_score(self.score)", "def update_g_score(self, value):\n self.g_score = value", "def update_score(self, score: int) -> int:\n self.score += score\n return self.score", "def l_point(self):\n self.l_score += 1\n self.update()", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def update_score(self, score_point: int):\r\n self._score_point = score_point\r\n self._update_score() # change the visual display of points for the player\r", "def r_point(self):\n self.r_score += 1\n self.update_scoreboard()", "def update_score_from_cmd(self, new_score, prev_score):\r\n if new_score is None:\r\n return # No change\r\n \r\n player = new_score[0]\r\n score = new_score[1]\r\n player.set_score(score)", "def bcp_player_score(self, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player['score'] = int(value)", "def disp_score():", "def scores(self, value):\n self._scores = value", "def set_score(self,score):\n self._score = score", "def r_point(self):\n self.r_score += 1\n self.update()", "def enter_game_scores():\n pass", "def update_turn_score(self, score):\n\n # Increment the attribute by the passed value\n self._current_score += score", "def score_update(scoreboard, compare):\r\n if compare == 'Victory':\r\n scoreboard['W'] += 1\r\n elif compare == 'Defeat':\r\n scoreboard['L'] += 1\r\n elif compare == 'Tie':\r\n scoreboard['T'] += 1" ]
[ "0.8341499", "0.74432063", "0.73771644", "0.7372784", "0.734759", "0.73033684", "0.72073954", "0.71673465", "0.7100169", "0.70697486", "0.7063373", "0.69999087", "0.6984844", "0.6956813", "0.6926817", "0.6926514", "0.69130313", "0.69130313", "0.69130313", "0.6872642", "0.68607247", "0.6860533", "0.6824648", "0.6803969", "0.678705", "0.67438745", "0.6735282", "0.6703037", "0.6677746", "0.6665407" ]
0.7795641
1
return a dict(a2p=arr0, p2a=arr1). Key "a2p" means converting amber order to phenix order Key "p2a" means converting phenix order to amber order
def get_indices_convert_dict(fn): pdb_inp = pdb.input(file_name=fn) pdb_hierarchy = pdb_inp.construct_hierarchy() newids = OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_hierarchy.atoms())) oldids= OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_inp.atoms())) return {'p2a': np.array([newids[atom.id_str()] for atom in pdb_inp.atoms()]), 'a2p': np.array([oldids[atom.id_str()] for atom in pdb_hierarchy.atoms()])}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def paramz_to_dict(p):\n return dict([(k.name, np.array(k)) for k in p])", "def convert_arrpart_to_dict(particles):\n partdict = {}\n partdict['m'] = particles[:,0]\n partdict['Z'] = particles[:,2]\n partdict['rho'] = particles[:,5]\n partdict['R'] = particles[:,7]\n partdict['vphi'] = particles[:,9]\n partdict['vR'] = particles[:,10]\n partdict['vz'] = particles[:,11]\n normV = np.sqrt(partdict['vphi']*partdict['vphi']+\\\n partdict['vR']*partdict['vR']+\\\n partdict['vz']*partdict['vz'])\n partdict['Bphi'] = particles[:,16]\n partdict['BR'] = particles[:,17]\n partdict['Bz'] = particles[:,18]\n normB = np.sqrt(partdict['Bphi']*partdict['Bphi']+\\\n partdict['BR']*partdict['BR']+\\\n partdict['Bz']*partdict['Bz'])\n partdict['pitch'] = (partdict['Bphi']*partdict['vphi']+partdict['Bz']*partdict['vz']\\\n +partdict['BR']*partdict['vR'])/(normB*normV)\n \n return partdict", "def hashMap(self,arr):\r\n n = len(arr)\r\n dict1 = {}\r\n i = 1\r\n for i in range(n): \r\n if(i > 0): \r\n key=arr[i]\r\n value=arr[0]\r\n dict1[key] = value\r\n return dict1", "def rec_to_dict(arr):\n\n return dict(zip(arr.dtype.names, arr))", "def array_to_dict(self, x):\n d = {}\n n_fit_p = len(self.fit_parameters)\n n_nui_p = len(self.nuisance_parameters)\n n_wc = len(self.fit_wc_names)\n d['fit_parameters'] = { p: x[i] for i, p in enumerate(self.fit_parameters) }\n d['nuisance_parameters'] = { p: x[i + n_fit_p] for i, p in enumerate(self.nuisance_parameters) }\n d['fit_wc'] = { p: x[i + n_fit_p + n_nui_p] for i, p in enumerate(self.fit_wc_names) }\n return d", "def joint_array_to_dict(vel_torque_array, limb):\n\n return dict(itertools.izip(limb.joint_names(), vel_torque_array))", "def aPriori(self) -> dict:\n \n return self.probIn", "def transform_dict_to_array(parameter):\r\n if parameter['Top'] == 'AC':\r\n d = np.array(parameter['E_BAT']) # 0\r\n d = np.append(d, parameter['eta_BAT']) # 1\r\n d = np.append(d, parameter['t_CONSTANT']) # 2\r\n d = np.append(d, parameter['P_SYS_SOC0_DC']) # 3\r\n d = np.append(d, parameter['P_SYS_SOC0_AC']) # 4\r\n d = np.append(d, parameter['P_SYS_SOC1_DC']) # 5\r\n d = np.append(d, parameter['P_SYS_SOC1_AC']) # 6\r\n d = np.append(d, parameter['AC2BAT_a_in']) # 7\r\n d = np.append(d, parameter['AC2BAT_b_in']) # 8\r\n d = np.append(d, parameter['AC2BAT_c_in']) # 9\r\n d = np.append(d, parameter['BAT2AC_a_out']) # 10\r\n d = np.append(d, parameter['BAT2AC_b_out']) # 11\r\n d = np.append(d, parameter['BAT2AC_c_out']) # 12\r\n d = np.append(d, parameter['P_AC2BAT_DEV']) # 13\r\n d = np.append(d, parameter['P_BAT2AC_DEV']) # 14\r\n d = np.append(d, parameter['P_BAT2AC_out']) # 15\r\n d = np.append(d, parameter['P_AC2BAT_in']) # 16\r\n d = np.append(d, parameter['t_DEAD']) # 17\r\n d = np.append(d, parameter['SOC_h']) # 18\r\n\r\n if parameter['Top'] == 'DC':\r\n d = np.array(parameter['E_BAT']) # 1\r\n d = np.append(d, parameter['P_PV2AC_in']) # 2\r\n d = np.append(d, parameter['P_PV2AC_out']) # 3\r\n d = np.append(d, parameter['P_PV2BAT_in']) # 4\r\n d = np.append(d, parameter['P_BAT2AC_out']) # 5\r\n d = np.append(d, parameter['PV2AC_a_in']) # 6\r\n d = np.append(d, parameter['PV2AC_b_in']) # 7\r\n d = np.append(d, parameter['PV2AC_c_in']) # 8\r\n d = np.append(d, parameter['PV2BAT_a_in']) # 9\r\n d = np.append(d, parameter['PV2BAT_b_in']) # 10\r\n d = np.append(d, parameter['BAT2AC_a_out']) # 11\r\n d = np.append(d, parameter['BAT2AC_b_out']) # 12\r\n d = np.append(d, parameter['BAT2AC_c_out']) # 13\r\n d = np.append(d, parameter['eta_BAT']) # 14\r\n d = np.append(d, 
parameter['SOC_h']) # 15\r\n d = np.append(d, parameter['P_PV2BAT_DEV']) # 16\r\n d = np.append(d, parameter['P_BAT2AC_DEV']) # 17\r\n d = np.append(d, parameter['t_DEAD']) # 18\r\n d = np.append(d, parameter['t_CONSTANT']) # 19\r\n d = np.append(d, parameter['P_SYS_SOC1_DC']) # 20\r\n d = np.append(d, parameter['P_SYS_SOC0_AC']) # 21\r\n d = np.append(d, parameter['P_SYS_SOC0_DC']) # 22\r\n\r\n if parameter['Top'] == 'PV':\r\n d = np.array(parameter['E_BAT'])\r\n d = np.append(d, parameter['P_PV2AC_in'])\r\n d = np.append(d, parameter['P_PV2AC_out'])\r\n d = np.append(d, parameter['P_PV2BAT_in'])\r\n d = np.append(d, parameter['P_BAT2PV_out'])\r\n d = np.append(d, parameter['PV2AC_a_in'])\r\n d = np.append(d, parameter['PV2AC_b_in'])\r\n d = np.append(d, parameter['PV2AC_c_in'])\r\n d = np.append(d, parameter['PV2BAT_a_in'])\r\n d = np.append(d, parameter['PV2BAT_b_in'])\r\n d = np.append(d, parameter['PV2BAT_c_in'])\r\n d = np.append(d, parameter['PV2AC_a_out'])\r\n d = np.append(d, parameter['PV2AC_b_out'])\r\n d = np.append(d, parameter['PV2AC_c_out'])\r\n d = np.append(d, parameter['BAT2PV_a_out'])\r\n d = np.append(d, parameter['BAT2PV_b_out'])\r\n d = np.append(d, parameter['BAT2PV_c_out'])\r\n d = np.append(d, parameter['eta_BAT'])\r\n d = np.append(d, parameter['SOC_h'])\r\n d = np.append(d, parameter['P_PV2BAT_DEV'])\r\n d = np.append(d, parameter['P_BAT2AC_DEV'])\r\n d = np.append(d, parameter['P_SYS_SOC1_DC'])\r\n d = np.append(d, parameter['P_SYS_SOC0_AC'])\r\n d = np.append(d, parameter['P_SYS_SOC0_DC'])\r\n d = np.append(d, parameter['t_DEAD'])\r\n d = np.append(d, parameter['t_CONSTANT'])\r\n\r\n return d", "def basic_array_ejection():\n arr: pa.Array = pa.array([1, 2, 3, 4, 5], type=pa.int8())\n\n srs: pd.Series = arr.to_pandas() # NOTE: Requires pandas installation\n nparr: np.ndarray = arr.to_numpy()\n list_: List[dict] = arr.to_pylist()\n str_: str = arr.to_string()\n\n results = {\n 'to_pandas > to_list': srs.to_list(),\n 'to_numpy > tolist': nparr.tolist(),\n 'to_pylist': list_,\n 'to_string': str_,\n }\n\n pretty_print_result_map(results)", "def fillStanceDictNamesAsKeys(firstSidePoints,secondSidePoints):\n\tnewDict = {}\n\t# value index 0 for akp 1 for chp\n\tfor point in firstSidePoints:\n\t\tif point[0] in newDict.keys():\n\t\t\tnewDict[point[0]] = (newDict[point[0]][0] + 1,newDict[point[0]][1])\n\t\telse:\n\t\t\tnewDict[point[0]] = (1,0)\n\n\tfor point in secondSidePoints:\n\t\tif point[0] in newDict.keys():\n\t\t\tnewDict[point[0]] = (newDict[point[0]][0],newDict[point[0]][1]+1)\n\t\telse:\n\t\t\tnewDict[point[0]] = (0,1)\n\n\treturn newDict", "def standarization_ofconc(a2_data):\n aux_dic = OrderedDict()\n for i in a2_data:\n evol_tuple = a2_data[i]['conc'].keys()\n if len(evol_tuple) != 1:\n raise RuntimeError('too many tuples for conc')\n evol_tuple = evol_tuple[0]\n aux_dic[i] = a2_data[i]['conc'][evol_tuple]\n return aux_dic", "def relativeSortArray(self, arr1, arr2):\n\n dict_sort = {}\n list_total = []\n list_diffs = []\n for i in arr2:\n dict_sort[i] = 0\n\n for i in arr1:\n if i in dict_sort:\n dict_sort[i] +=1\n else:\n list_diffs.append(i)\n list_diffs.sort()\n\n for i in arr2:\n list_total.extend([i] * dict_sort[i])\n\n list_total.extend(list_diffs)\n\n return list_total", "def change_to_pca_format(list_of_dict):\n name_list = []\n index_list = []\n expression_list = []\n combined_list = []\n for img_dict in list_of_dict:\n name_list.append(img_dict[\"Name\"])\n expression_list.append(img_dict[\"Expression\"])\n 
combined_list.append(img_dict[\"Img_arr\"].tolist())\n index_list.append(List_of_people.index(img_dict[\"Name\"]))\n combined_arr = np.array(combined_list)\n return {'Names': name_list, 'Name_indexs': index_list, 'Expression': expression_list, 'Combined_array': combined_arr}", "def array_to_dict(arr: np.ndarray, domain: Optional[np.ndarray] = None) -> DictStrNum:\n\n if domain is None:\n keys, counts = np.unique(numpy_array(arr), return_counts=True)\n out_dict = dict(zip(keys, counts))\n else:\n out_dict = {}\n for d in np.unique(domain):\n arr_d = arr[domain == d]\n keys_d, counts_d = np.unique(numpy_array(arr_d), return_counts=True)\n out_dict[d] = dict(zip(keys_d, counts_d))\n\n return out_dict", "def prime_error_rate_dic(aa_order):\n aa_error_rate_dic = {}\n for i in aa_order:\n #first element of definitions are the from mutation rate\n #and the second element is the to mutation rate\n aa_error_rate_dic[i] = [0.0, 0.0]\n return aa_error_rate_dic", "def fillStanceDict(firstSidePoints,secondSidePoints):\n\tnewDict = {}\n\t# value index 0 for akp 1 for chp\n\tfor point in firstSidePoints:\n\t\tif point[1] in newDict.keys():\n\t\t\tnewDict[point[1]] = (newDict[point[1]][0] + 1,newDict[point[1]][1])\n\t\telse:\n\t\t\tnewDict[point[1]] = (1,0)\n\n\tfor point in secondSidePoints:\n\t\tif point[1] in newDict.keys():\n\t\t\tnewDict[point[1]] = (newDict[point[1]][0],newDict[point[1]][1]+1)\n\t\telse:\n\t\t\tnewDict[point[1]] = (0,1)\n\n\treturn newDict", "def build_dict(arg):\n # helper function to the Evaluator.to_property_di_graph() method that\n # packages the dictionaries returned by the \"associate_\" family of\n # functions and then supplies the master dict (one_dict) to the Vertex\n # obj as **kwargs\n one_dict = {}\n for ar in arg:\n one_dict.update(ar)\n return one_dict", "def ordered_real_state_space(a2_data, py_order, a2_order):\n aux_dic = OrderedDict()\n for py_index, key in enumerate(py_order):\n if key in a2_data:\n try:\n a2_index = a2_order.index(key)\n except ValueError:\n a2_index = None\n aux_dic[StateParPickable(key, py_index, a2_index)] = a2_data[key]\n\n return aux_dic", "def resultsToArray(self):\n data = {}\n for item in self.data_array:\n data[item[0]] = [item[1]]\n return data", "def _build_dict(self, new_order):\n\t\torder_dict = {}\n\t\tfor i, el in enumerate(new_order):\n\t\t\torder_dict[el] = i\n\t\treturn order_dict", "def args_to_dictionaty(args):\n\tres_args = {}\n\tfor i, arg in enumerate(args[1:]):\n\t\tif i % 2 == 0:\n\t\t\tkey = arg\n\t\telse:\n\t\t\tres_args[key] = arg\n\treturn res_args", "def DictFunction2():\r\n print \"Create Second Dictionary\"\r\n NumberDict = dict(zip((i for i in range(16)), (hex(i) for i in range(16))))\r\n print NumberDict", "def create_dict(*args):\n output = {}\n idx = 0\n while idx < len(args):\n output[args[idx + 1]] = args[idx]\n idx += 2\n\n return output", "def ToMap(*args):\n return dict((v, str(i)) for i, v in enumerate(args))", "def aPosteriori(self) -> dict:\n\n simbIn = self.simbIn\n simbOut = self.simbOut\n probIn = self.probIn\n probOut = self.probOut\n mat = self.mat\n\n return {\n i: {\n j: mat[i][j] * probIn[i] / probOut[j] for j in simbOut\n } for i in simbIn\n }", "def todict(self):\n return dict(self.array)", "def _to_array1(self, maps, norb):\n nstate = len(maps[(0, 1)])\n nlt = norb * (norb + 1) // 2\n arrays = numpy.zeros((nlt, nstate, 3), dtype=numpy.int32)\n for i in range(norb):\n for j in range(i + 1, norb):\n ijn = i + j * (j + 1) // 2\n for k, data in enumerate(maps[(i, j)]):\n arrays[ijn, k, 0] = 
data[0]\n arrays[ijn, k, 1] = data[1]\n arrays[ijn, k, 2] = data[2]\n return arrays", "def get_dict_of_int2(self):\n pass", "def normalize_state_space(a2_data):\n aux_dic = OrderedDict()\n for key, vec in a2_data.iteritems():\n _, aux_dic[key] = normalize(vec)\n return aux_dic", "def dict() -> Dict[str, Pin]:" ]
[ "0.6365707", "0.59968275", "0.5911731", "0.5696765", "0.5571321", "0.55429137", "0.54785323", "0.54684395", "0.54655355", "0.5458638", "0.54449743", "0.5401446", "0.53939253", "0.5382905", "0.5363385", "0.5341607", "0.53321666", "0.53309155", "0.53248113", "0.53123164", "0.5308776", "0.52963626", "0.5232546", "0.5232487", "0.52243423", "0.5201372", "0.51670843", "0.5145934", "0.5122767", "0.512249" ]
0.6150025
1
Emit a deprecation warning about a gnomerelated reactor.
def deprecatedGnomeReactor(name: str, version: Version) -> None: stem = DEPRECATION_WARNING_FORMAT % { "fqpn": "twisted.internet." + name, "version": getVersionString(version), } msg = stem + ". Please use twisted.internet.gireactor instead." warnings.warn(msg, category=DeprecationWarning)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deprecation(self, message, *args, **kws):\n self._log(DEPRECATION, message, args, **kws)", "def guarded_deprecation_warning(*args, **kwargs):\n if os.environ.get(\"SERVE_WARN_V1_DEPRECATIONS\", \"0\") == \"1\":\n from ray._private.utils import deprecated\n\n return deprecated(*args, **kwargs)\n else:\n\n def noop_decorator(func):\n return func\n\n return noop_decorator", "def test_deprecated(self):\n client.ThreadedResolver()\n warnings = self.flushWarnings(offendingFunctions=[self.test_deprecated])\n self.assertEquals(\n warnings[0]['message'],\n \"twisted.names.client.ThreadedResolver is deprecated since \"\n \"Twisted 9.0, use twisted.internet.base.ThreadedResolver \"\n \"instead.\")\n self.assertEquals(warnings[0]['category'], DeprecationWarning)\n self.assertEquals(len(warnings), 1)", "def _maybe_show_deprecation_warning(self):\n if self._deprecation_warning is not None:\n show_deprecation_warning(self._deprecation_warning)", "def _report_deprecation(format_str, format_dict):\n if oslo_log:\n # We can't import versionutils at the module level because of circular\n # imports. Importing just oslo_log at the module level and\n # versionutils locally allows us to unit test this and still avoid the\n # circular problem.\n from oslo_log import versionutils\n versionutils.report_deprecated_feature(LOG, format_str,\n format_dict)\n else:\n LOG.warning(format_str, format_dict)", "def __call__(self, *args, **kwargs):\n self._Deprecator__warn()\n return self._Deprecator__todeprecate(*args, **kwargs)", "def deprecated(message, **names):\n module = initialize(2)\n __deferred_definitions__ = module.__deferred_definitions__\n for name, specifier in names.items():\n __deferred_definitions__[name] = DeferredAndDeprecated(\n name, specifier, message)", "def no_log_warn(logical_line):\n\n msg = (\"G330: LOG.warn is deprecated, please use LOG.warning!\")\n if \"LOG.warn(\" in logical_line:\n yield (0, msg)", "def no_log_warn(logical_line):\n\n msg = (\"M352: LOG.warn is deprecated, please use LOG.warning!\")\n if \"LOG.warn(\" in logical_line:\n yield (0, msg)", "def warning(self, msg):\n oscid = self.app.global_osc_id()\n print(\"WARNING : /Llia/%s : %s\" % (oscid, msg))", "def warn(self, msg):\n warning_msg = self._warning_color\n warning_msg += \"[SHOULDER_WARNING] \" + msg\n warning_msg += self._reset_color\n self.logger.warning(warning_msg)", "def warn():\n pass", "def warning(self, msg):\r\n self.logger.warning(msg)", "def deprecated( deprecated_function, *args, **kwargs ):\n\n @wraps( deprecated_function )\n def wrapper( *args, **kwargs ):\n warnings.filterwarnings( 'always' )\n warnings.warn( \"deprecated\", DeprecationWarning )\n deprecated_function( *args, **kwargs )\n\n return wrapper", "def warning(self, msg):\n\n self.logger.warning(msg)", "def no_additional_complaints() -> None:\n logging.getLogger(\"asyncio\").setLevel(\"CRITICAL\")\n warnings.simplefilter(\"ignore\")", "def _check_deprecated(self, dest: str, kwargs, print_warning: bool = True) -> None:\n removal_version = kwargs.get(\"removal_version\", None)\n if removal_version is not None:\n warn_or_error(\n removal_version=removal_version,\n entity=f\"option '{dest}' in {self._scope_str()}\",\n start_version=kwargs.get(\"deprecation_start_version\", None),\n hint=kwargs.get(\"removal_hint\", None),\n print_warning=print_warning,\n )", "def deprecated_call():\n # TODO: Remove this when testing requires pytest>=3.9.\n pieces = pytest.__version__.split(\".\")\n pytest_major_minor = (int(pieces[0]), int(pieces[1]))\n if 
pytest_major_minor < (3, 9):\n return pytest.warns((DeprecationWarning, PendingDeprecationWarning))\n return pytest.deprecated_call()", "def warning(self, msg):\n self.__logger.warning(msg)", "def filter_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n warnings.simplefilter(\"ignore\", category=LightningDeprecationWarning)", "def warning(self, msg: str):\n self._logger.warning(msg)", "def deprecated(filename, msg=''):\r\n def _deprecated(f):\r\n printme = [True]\r\n\r\n def g(*args, **kwargs):\r\n if printme[0]:\r\n print 'WARNING: %s.%s deprecated. %s'\\\r\n % (filename, f.__name__, msg)\r\n printme[0] = False\r\n return f(*args, **kwargs)\r\n return g\r\n\r\n return _deprecated", "def deprecate(old, new=None, version=None):\n def _deprecate(func):\n def wrapper(*args, **kwargs):\n if new is None:\n comment = f\"{old} is deprecated, version > {version}\"\n else:\n comment = f\"Please use {new} rather than {old}, version > {version}\"\n warnings.warn(\n comment,\n DeprecationWarning,\n stacklevel=2\n )\n return func(*args, **kwargs)\n return wrapper\n return _deprecate", "def warning(cls, msg, debug=True):\n if debug:\n Console.warning(msg)", "def test_flrw_moved_deprecation():\n from astropy.cosmology import flrw\n\n # it's deprecated to import `flrw/*` from `core.py`\n with pytest.warns(AstropyDeprecationWarning):\n from astropy.cosmology.core import FLRW\n\n # but they are the same object\n assert FLRW is flrw.FLRW", "def deprecated_call(func, *args, **kwargs): \n warningmodule = py.std.warnings\n l = []\n oldwarn_explicit = getattr(warningmodule, 'warn_explicit')\n def warn_explicit(*args, **kwargs): \n l.append(args) \n oldwarn_explicit(*args, **kwargs)\n oldwarn = getattr(warningmodule, 'warn')\n def warn(*args, **kwargs): \n l.append(args) \n oldwarn(*args, **kwargs)\n \n warningmodule.warn_explicit = warn_explicit\n warningmodule.warn = warn\n try:\n ret = func(*args, **kwargs)\n finally:\n warningmodule.warn_explicit = warn_explicit\n warningmodule.warn = warn\n if not l:\n print warningmodule\n raise AssertionError(\"%r did not produce DeprecationWarning\" %(func,))\n return ret", "def deprecated(func):\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning) # turn off filter\n warnings.warn(\"Call to deprecated function {}.\".format(func.__name__),\n category=DeprecationWarning,\n stacklevel=2)\n warnings.simplefilter('default', DeprecationWarning) # reset filter\n return func(*args, **kwargs)\n return new_func", "def deprecated(func):\n\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n warnings.warn(\n \"Call to deprecated function {}.\".format(func.__name__),\n category=DeprecationWarning,\n stacklevel=2,\n )\n return func(*args, **kwargs)\n\n return new_func", "def warning(msg):\n log('WARNING', msg)", "def deprecated(func):\n\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning) # turn off filter\n warnings.warn(\"Call to deprecated function {}.\".format(func.__name__),\n category=DeprecationWarning,\n stacklevel=2)\n warnings.simplefilter('default', DeprecationWarning) # reset filter\n return func(*args, **kwargs)\n\n return new_func" ]
[ "0.63709056", "0.6337697", "0.61615217", "0.6083312", "0.6040545", "0.59742963", "0.58868295", "0.5778024", "0.5695411", "0.563647", "0.56345797", "0.5613319", "0.55875045", "0.5550916", "0.5532492", "0.5523634", "0.5511823", "0.5491791", "0.5482633", "0.546482", "0.54356366", "0.54315835", "0.5427246", "0.54069614", "0.54052", "0.5393984", "0.5372935", "0.5366525", "0.5349939", "0.53435326" ]
0.75850695
0
clears noise from given image using bilateral Filter.
def filter_image(img): return cv2.bilateralFilter(img, 9, 50, 50)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filtering(image):\n output = np.array(image)\n for x in xrange(0,1):\n bilateralFilter_img = cv2.bilateralFilter(output,5, 75, 75)\n\n return bilateralFilter_img", "def remove_noise(image):\n filtered = cv2.absdiff(image.astype(np.uint8), 255,\n cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 41)\n kernel = np.ones((1, 1), np.uint8)\n opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n\n img = image_smoothening(image)\n transform = cv2.bitwise_or(img, closing)\n return transform", "def softing_noise(image, kn):\n\n s_noise = cv2.GaussianBlur(image, (kn, kn), 0)\n\n return s_noise", "def reset(self):\n self.noise.reset()", "def make_noisy_images(image):\r\n return apply_poisson_noise(image, random_state=12345)", "def perform_noise_removal(mask):\n trans1 = cv.dilate(mask, KERNEL, iterations=4)\n trans1 = cv.erode(trans1, KERNEL, iterations=5)\n return cv.dilate(trans1, KERNEL, iterations=7)", "def blur_image(im, n, ny=None) :\n g = gauss_kern(n, sizey=ny)\n improc = signal.convolve(im,g, mode='same')\n return(improc)", "def applyMorphologicalCleaning(self, image):", "def reset_noise(self):\n self.advantage_hidden_layer.reset_noise()\n self.advantage_layer.reset_noise()\n self.value_hidden_layer.reset_noise()\n self.value_layer.reset_noise()", "def remove_noise(emg):\n def butter_bandstop_filter(data, lowcut, highcut, fs, order=2):\n def butter_bandstop(lowcut, highcut, fs, order=2):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='bandstop')\n return b, a\n \n b, a = butter_bandstop(lowcut, highcut, fs, order=order)\n y = lfilter(b, a, data)\n return y\n \n # Remove noise from signal\n for channel in [\"emg1\", \"emg2\", \"emg3\", \"emg4\", \"emg5\", \"emg6\"]:\n emg[channel] = butter_bandstop_filter(emg[channel], 49., 51., EMG_F_SAMPLE, order=2)\n return emg", "def remove_noise(self):\n kernel = np.ones((5, 5), np.uint8)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_CLOSE, kernel)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_OPEN, kernel)", "def blur(im: Image) -> Image:\n return im.filter(ImageFilter.GaussianBlur(radius=random.uniform(\n *ImageOperations.config.get('radius_interval'))\n ))", "def bilateral_filter(img, n, sigma_s, sigma_r):\n filter_ = build_filter(n, sigma_s)\n \n original_shape = list(img.shape)\n\n pad = n//2\n img = padding(img, pad)\n \n new_img = np.zeros_like(img)\n\n for i in range(pad, original_shape[0]+pad):\n for j in range(pad, original_shape[1]+pad):\n # Operations happen vectorially around img[i][j]\n \n # Grid centered in img[i][j]\n sub_matrix = img[i-pad:i+pad+1, j-pad:j+pad+1]\n\n gr = gaussian(sub_matrix-img[i][j], sigma_r)\n \n wt = np.multiply(gr, filter_)\n w = np.sum(wt)\n\n pixel = np.sum(np.multiply(wt, sub_matrix))\n pixel = pixel/w\n\n new_img[i][j] = pixel\n\n new_img = unpadding(new_img, pad)\n \n return new_img", "def random_blur(self, img, p = 0.5):\n if self.decision(p):\n img = ndimage.gaussian_filter(img, sigma=1)\n return img", "def bilateral(filename,input_image, sigma_spatial, sigma_intensity):\n\t# make a simple Gaussian function taking the squared radius\n\tgaussian = lambda r2, sigma: np.exp(-0.5*r2/sigma**2 )\n\t#print(input_image.shape)\n\tinput_image = cv2.cvtColor(input_image,cv2.COLOR_BGR2RGB)\n\n\t# define the window width to be the 2 time the spatial std. dev. 
to\n\t# be sure that most of the spatial kernel is actually captured\n\twin_width = int(3*sigma_spatial +1)\n\twgt_sum = np.zeros_like(input_image).astype(np.float64)\n\tresult = np.zeros_like(input_image).astype(np.float64)\n\tout= np.zeros_like(input_image).astype(np.float64)\n\t\n\tfor i in tqdm(range(input_image.shape[-1]),desc=\"Going through color channels\"):\n\t\tnorm_image = normalize(input_image[:,:,i])\n\t\tfor shft_x in range(-win_width,win_width+1):\n\t\t\tfor shft_y in range(-win_width,win_width+1):\n\t\t\t\t# compute the spatial contribution\n\t\t\t\tspatial = gaussian(shft_x**2+shft_y**2, sigma_spatial )\n\t\n\t\t\t\t# shift by the offsets to get image window\n\t\t\t\twindow = np.roll(norm_image, [shft_y, shft_x], axis=[0,1])\n\t\n\t\t\t\t# compute the intensity contribution\n\t\t\t\tcombined_filter = spatial*gaussian( (window-norm_image)**2, sigma_intensity )\n\t\n\t\t\t\t# result stores the mult. between combined filter and image window\n\t\t\t\tresult[:,:,i] += window*combined_filter\n\t\t\t\twgt_sum[:,:,i] += combined_filter\n\tout = normalize(result/wgt_sum)\n\n\t# normalize the result and return\n\tplt.imsave(\"outputImages/Bilateral_\"+filename+\"_\"+str(sigma_spatial)+\"_\"+ str(sigma_intensity) + \".png\" ,out,dpi=600)\n\treturn out", "def remove_background(img):\n\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # Remove noise - anything higher than the threshold. Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img", "def colorImgPreProcess(self, image):\n #do processing on the image while it's still in color\n image = cv2.medianBlur(image, 7) #kernal size must be odd\n #image = cv2.bilateralFilter(image, 9, 75, 75) #TODO: uncomment when it won't cause C++ errors with ROS\n #self.closeImages() #uncomment if showing output image\n return image", "def add_imageNoise(img):\n if not np.all(img >= 0):\n print 'make sure the image pixel values are positive definite'\n sys.exit()\n noise = st.poisson.rvs(1.,loc = -1.,scale=1.,size=img.shape)*np.sqrt(img)\n return noise", "def _denoise(self, img, weight):\n\n from skimage.filters import denoise_tv_chambolle\n\n img = denoise_tv_chambolle(img, weight=weight) * 255\n\n return img.astype(\"uint8\")", "def remove_background(img):\n \n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n \n # Remove noise - anything higher than the threshold. 
Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img", "def add_noise(image):\n image += 10e-10 * np.random.randn(image.shape[0], image.shape[1], 1)\n \n return image", "def ls_sr_only_clear(img):\n clearbit = 1\n clearmask = math.pow(2, clearbit)\n qa = img.select('PIXEL_QA')\n qa_mask = qa.bitwiseAnd(clearmask)\n\n ra = img.select('RADSAT_QA')\n ra_mask = ra.eq(0)\n\n return ee.Image(img.updateMask(qa_mask).updateMask(ra_mask))", "def noiseRemoval(array, minSize, classes):\n img=array.astype('int')\n for i in range(classes):\n B=(img!=i) # return a bool array\n B = morphology.remove_small_objects(B, min_size=minSize, connectivity=1) \n img[B==False]=i\n \n return img", "def EST_NOISE(images):\n num = images.shape[0]\n m_e_bar = sum(images)/num\n m_sigma = np.sqrt(sum((images - m_e_bar)**2)/(num - 1))\n \n return m_sigma", "def remove_background1(img):\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # Remove noise - anything higher than the threshold. Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img", "def apply_filter(self, image):\n gauss_low = cv2.GaussianBlur(image, ksize=(0,0), sigmaX=self._sigma_low , sigmaY=self._sigma_low)\n gauss_high = cv2.GaussianBlur(image, ksize=(0,0), sigmaX=self._sigma_high, sigmaY=self._sigma_high)\n\n filtered_image = gauss_low - gauss_high\n\n return normalize(filtered_image, nb_bits=8)", "def get_image_with_poisson_noise(image):\r\n img = tf_norm_crop_resize_image(image, resize_dim=(64,64))\r\n noisy_img = np.clip(make_noisy_images(img*255.)/255., 0., 1.)\r\n return noisy_img", "def addNoise (image,noise_type=\"gauss\",var = .01):\n row,col,ch= image.shape\n if noise_type == \"gauss\": \n mean = 0.0\n #var = 0.001\n sigma = var**0.5\n gauss = np.array(image.shape)\n gauss = np.random.normal(mean,sigma,(row,col,ch))\n gauss = gauss.reshape(row,col,ch)\n #print(gauss)\n noisy = image + gauss*255\n return noisy.astype('uint8')\n elif noise_type == \"s&p\":\n s_vs_p = 0.5\n amount = 0.09\n out = image\n # Generate Salt '1' noise\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[coords] = 255\n # Generate Pepper '0' noise\n num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper))\n for i in image.shape]\n out[coords] = 0\n return out\n elif noise_type == \"poisson\":\n vals = len(np.unique(image))\n vals = 2 ** np.ceil(np.log2(vals))\n noisy = np.random.poisson(image * vals) / float(vals)\n return noisy\n elif noise_type ==\"speckle\":\n gauss = np.random.randn(row,col,ch)\n gauss = gauss.reshape(row,col,ch) \n noisy = image + image * gauss\n return noisy\n else:\n return image", "def filter(self, img: np.ndarray) -> np.ndarray:\n raise NotImplemented", "def dynamic_masking(image):\n image = img_as_float(image)\n background = gaussian_filter(median_filter(image,3),1)\n image[background > threshold_otsu(background)/5.0] = 0.0\n \n return image" ]
[ "0.6837545", "0.66253495", "0.6352146", "0.6252205", "0.6215135", "0.6187826", "0.60780174", "0.60614115", "0.6018258", "0.5996656", "0.5905434", "0.59041035", "0.58784217", "0.581671", "0.5788512", "0.57701117", "0.5768871", "0.5768721", "0.57474273", "0.57364833", "0.5682262", "0.5667489", "0.5664114", "0.5659966", "0.56585085", "0.5647047", "0.56309485", "0.5610679", "0.5609611", "0.5588345" ]
0.6760517
1
Receives two images to compare, img1 being the original. and a string indictating which error function to use. doesnt assume images are the same size.
def compare_img(img1, img2, err_function="ALL"): # make sure images are the same shape # height1, width1, height2, width2 = img1.shape[0], img1.shape[1], img2.shape[0], img2.shape[1] if img1.shape != img2.shape: if width1 * height1 > width2 * height2: img1 = resize_image(img1, width2, height2) else: img2 = resize_image(img2, width1, height1) # TODO: create better resize to avoid interpolation when possible # compare images# func_arr = [mse, ssim, L1_norm] err_arr = [] for func in func_arr: if err_function == "ALL" or func.__name__.upper() == err_function: err_arr.append(func(img1, img2)) return np.array(err_arr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_images(self, img1, img2):\n if self.debug:\n cv2.imshow('img1', img1)\n cv2.imshow('img2', img2)\n cv2.waitKey(5)\n time.sleep(2)\n\n # find the mean squared difference between the images\n # http://www.pyimagesearch.com/2014/09/15/python-compare-two-images/\n err = np.sum((img1.astype('float') - img2.astype('float')) ** 2)\n err /= float(img1.shape[0] * img2.shape[1])\n\n # lower is more similar (better)\n return err", "def img_compare(file1, file2):\n # read image\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n\n # resize \n size = 128, 128\n img1_res = img_resize(img1, size)\n img2_res = img_resize(img2, size)\n\n img1_res.save(\"img_1.thumbnail\", \"JPEG\")\n img2_res.save(\"img_2.thumbnail\", \"JPEG\")\n\n # convert to gray scale\n img1_grayscale = img1_res.convert('LA')\n img1_grayscale.save(\"img_1_grayscale.png\")\n\n img2_grayscale = img2_res.convert('LA')\n img2_grayscale.save(\"img_2_grayscale.png\")\n\n # normalise\n img1_norm = normalize(np.array(img1_grayscale.getdata()).astype(float))\n img2_norm = normalize(np.array(img2_grayscale.getdata()).astype(float))\n\n try:\n # compare two images\n diff = img1_norm - img2_norm\n m_norm = sum(abs(diff)) # Manhattan norm\n z_norm = norm(diff.ravel(), 0) # Zero norm\n\n # print(\"Manhattan norm:\", m_norm, \"/ per pixel:\", m_norm/img1_norm.size)\n # print(\"Zero norm:\", z_norm, \"/ per pixel:\", z_norm*1.0/img1_norm.size)\n\n return m_norm/img1_norm.size, float(z_norm) / img1_norm.size\n except:\n return 100, 100", "def compare_images(img1, img2):\n #normalize scene pixel values\n img1_mean = img1.mean() \n img1_std = img1.std()\n for i in np.nditer(img1, op_flags=['readwrite']):\n i[...] = (i-img1_mean)/img1_std\n\n #normalize template pixel values\n img2_mean = img2.mean() \n img2_std = img2.std()\n for i in np.nditer(img2, op_flags=['readwrite']):\n i[...] 
= (i-img2_mean)/img2_std\n\n #sums error\n error_array = img1 - img2\n error_array = error_array.astype(np.int8)\n ss_error = 0\n for i in np.nditer(error_array):\n ss_error += abs(i/255.0)**0.5\n #print ss_error\n return ss_error", "def __compareImage(self, file1, file2):\n # arg=self.__validateString(str_arg)\n # file1, file2=arg.split(' ', 1)\n try:\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n if img1.size != img2.size:\n return False\n by1 = img1.tobytes()\n by2 = img2.tobytes()\n # format r,g,b,255,r,g,b,255, 3 bytes = 1 point, 255=separator, total 4 bytes\n l = len(by1) / 4\n # total points and same points\n tp = 0\n sp = 0\n for j in range(l):\n i = j * 4\n tp += 1\n if by1[i] == by2[i] and by1[i + 1] == by2[i + 1] and by1[i + 2] == by2[i + 2]:\n sp += 1\n # max to 2% diff allowed\n if tp * 0.98 > sp:\n return False\n else:\n return True\n except Exception, e:\n printLog(self.threadName + \"Exception in __compareImage: %s\" % e.message, logging.ERROR)\n traceback.print_exc()\n return False\n finally:\n img1 = None\n img2 = None", "def compare_images(im1, im2):\n errors = (im1 - im2) / 255\n return np.mean(np.square(errors))", "def compare(image_a, image_b):\n image_a = standardize_format(image_a)\n grayscale_image_a = to_grayscale(image_a)\n image_b = standardize_format(image_b)\n grayscale_image_b = to_grayscale(image_b)\n err = mse(grayscale_image_a, grayscale_image_b)\n return err", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def get_comparison_error(self, img1, img2, diffImg):\n\n output = subprocess.check_output(\n [\"compare\", \"-metric\", \"RMSE\", \"-alpha\", \"Off\", img1, img2, diffImg],\n stderr=subprocess.STDOUT,\n )\n rmse = float(output.split()[0])\n percent = float(output.split()[1][1:-1])\n return rmse, percent", "def compare(image_a, image_b, is_camera_image):\n\n # Generate a unique filename\n filename = uuid.uuid4().hex[:3]\n\n if is_camera_image:\n image_a = imutils.rotate_bound(image_a, 90)\n image_b = imutils.rotate_bound(image_b, 90)\n\n # Store original to show in future\n original = image_a\n\n # Convert to greyscale\n image_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n image_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)\n\n # Reduce size and blur to account for shaky handheld camera based images\n if is_camera_image:\n scale_multiplier = 0.03125\n image_a = cv2.resize(image_a, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_b = cv2.resize(image_b, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_a = cv2.GaussianBlur(image_a, (1001, 1001), cv2.BORDER_DEFAULT)\n image_b = cv2.GaussianBlur(image_b, (1001, 1001), cv2.BORDER_DEFAULT)\n\n # Obtain SSIM and determine differences\n try:\n _, differences = structural_similarity(image_a, image_b, full=True, gaussian_weights=True)\n except ValueError:\n print('Images are not the same size')\n return None\n\n # Convert to cv2 array\n differences = (differences * 255).astype('uint8')\n\n # Threshold and find contours (differences)\n thresh = cv2.threshold(differences, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n\n # Draw contours (differences)\n for cont in contours:\n (x, y, w, h) = cv2.boundingRect(cont)\n if is_camera_image:\n multiplier = int(1 / scale_multiplier)\n y *= multiplier\n x *= multiplier\n h *= multiplier\n w *= multiplier\n cv2.rectangle(original, (x, y), (x + w, y + h), (255, 0, 0), 
4)\n\n # TODO: Create GIF highlighting differences (instead of statuic image)\n cv2.imwrite('static/images/differences/' + filename + '.jpg', original)\n\n return filename", "def main(im1_filename: Path, im2_filename: Path) -> None:\n im1 = np.array(Image.open(im1_filename).convert(\"RGB\"))\n im2 = np.array(Image.open(im2_filename).convert(\"RGB\"))\n\n im1 = im1[:, :, ::-1]\n id_face_loc = get_bounding_boxes(im1)\n im1 = im1[:, :, ::-1]\n face_encodings = face_recognition.face_encodings(im1, id_face_loc, 10, \"large\")[0]\n\n im2 = im2[:, :, ::-1]\n cam_face_loc = get_bounding_boxes(im2)\n im2 = im2[:, :, ::-1]\n face_encodings2 = face_recognition.face_encodings(im2, cam_face_loc, 10, \"large\")[0]\n\n dist = face_recognition.face_distance([face_encodings], face_encodings2)[0]\n if dist < 0.5:\n print(f\"[+] These images belong to the same person! ({dist})\")\n else:\n print(f\"[-] These images do not belong to the same person! ({dist})\")", "def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None", "def assert_image(visual, img, img_name, expected_image_filename, expected_result='equal', threshold=0):\n # Save result image in output folder\n result_file = os.path.join(visual.output_directory, f'{img_name}.png')\n img.save(result_file)\n\n # Output image and expected image must be equal\n expected_image = os.path.join(root_path, 'resources', f'{expected_image_filename}.png')\n compare_image_files(visual, previous_method_name(), result_file, expected_image, expected_result, threshold)", "def compare_images(self):\r\n m = round(self.mse(self.image_a, self.image_b), 4)\r\n s = round(ssim(self.image_a, self.image_b) * 100, 5)\r\n return (\r\n m, s)", "def assert_image_equal(path1, path2):\n test_im = np.asarray(Image.open(path1))\n ref_im = np.asarray(Image.open(path2))\n npt.assert_array_equal(test_im, ref_im)", "def compare_images(original_img, transformed_img):\r\n original_img = np.array(original_img, np.float32)\r\n transformed_img = np.array(transformed_img, np.float32)\r\n\r\n mse = metrics.mean_squared_error(original_img, transformed_img)\r\n nrmse = metrics.normalized_root_mse(original_img, transformed_img)\r\n ssim = metrics.structural_similarity(original_img, transformed_img)\r\n psnr = metrics.peak_signal_noise_ratio(original_img, transformed_img, data_range=255)\r\n\r\n return {\"MSE\": mse, \"NRMSE\": nrmse, \"PSNR\": psnr, \"SSIM\": ssim}", "def compare_images(first_img_path, second_img_path):\n img1 = Image.open(first_img_path)\n img2 = Image.open(second_img_path)\n\n diff = ImageChops.difference(img1, img2)\n print(diff.getbbox())", "def cli(fig1, fig2, out):\n click.echo('\\n' + '.' * 50)\n\n # open first image\n image1 = Image.open(fig1)\n\n # open second image\n image2 = Image.open(fig2)\n\n # retrieve the image dimensions.\n width, height = image1.size\n width2, height2 = image2.size\n\n if [width, height] != [width2, height2]:\n print(\"Image dimensions do not match! 
The Two inputs must have equal dimensions\")\n exit(1)\n else:\n print(\"Fig1 dimensions: \", image1.size)\n print(\"Fig2 dimensions: \", image2.size)\n # Create a new image object.\n merged = Image.new('RGB', image1.size)\n\n for i in range(0, width):\n for j in range(0, height):\n ima1 = list(image1.getpixel((i, j)))\n ima2 = list(image2.getpixel((i, j)))\n if ima1 == ima2:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] == [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] != [0, 0, 0]:\n r, g, b, a = ima2\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] == [0, 0, 0]:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and ima2 == [255, 255, 255, 255]:\n r, g, b, a = ima1\n elif [ima2[0], ima2[1], ima2[2]] != [0, 0, 0] and ima1 == [255, 255, 255, 255]:\n r, g, b, a = ima2\n else:\n # print ima1,ima2\n r = (ima1[0] + ima2[0]) // 2\n g = (ima1[1] + ima2[1]) // 2\n b = (ima1[2] + ima2[2]) // 2\n a = 255\n # print [r,g,b,a]\n\n merged.putpixel((i, j), (r, g, b, a))\n merged.save(out)\n click.echo('\\n' + '.' * 50)", "def pixel_diff(image_a, image_b):\n\n if image_a.size != image_b.size:\n raise ImageCompareException(\n \"different image sizes, can only compare same size images: A=\" + str(image_a.size) + \" B=\" + str(\n image_b.size))\n\n if image_a.mode != image_b.mode:\n raise ImageCompareException(\n \"different image mode, can only compare same mode images: A=\" + str(image_a.mode) + \" B=\" + str(\n image_b.mode))\n\n diff = ImageChops.difference(image_a, image_b)\n diff = diff.convert('L')\n\n return diff", "def compare_images(originalImg, modifiedImg):\n fig, axes = plt.subplots(nrows=1, ncols=2, sharex='all', sharey='all',dpi=144)\n # ax = axes.ravel()\n\n psnr_orig = msr.compare_psnr(originalImg, originalImg)\n ssim_orig = msr.compare_ssim(\n originalImg, originalImg, multichannel=True)\n\n psnr_mod = msr.compare_psnr(originalImg, modifiedImg)\n ssim_mod = msr.compare_ssim(\n originalImg, modifiedImg, multichannel=True)\n\n label = 'PSNR: {:.2f}, SSIM: {:.2f}'\n\n axes[0].imshow(originalImg, cmap=plt.cm.gray)\n axes[0].set_xlabel(label.format(psnr_orig, ssim_orig))\n axes[0].set_title('Original image')\n\n axes[1].imshow(modifiedImg, cmap=plt.cm.gray)\n axes[1].set_xlabel(label.format(psnr_mod, ssim_mod))\n axes[1].set_title('Modified image')\n\n plt.show()", "def _diff_images(img_before, img_after):\n width_before, height_before = img_before.size\n width_after, height_after = img_after.size\n data_before = img_before.getdata()\n data_after = img_after.getdata()\n\n width, height = max(width_before, width_after), max(height_before, height_after)\n offset_ax = (width - width_before) // 2\n offset_ay = (height - height_before) // 2\n offset_bx = (width - width_after) // 2\n offset_by = (height - height_after) // 2\n\n diff = 0\n for y in range(height):\n for x in range(width):\n ax, ay = x - offset_ax, y - offset_ay\n bx, by = x - offset_bx, y - offset_by\n if (ax < 0 or bx < 0 or ax >= width_before or bx >= width_after or\n ay < 0 or by < 0 or ay >= height_before or by >= height_after):\n diff += 1\n else:\n if data_before[ax + ay *width_before] != data_after[bx + by * width_after]:\n diff += 1\n try:\n return round(diff / float(width * height), 4)\n except ZeroDivisionError:\n return 0.0", "def test_errors_for_unequal_image_size() -> None:\n cam = Camera(imgsz=(100, 200), f=(10, 10))\n xcam = Matlab(imgsz=(100, 100), fc=(10, 10))\n with pytest.raises(ValueError):\n Converter(xcam, cam)", "def _ShapeMismatch(a, b):\n return 'Shapes do 
not match, %s v. %s' % (str(a), str(b))", "def image_comparison(unaligned_image_ccd_lst,aligned_image_ccd_lst,stacked_img_ccd,outputs_path,obsdate):\n source_hdu = CCDData(unaligned_image_ccd_lst[0],unit='adu')\n source_image_hdr = source_hdu.header\n run_filename = source_image_hdr['RUN'].strip(' ')\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n # compare unaligned vs aligned images\n for i, unaligned_img in enumerate(unaligned_image_ccd_lst[1:]):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10), tight_layout=True)\n \n # source_hdu = CCDData(unaligned_image_ccd_lst[0],unit='adu')\n image_hdr = unaligned_img.header\n run_filename = image_hdr['RUN'].strip(' ')\n target_name = image_hdr['FIELD'].strip(' ')\n exptime = image_hdr['EXPTIME']\n chip_num = image_hdr['CHIP']\n \n show_image(unaligned_img, cmap='gray', ax=ax1, fig=fig, percl=90)\n ax1.set_title('Unaligned Image for {}-{}-{}-{}s ({})'.format(run_filename,target_name,chip_num,exptime,obsdate))\n \n show_image(aligned_image_ccd_lst[i], cmap='gray', ax=ax2, fig=fig, percl=90)\n ax2.set_title('Aligned Image for {}-{}-{}-{}s ({})'.format(run_filename,target_name,chip_num,exptime,obsdate))\n \n plt.savefig(outputs_path/\"unaligned_vs_aligned_{}-{}-{}-{}.jpg\".format(run_filename,target_name,chip_num,exptime),dpi=900)\n plt.show()\n \n # compare source image to stacked image\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10), tight_layout=True)\n \n show_image(unaligned_image_ccd_lst[0], cmap='gray', ax=ax1, fig=fig, percl=90)\n ax1.set_title('Unaligned Source Image for {}-{}-{}s ({})'.format(target_name,chip_num,exptime,obsdate))\n \n show_image(stacked_img_ccd, cmap='gray', ax=ax2, fig=fig, percl=90)\n ax2.set_title('Aligned Stacked Image for {}-{}-{}s ({})'.format(target_name,chip_num,exptime,obsdate))\n \n plt.savefig(outputs_path/\"source_vs_stacked_{}-{}-{}.jpg\".format(target_name,chip_num,exptime),dpi=900)\n plt.show()", "def test_check_wrong_image(self):\n result = analyzer.check_image_color(\"tests/test_files/non_exists.jpg\")\n self.assertEqual(result, \"Image not found\")", "def assert_img_equal(img1, img2, thresh=0.001, resize=True):\n\n def standardize_args(img):\n \"\"\" Transform some img representation into a numpy array \"\"\"\n if isinstance(img, np.ndarray):\n pass\n elif isinstance(img, Image.Image):\n img = np.array(img)\n else:\n # Assume its something path/str-like\n img = cv2.imread(str(img))\n img[..., :3] = img[..., :3][..., ::-1]\n img = img.astype(np.float32)\n if img.ndim == 2:\n img = img[..., None]\n return img\n\n img1 = standardize_args(img1)\n img2 = standardize_args(img2)\n\n if resize and img1.shape != img2.shape:\n img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))\n\n avg_diff = np.linalg.norm(img1 - img2, axis=-1).mean()\n\n assert avg_diff < thresh", "def do_comparex(self, str_arg):\n arg = validateString(str_arg)\n file1, fileset = arg.split(' ', 1)\n if len(fileset) == 0:\n self.resultFlag = False\n raise ValueError('Bad parameter. Please check your script.')\n if not os.path.isfile(file1):\n self.resultFlag = False\n raise ValueError(file1 + ' not exist, Please check your script.')\n # f_list=[pp1 for pp1 in fileset.split(' ') if pp1!='']\n for fn in fileset.split(' '):\n # print file1, f2\n if not os.path.isfile(fn):\n self.resultFlag = False\n raise ValueError(fn + ' not exist, Please check your script.')\n if self.__compareImage(file1, fn):\n self.resultFlag = True\n print('[Found match. 
%s and %s are identical.]' % (file1, fn))\n return\n print('[No match found.]')\n self.resultFlag = False", "def assert_widget_image(tmpdir, widget, filename, fail_now=True):\n\n # If requested, save the \"actual\" images in another directory that will be\n # preserved beyond the test run.\n\n if IMAGE_OUTPUT_DIR:\n actual = os.path.join(IMAGE_OUTPUT_DIR, filename)\n else:\n actual = tmpdir.join(filename).strpath\n\n widget.render(actual)\n\n # Compare to the references\n\n refdir = os.path.join(DATA, 'refimg_' + os.path.splitext(filename)[0])\n results = []\n\n for refbase in sorted(os.listdir(refdir)):\n refname = os.path.splitext(refbase)[0]\n expected = os.path.join(refdir, refbase)\n rv = compare_images(\n expected,\n actual,\n tol=IMAGE_COMPARISON_TOLERANCE,\n in_decorator=True\n )\n\n if rv is None:\n # Success! Clean up any fail images (mostly for the IMAGE_OUTPUT_DIR mode)\n for p in glob(actual.replace('.png', '_vs_*.png')):\n os.unlink(p)\n return None\n\n failpath = actual.replace('.png', '-failed-diff.png')\n newfailpath = actual.replace('.png', '_vs_%s.png' % refname)\n os.rename(failpath, newfailpath)\n results.append((refname, rv['rms']))\n\n # Nothing was good enough :-(\n #\n # We used to have machinery here to emit a \"reproduction script\" that\n # printed out Python code to recreate the image files using big\n # BASE64-encoded strings, but now we can just use Azure Pipelines artifacts.\n # Consult the Git history if the reproduction script stuff is needed again.\n\n msg = (\n 'observed image %s did not match any references to required RMS tolerance of '\n '%.2f; results were: %s'\n ) % (actual, IMAGE_COMPARISON_TOLERANCE, ', '.join('%s=%.2f' % t for t in results))\n\n if fail_now:\n pytest.fail(msg, pytrace=False)\n\n return '{}: {}'.format(filename, msg)", "def ff_correct_image(image):\n pass", "def ff_correct_image(image):\n pass", "def assert_files_equal(file1, file2, error_msg='file mismatch'):\n\n bufsize = 0x1000\n block_offset = 0\n with open(file1, 'rb') as fp1, open(file2, 'rb') as fp2:\n while True:\n block1 = bytearray(fp1.read(bufsize))\n block2 = bytearray(fp2.read(bufsize))\n if len(block1) < len(block2):\n raise TestException(error_msg + ': file1 shorter than file2')\n elif len(block1) > len(block2):\n raise TestException(error_msg + ': file1 longer than file2')\n\n if block1 != block2:\n for offset, (val1, val2) in enumerate(zip(block1, block2)):\n if val1 != val2:\n # Show the difference\n exception_text = error_msg + ':\\n'\n rounded_offset = offset & ~15\n exception_text += '{:08x} '.format(block_offset +\n rounded_offset)\n for lineoffs in range(16):\n exception_text += '{:02x}'.format(\n block1[rounded_offset + lineoffs])\n\n exception_text += '\\n{:08x} '.format(\n block_offset + rounded_offset)\n for lineoffs in range(16):\n exception_text += '{:02x}'.format(\n block2[rounded_offset + lineoffs])\n\n exception_text += '\\n '\n for lineoffs in range(16):\n if block1[rounded_offset + lineoffs] \\\n != block2[rounded_offset + lineoffs]:\n exception_text += '^^'\n else:\n exception_text += ' '\n\n raise TestException(exception_text)\n\n if not block1:\n return\n\n block_offset += len(block1)" ]
[ "0.7262324", "0.72613925", "0.7194562", "0.71092004", "0.69404185", "0.6802441", "0.66210765", "0.6609339", "0.66056174", "0.65600413", "0.646752", "0.6427402", "0.6348418", "0.63423145", "0.6291259", "0.62624484", "0.6253601", "0.6207154", "0.6180912", "0.6175744", "0.613465", "0.6133793", "0.61080223", "0.61012715", "0.60658735", "0.6032522", "0.6016", "0.59995997", "0.59995997", "0.5996219" ]
0.81464946
0
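For orientation, a minimal standalone sketch of the pixel-wise mean-squared-error check that the compare_img record above builds on; the function name, the toy arrays, and the uint8 dtype are assumptions chosen for the example, not taken from the dataset.

import numpy as np

def mse(img1: np.ndarray, img2: np.ndarray) -> float:
    # Mean squared error over all pixels; assumes both arrays already share a shape,
    # which is why compare_img resizes the larger image first.
    diff = img1.astype(np.float64) - img2.astype(np.float64)
    return float(np.mean(diff ** 2))

# Usage: identical images score 0.0, larger values mean a bigger per-pixel difference.
a = np.zeros((4, 4), dtype=np.uint8)
b = np.full((4, 4), 10, dtype=np.uint8)
print(mse(a, a))  # 0.0
print(mse(a, b))  # 100.0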
Function to log an event with the given key. If the ``key`` has not exceeded its allotted events, then the function returns ``False`` to indicate that no limit is being imposed. If the ``key`` has exceeded the number of events, then the function returns ``True``, indicating that rate-limiting should occur.
def limit(self, key): if self._debug: return False counter = self.database.List(self.name + ':' + key) n = len(counter) is_limited = False if n < self._limit: counter.prepend(str(time.time())) else: oldest = counter[-1] if (oldest is not None) and (time.time() - float(oldest) < self._per): is_limited = True else: counter.prepend(str(time.time())) del counter[:self._limit] counter.pexpire(int(self._per * 2000)) return is_limited
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rate_limited(self, key_function=None):\n if key_function is None:\n def key_function(*args, **kwargs):\n data = pickle.dumps((args, sorted(kwargs.items())))\n return hashlib.md5(data).hexdigest()\n\n def decorator(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n key = key_function(*args, **kwargs)\n if self.limit(key):\n raise RateLimitException(\n 'Call to %s exceeded %s events in %s seconds.' % (\n fn.__name__, self._limit, self._per))\n return fn(*args, **kwargs)\n return inner\n return decorator", "def log_once(key):\r\n\r\n global _last_logged\r\n\r\n if _disabled:\r\n return False\r\n elif key not in _logged:\r\n _logged.add(key)\r\n _last_logged = time.time()\r\n return True\r\n elif _periodic_log and time.time() - _last_logged > 60.0:\r\n _logged.clear()\r\n _last_logged = time.time()\r\n return False\r\n else:\r\n return False", "def add_hit(self, key: str, weight: int = 1) -> bool:\n assert self.reactor is not None\n\n if key not in self.keys:\n return True\n max_hits, window_seconds = self.keys[key]\n\n if key not in self.hits:\n self.hits[key] = RateLimiterLimit(weight, self.reactor.seconds())\n return True\n\n hits, latest_time = self.hits[key]\n\n dt = self.reactor.seconds() - latest_time\n\n # rate = max_hits / window_seconds (hits per second)\n # x = dt * rate\n # leaked_hits = floor(x) (hits obtained after dt seconds)\n leaked_hits, remainder = divmod(dt * max_hits, window_seconds)\n\n # leaked_hits * window_seconds + remainder = dt * max_hits\n # dt - remainder / max_hits = leaked_hits / rate\n new_time: float = latest_time + dt - remainder / float(max_hits)\n\n # First, update the bucket subtracting the leakage amount.\n new_hits: int = hits - int(leaked_hits)\n if new_hits < 0:\n new_hits = 0\n\n # Then, add the new hits and check if it overflows.\n new_hits += weight\n allowance = True\n if new_hits > max_hits:\n allowance = False\n new_hits = max_hits\n\n self.hits[key] = RateLimiterLimit(new_hits, new_time)\n return allowance", "def should_be_throttled(self, identifier, **kwargs):\r\n key = self.convert_identifier_to_key(identifier)\r\n\r\n # Make sure something is there.\r\n cache.add(key, [])\r\n\r\n # Weed out anything older than the timeframe.\r\n minimum_time = int(time.time()) - int(self.timeframe)\r\n times_accessed = [\r\n access for access in cache.get(key) if access >= minimum_time]\r\n cache.set(key, times_accessed, self.expiration)\r\n\r\n if len(times_accessed) >= int(self.throttle_at):\r\n # Throttle them.\r\n return True\r\n\r\n # Let them through.\r\n return False", "def rate_limit(limit: int, key=None):\n\n def decorator(func):\n setattr(func, 'throttling_rate_limit', limit)\n if key:\n setattr(func, 'throttling_key', key)\n return func\n\n return decorator", "def throttle(limit, key=cachekey_static, cache=CACHE,\n retry=True, timeout=None, marker=None, lockargs=None):\n if not THROTTLE_ENABLED:\n return lambda func: func\n\n _timeout = timeout or 10\n multi = isinstance(limit, (tuple, list))\n if multi:\n if not isinstance(key, (tuple, list)):\n key = [key] if key else []\n assert len(limit) >= len(key)\n minimum = [1.0 / float(l) for l in limit]\n maximum = max(minimum)\n expire = [max(10 * m, _timeout) for m in minimum]\n limit = list(izip_longest(\n minimum, key, expire, fillvalue=cachekey_static))\n else:\n minimum = maximum = 1.0 / float(limit)\n expire = max(10 * minimum, _timeout)\n\n timeout = timeout or max(10, maximum * 10)\n lockargs = lockargs or dict(timeout=1, blocking_timeout=timeout)\n\n def _message(label, text, seconds):\n 
if multi:\n label = str([i[1] for i in label])\n return '\"%s\" throttle %s %s seconds' % (label, text, seconds)\n\n def _now(label, start):\n now = time()\n if now - start > timeout:\n message = log_message(label, 'timeout after', now - start)\n log_warning(message)\n raise ThrottleTimeout(message)\n return now\n\n @decorator\n def single_limit(func, *args, **kwargs):\n _key = key(func, args, kwargs)\n\n if _key:\n start = time()\n done = False\n\n while not done:\n delay = 0\n done = True\n with cache.lock('throttle.lock', **lockargs):\n now = _now(_key, start)\n delay = max(cache.get(_key, 0) + minimum - now, 0)\n if not delay:\n cache.set(_key, now, expire)\n if delay:\n if not retry:\n return marker\n log_info(_key, 'retry in', delay)\n sleep(delay)\n done = False\n\n return func(*args, **kwargs)\n\n @decorator\n def multi_limit(func, *args, **kwargs):\n _limits = [\n (minimum, key(func, args, kwargs), expire)\n for minimum, key, expire in limit]\n _limits = [\n (minimum, key, expire)\n for minimum, key, expire in _limits if key]\n\n if _limits:\n start = time()\n done = False\n\n while not done:\n delay = 0\n done = True\n with cache.lock('throttle.lock', **lockargs):\n now = _now(_limits, start)\n seen = set()\n for minimum, key, expire in _limits:\n if key in seen:\n continue\n seen.add(key)\n delay = max(cache.get(key, 0) + minimum - now, 0)\n if delay:\n break\n cache.set(key, now, expire)\n if delay:\n if not retry:\n return marker\n log_info(_limits, 'retry in', delay)\n sleep(delay)\n done = False\n\n return func(*args, **kwargs)\n\n return multi_limit if multi else single_limit", "def valid_key (k, aging_hash, frequency_threshold):\n\n purge_expired(aging_hash)\n current_val = update_aging_hash(aging_hash, k)\n return current_val[1] <= frequency_threshold", "def _is_limited(request, rate, rl):\n def inner(*args, **kwargs):\n is_limited = rl.is_limited(*args, **kwargs)\n\n if is_limited:\n messages.error(\n request,\n _(\"Too many submissions, wait %(time)s.\") % {\n 'time': rate.split('/')[1]})\n\n return is_limited\n\n return inner", "def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)", "def set_limit(self, key: str, max_hits: int, window_seconds: float) -> None:\n assert (window_seconds > 0)\n self.keys[key] = RateLimiterLimit(max_hits, window_seconds)", "def wait_for_n_keypresses(self, key, n=1):\n my_const = \"key_consumed\"\n counter = 0\n\n def keypress_listener(e): return my_const \\\n if e.type == pygame.KEYDOWN and e.key == key \\\n else EventConsumerInfo.DONT_CARE\n\n while counter < n:\n if self.listen(keypress_listener) == my_const:\n counter += 1", "def _HasExpired(self, key):\n self.logger.debug('Processing key: %s.', key)\n\n try:\n schema, json_str = key.split(None, 3)[2:]\n except (ValueError, AttributeError):\n self.logger.debug('No schema identifier. Not expiring key.')\n return False\n\n if schema != 'google-ssh':\n self.logger.debug('Invalid schema %s. Not expiring key.', schema)\n return False\n\n try:\n json_obj = json.loads(json_str)\n except ValueError:\n self.logger.debug('Invalid JSON %s. Not expiring key.', json_str)\n return False\n\n if 'expireOn' not in json_obj:\n self.logger.debug('No expiration timestamp. Not expiring key.')\n return False\n\n expire_str = json_obj['expireOn']\n format_str = '%Y-%m-%dT%H:%M:%S+0000'\n try:\n expire_time = datetime.datetime.strptime(expire_str, format_str)\n except ValueError:\n self.logger.warning(\n 'Expiration timestamp \"%s\" not in format %s. 
Not expiring key.',\n expire_str, format_str)\n return False\n\n # Expire the key if and only if we have exceeded the expiration timestamp.\n return datetime.datetime.utcnow() > expire_time", "def hasKey(self,\n key):\n return self.__keyCount.has_key(key)", "def __contains__(self, key):\n return self._timed_key(key) in self._store", "def rate_limit(entity, limit, duration=60):\n\n return current_rate(entity, limit, duration) > limit", "def limit(self, limit_value, key_func=None, per_method=False):\n return self.__limit_decorator(limit_value, key_func, per_method=per_method)", "def _is_file_level_issue(cls, key, event):\n if key is None:\n return False\n else:\n return FILE_LEVEL_KEY_RE.match(key) is not None", "def should_be_throttled(self, identifier, **kwargs):\r\n return False", "def _check_key(self, key):\n raise NotImplementedError", "def contains(self, key: int) -> bool:\n y = key % 80\n return key in self.arr[y]", "def isValidKey(key):\n return True", "def _can_add(self, key, value):\n return not bool(self._add_callback(key, value))", "def contains(self, key) -> bool:\n if key not in self._cache:\n return False\n value, expiration = self._cache[key]\n if self._clock() <= expiration:\n return True\n else:\n del self._cache[key]\n return False", "def check_k(k):\n MAX_LOGK = 200 * numpy.log(2)\n\n if k is None:\n return k\n try:\n k = numpy.float64(k)\n except ValueError:\n raise NddError('%r is not a valid cardinality' % k)\n if k.ndim:\n # if k is a sequence, set k = prod(k)\n if k.ndim > 1:\n raise NddError('k must be a scalar or 1D array')\n logk = numpy.sum(numpy.log(x) for x in k)\n if logk > MAX_LOGK:\n # too large a number; backoff to n_bins?\n # TODO: log warning\n raise NddError('k is too large (%e).'\n 'Must be < 2^200 ' % numpy.exp(logk))\n k = numpy.prod(k)\n else:\n # if a scalar check size\n if k <= 0:\n raise NddError('k must be > 0 (%r)' % k)\n if numpy.log(k) > MAX_LOGK:\n raise NddError('k is too large (%e).' 'Must be < 2^200 ' % k)\n if not k.is_integer():\n raise NddError('k must be a whole number (got %r).' 
% k)\n\n return k", "def add(self, key):\n if key in self:\n return True\n if not self.filters:\n filter = RedisLocalBloomFilter(\n server=self.server, \n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=self.initial_capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n else:\n filter = self.filters[-1]\n if filter.count >= filter.capacity:\n capacity = filter.capacity * self.scale\n if capacity > MAX_PER_SLICE_SIZE:\n capacity = MAX_PER_SLICE_SIZE\n filter = RedisLocalBloomFilter(\n server=self.server,\n bfkeypreffix = self.FILTER_KEY_FMT % (self.bfkeypreffix, self.filter_count),\n capacity=capacity,\n error_rate=self.error_rate * (1.0 - self.ratio))\n self.filter_count += 1\n self.filters.append(filter)\n if self.max_filters > 0 and len(self.filters) >= self.max_filters:\n f = self.filters[0]\n f.clear()\n del self.filters[0]\n filter.add(key, skip_check=True)\n return False", "def log (self, msg, level=\"info\", flag=None, key = ''):\n\t\tif flag is None: \n\t\t\tflag = level\n\t\tflag = flag.upper().rjust(7)\n\t\tflag = \"[%s]\" % flag\n\t\ttitle = self._name()\n\t\tfunc = getattr(self.logger, level)\n\t\t\n\t\tmaxline = proc.LOG_NLINE[key]\n\t\tprevlog = self.lognline['prevlog']\n\n\t\tif key == prevlog:\n\t\t\tif self.lognline[key] < abs(maxline):\n\t\t\t\tfunc (\"%s %s: %s\" % (flag, title, msg))\n\t\telse:\n\t\t\tn_omit = self.lognline[prevlog] - abs(proc.LOG_NLINE[prevlog])\n\t\t\tif n_omit > 0 and proc.LOG_NLINE[prevlog] < 0: \n\t\t\t\tlogname = 'logs' if n_omit > 1 else 'log'\n\t\t\t\tmaxinfo = ' (%s, max=%s)' % (prevlog, abs(proc.LOG_NLINE[prevlog])) if prevlog else ''\n\t\t\t\tself.logger.debug (\"[ DEBUG] %s: ... and %s %s omitted%s.\" % (title, n_omit, logname, maxinfo))\n\t\t\tself.lognline[prevlog] = 0\n\n\t\t\tif self.lognline[key] < abs(maxline):\n\t\t\t\tfunc (\"%s %s: %s\" % (flag, title, msg))\n\n\t\tself.lognline['prevlog'] = key\n\t\tself.lognline[key] += 1", "def click_speed_key(key, timeout=default_timeout, case_sens=False):\n # TODO Allow specifying instance?\n if type(key) == str:\n key = SPEED_KEYS['key_by_text'].replace('<text>', key)\n elif type(key) == int:\n key = SPEED_KEYS['key_by_position'].replace('<position>', key)\n\n if click_key(key, timeout):\n return True\n else:\n logger.warning(f\"Failed to click speed key {key}\")\n return False", "def _log_event(event):\n if event.WhichOneof(\"what\") == \"summary\":\n summary = event.summary\n for v in summary.value:\n if v.HasField(\"simple_value\"):\n # NB: Most TensorFlow APIs use one-indexing for epochs, while tf.Keras\n # uses zero-indexing. Accordingly, the modular arithmetic used here is slightly\n # different from the arithmetic used in `__MLflowTfKeras2Callback.on_epoch_end`,\n # which provides metric logging hooks for tf.Keras\n if (event.step - 1) % _LOG_EVERY_N_STEPS == 0:\n _add_to_queue(\n key=v.tag,\n value=v.simple_value,\n step=event.step,\n time=int(time.time() * 1000),\n run_id=mlflow.active_run().info.run_id,\n )", "def contains_key(self, key):\n\n index = self._hash_function(key) % self.capacity\n li = self._buckets[index]\n if li.contains(key) is not None:\n return True\n return False", "def contains(self, key):\n bus=key%100000\n pos=key//100000\n return self.li[bus][pos]==1" ]
[ "0.6314541", "0.6014755", "0.577914", "0.5500181", "0.54538137", "0.5354464", "0.5334985", "0.5245052", "0.5227737", "0.5022069", "0.4968157", "0.49460158", "0.49370098", "0.49285924", "0.49258116", "0.48979118", "0.48943257", "0.48854864", "0.4861771", "0.48335403", "0.47916865", "0.4785793", "0.47558546", "0.47405055", "0.47357094", "0.47325242", "0.4728053", "0.47115454", "0.4704119", "0.4700315" ]
0.6515088
0
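A rough in-memory sketch of the sliding-window idea behind the limit() record above; the class name and the dict/deque storage are assumptions standing in for the Redis-backed list in the source, but the accounting is the same: keep recent timestamps per key and refuse once the window is full.

import time
from collections import deque

class SlidingWindowLimiter:
    def __init__(self, limit: int, per: float) -> None:
        self._limit = limit      # allowed events per window
        self._per = per          # window length in seconds
        self._events = {}        # key -> deque of event timestamps

    def limit(self, key: str) -> bool:
        now = time.time()
        events = self._events.setdefault(key, deque())
        # Discard timestamps that have aged out of the window.
        while events and now - events[0] >= self._per:
            events.popleft()
        if len(events) >= self._limit:
            return True   # rate limited
        events.append(now)
        return False      # event recorded, no limiting

limiter = SlidingWindowLimiter(limit=2, per=60.0)
print(limiter.limit("10.0.0.1"))  # False
print(limiter.limit("10.0.0.1"))  # False
print(limiter.limit("10.0.0.1"))  # True -- third event inside the window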
Function or method decorator that will prevent calls to the decorated function when the number of events has been exceeded for the given time period. It is probably important that you take care to choose an appropriate key function. For instance, if ratelimiting a webpage you might use the requesting user's IP as the key. If the number of allowed events has been exceeded, a ``RateLimitException`` will be raised.
def rate_limited(self, key_function=None): if key_function is None: def key_function(*args, **kwargs): data = pickle.dumps((args, sorted(kwargs.items()))) return hashlib.md5(data).hexdigest() def decorator(fn): @wraps(fn) def inner(*args, **kwargs): key = key_function(*args, **kwargs) if self.limit(key): raise RateLimitException( 'Call to %s exceeded %s events in %s seconds.' % ( fn.__name__, self._limit, self._per)) return fn(*args, **kwargs) return inner return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timed(limit):\n def decorate(func):\n def newfunc(*arg, **kw):\n start = time.time()\n func(*arg, **kw)\n end = time.time()\n if end - start > limit:\n raise TimeExpired(\"Time limit (%s) exceeded\" % limit)\n newfunc = make_decorator(func)(newfunc)\n return newfunc\n return decorate", "def rate_limiting(func):\n\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n # Retry counter for rate limiting\n number_of_attempts = 0\n while True:\n try:\n return func(*args, **kwargs)\n except FacebookRequestError as e:\n # Deal with rate limiting error\n # https://developers.facebook.com/docs/marketing-api/api-rate-limiting\n if e.api_error_code() == 17 and number_of_attempts < 7:\n duration = 60 * 2 ** number_of_attempts\n logging.warning('Hit rate limiting. Retry #{attempt} in {duration} seconds'\n .format(attempt=number_of_attempts,\n duration=duration))\n time.sleep(duration)\n number_of_attempts += 1\n else:\n raise\n\n return func_wrapper", "def rate_limit(limit=5, duration=60, by_ip=False, allow_bypass=False):\n\n def decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n settings = api.config.get_settings()\n if not settings.get(\"enable_rate_limiting\", True):\n return f(*args, **kwargs)\n app_config = current_app.config\n if allow_bypass or app_config.get(\"TESTING\", False):\n bypass_header = request.headers.get(\"Limit-Bypass\")\n if bypass_header == app_config[\"RATE_LIMIT_BYPASS_KEY\"]:\n return f(*args, **kwargs)\n\n key_id = request.remote_addr\n\n if is_logged_in():\n current_user = get_user()\n # Bypass admin\n if current_user.get(\"admin\", False):\n return f(*args, **kwargs)\n if not by_ip:\n key_id = current_user[\"uid\"]\n\n _db = cache.get_conn()\n key = \"rate_limit:{}:{}\".format(request.path, key_id)\n # Avoid race conditions of setting (get-value + 1)\n _db.incr(key)\n _db.expire(key, duration)\n count = int(_db.get(key))\n if count is not None and count <= limit:\n return f(*args, **kwargs)\n else:\n limit_msg = (\n \"Too many requests, slow down! 
\"\n \"Limit: {}, {}s duration\".format(limit, duration)\n )\n raise PicoException(limit_msg, 429)\n\n return wrapper\n\n return decorator", "def rate_limit(limit: int, key=None):\n\n def decorator(func):\n setattr(func, 'throttling_rate_limit', limit)\n if key:\n setattr(func, 'throttling_key', key)\n return func\n\n return decorator", "def ratelimited():\n\n lock = threading.Lock()\n\n def decorator(func):\n last_call = time.perf_counter()\n\n @wraps(func)\n def ratelimit(*args, **kwargs):\n limit = kwargs.get(\"ratelimit\", None)\n if limit:\n count, seconds = limit.split(\"/\")\n interval = int(seconds) / int(count)\n lock.acquire()\n nonlocal last_call\n elapsed = time.perf_counter() - last_call\n left_to_wait = interval - elapsed\n\n if left_to_wait > 0:\n time.sleep(left_to_wait)\n\n last_call = time.perf_counter()\n\n lock.release()\n\n try:\n kwargs.pop(\"ratelimit\")\n except KeyError:\n pass\n\n return func(*args, **kwargs)\n\n return ratelimit\n\n return decorator", "def time_limit():\n\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n timer.wait_if_needed()\n return fn(*args, **kwargs)\n\n return wrapper\n\n return decorator", "def execution_limited(limit, overrun_func = None, *overrun_args, **overrun_kwargs):\n\n class ExecutionLimited(Exception):\n pass\n\n def decorator(func):\n fonction_executed = {}\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n if func in fonction_executed:\n if fonction_executed[func] >= limit:\n if overrun_func is not None:\n overrun_func(*overrun_args, **overrun_kwargs)\n else:\n raise ExecutionLimited(\n f\"Error, the function {func.__name__} \"\n f\"can only be executed {limit} times.\")\n else:\n fonction_executed[func] += 1\n else:\n fonction_executed[func] = 1\n return func(*args, **kwargs)\n\n return decorated\n\n return decorator", "def _check_throttles_decorator(func):\n @wraps(func)\n def _decorated(*args, **kwargs):\n # Skip the throttle check entirely if we've disabled rate limiting.\n # Otherwise, perform the checks (as usual)\n if RateLimitConfiguration.current().enabled:\n return func(*args, **kwargs)\n else:\n msg = \"Rate limiting is disabled because `RateLimitConfiguration` is not enabled.\"\n LOGGER.info(msg)\n return\n\n return _decorated", "def _is_limited(request, rate, rl):\n def inner(*args, **kwargs):\n is_limited = rl.is_limited(*args, **kwargs)\n\n if is_limited:\n messages.error(\n request,\n _(\"Too many submissions, wait %(time)s.\") % {\n 'time': rate.split('/')[1]})\n\n return is_limited\n\n return inner", "def rate_limited(max_per_second):\n lock = threading.Lock()\n min_interval = 1.0 / float(max_per_second)\n\n def decorate(func):\n \"\"\"Decorate function.\"\"\"\n last_time_called = [0.0]\n\n @wraps(func)\n def rate_limited_function(*args, **kwargs):\n \"\"\"Rate limit function.\"\"\"\n lock.acquire()\n elapsed = time.clock() - last_time_called[0]\n left_to_wait = min_interval - elapsed\n\n if left_to_wait > 0:\n time.sleep(left_to_wait)\n\n lock.release()\n\n ret = func(*args, **kwargs)\n last_time_called[0] = time.clock()\n return ret\n\n return rate_limited_function\n\n return decorate", "def _limited_call(self, func, *args, **kwargs):\n\n # Check seconds that have passed\n now = datetime.datetime.now()\n diff = (now - self._rate_limit_start).total_seconds()\n\n if diff >= 60:\n # If greater than a minute, reset the rate limit\n self._rate_limit_count = 0\n self._rate_limit_start = now\n else:\n # Check if the per-minute limit has been exceeded\n if self._rate_limit_count >= 
constants.FA_PAGE_REQUESTS_PER_MINUTE:\n # Wait until next minute, then reset the count/time\n wait_time = 60 - diff\n logger.debug(\"Hit rate limit, waiting %d seconds\" % wait_time)\n time.sleep(wait_time)\n self._rate_limit_count = 0\n self._rate_limit_start = datetime.datetime.now()\n\n self._rate_limit_count += 1\n\n return func(*args, **kwargs)", "def limit(self, limit_value, key_func=None, per_method=False):\n return self.__limit_decorator(limit_value, key_func, per_method=per_method)", "def _retry_provider_call(self, func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n max_retries = 29\n attempts = 0\n while attempts < max_retries:\n try:\n return func(*args, **kwargs)\n except ClientError as e:\n attempts += 1\n raise RetryLimitExceededError(\n \"Exceeded request limit {} times. Aborting.\".format(max_retries)\n )\n return decorated", "def checkTokenTime(func):\n def wrapper(*args, **kwargs):\n config = s.query(Config).first()\n time_left = config.LastAuthDateUTC + (config.ExpiredToken * 1000) - int(datetime.datetime.now().timestamp() * 1000)\n if time_left < 10: # give 10 seconds grace\n Issuer.updateToken(Issuer)\n return func(*args, **kwargs)\n return wrapper", "def rate_limited(max_per_second):\n lock = threading.Lock()\n min_interval = 1.0 / float(max_per_second)\n\n def decorate(func):\n last_time_called = [0.0]\n\n @wraps(func)\n def rate_limited_function(*args, **kwargs):\n lock.acquire()\n elapsed = time.clock() - last_time_called[0]\n left_to_wait = min_interval - elapsed\n\n if left_to_wait > 0:\n time.sleep(left_to_wait)\n\n lock.release()\n\n ret = func(*args, **kwargs)\n last_time_called[0] = time.clock()\n return ret\n\n return rate_limited_function\n\n return decorate", "def rate_limited(max_per_second):\n lock = threading.Lock()\n min_interval = 1.0 / max_per_second\n\n def decorate(func):\n last_time_called = time.perf_counter()\n\n @wraps(func)\n def rate_limited_function(*args, **kwargs):\n lock.acquire()\n nonlocal last_time_called\n try:\n elapsed = time.perf_counter() - last_time_called\n left_to_wait = min_interval - elapsed\n if left_to_wait > 0:\n time.sleep(left_to_wait)\n\n return func(*args, **kwargs)\n finally:\n last_time_called = time.perf_counter()\n lock.release()\n\n return rate_limited_function\n\n return decorate", "def rate_limited(max_per_second):\n\n lock = threading.Lock()\n min_interval = 1.0 / max_per_second\n\n def decorate(func):\n last_time_called = time.perf_counter()\n\n @wraps(func)\n def rate_limited_function(*args, **kwargs):\n with lock:\n nonlocal last_time_called\n elapsed = time.perf_counter() - last_time_called\n left_to_wait = min_interval - elapsed\n if left_to_wait > 0:\n time.sleep(left_to_wait)\n last_time_called = time.perf_counter()\n return func(*args, **kwargs)\n\n return rate_limited_function\n\n return decorate", "def ratelimit(limit, per=300, send_x_headers=True,\n over_limit=on_over_limit,\n scope_func=lambda: request.remote_addr,\n key_func=lambda: request.endpoint):\n def decorator(f):\n def rate_limited(*args, **kwargs):\n key = 'rate-limit/%s/%s/' % (key_func(), scope_func())\n rlimit = RateLimit(key, limit, per, send_x_headers)\n g._view_rate_limit = rlimit\n if over_limit is not None and rlimit.over_limit:\n return over_limit(rlimit)\n return f(*args, **kwargs)\n return update_wrapper(rate_limited, f)\n return decorator", "def throttle(limit, key=cachekey_static, cache=CACHE,\n retry=True, timeout=None, marker=None, lockargs=None):\n if not THROTTLE_ENABLED:\n return lambda func: func\n\n 
_timeout = timeout or 10\n multi = isinstance(limit, (tuple, list))\n if multi:\n if not isinstance(key, (tuple, list)):\n key = [key] if key else []\n assert len(limit) >= len(key)\n minimum = [1.0 / float(l) for l in limit]\n maximum = max(minimum)\n expire = [max(10 * m, _timeout) for m in minimum]\n limit = list(izip_longest(\n minimum, key, expire, fillvalue=cachekey_static))\n else:\n minimum = maximum = 1.0 / float(limit)\n expire = max(10 * minimum, _timeout)\n\n timeout = timeout or max(10, maximum * 10)\n lockargs = lockargs or dict(timeout=1, blocking_timeout=timeout)\n\n def _message(label, text, seconds):\n if multi:\n label = str([i[1] for i in label])\n return '\"%s\" throttle %s %s seconds' % (label, text, seconds)\n\n def _now(label, start):\n now = time()\n if now - start > timeout:\n message = log_message(label, 'timeout after', now - start)\n log_warning(message)\n raise ThrottleTimeout(message)\n return now\n\n @decorator\n def single_limit(func, *args, **kwargs):\n _key = key(func, args, kwargs)\n\n if _key:\n start = time()\n done = False\n\n while not done:\n delay = 0\n done = True\n with cache.lock('throttle.lock', **lockargs):\n now = _now(_key, start)\n delay = max(cache.get(_key, 0) + minimum - now, 0)\n if not delay:\n cache.set(_key, now, expire)\n if delay:\n if not retry:\n return marker\n log_info(_key, 'retry in', delay)\n sleep(delay)\n done = False\n\n return func(*args, **kwargs)\n\n @decorator\n def multi_limit(func, *args, **kwargs):\n _limits = [\n (minimum, key(func, args, kwargs), expire)\n for minimum, key, expire in limit]\n _limits = [\n (minimum, key, expire)\n for minimum, key, expire in _limits if key]\n\n if _limits:\n start = time()\n done = False\n\n while not done:\n delay = 0\n done = True\n with cache.lock('throttle.lock', **lockargs):\n now = _now(_limits, start)\n seen = set()\n for minimum, key, expire in _limits:\n if key in seen:\n continue\n seen.add(key)\n delay = max(cache.get(key, 0) + minimum - now, 0)\n if delay:\n break\n cache.set(key, now, expire)\n if delay:\n if not retry:\n return marker\n log_info(_limits, 'retry in', delay)\n sleep(delay)\n done = False\n\n return func(*args, **kwargs)\n\n return multi_limit if multi else single_limit", "def throttled(rules: RuleList, arguments_func=None, options: ThrottlingOptions=None):\n if arguments_func is None:\n arguments_func = lambda *a, **kw: dict(args=a, **kw)\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n arguments_bundle = arguments_func(*args, **kwargs)\n buckets = get_buckets(rules, arguments_bundle, options)\n timeout = check_throttle(buckets)\n\n if not timeout:\n result = func(*args, **kwargs)\n commit_request(buckets)\n return result\n\n return wrapper\n return decorator", "def throttle(f):\n def wrapper(self, *args, **kwargs):\n if self.made_requests < self.max_requests:\n time.sleep(self.delay)\n f(self, *args, **kwargs)\n self.made_requests += 1\n else:\n raise Exception, 'maximum request limit reached'\n return wrapper", "def timeout(time):\n\n def wrapper(f):\n @wraps(f)\n def wrapped_f(self, event, context):\n return f(self, event, context)\n\n wrapped_f.timeout = time\n return wrapped_f\n\n return wrapper", "def limiter(fn):\n paths_request_times = {}\n paths_in_progress = {}\n\n @wraps(fn)\n async def wrapper(path, *args, **kwargs):\n\n # if request for path is in progress - skip it for a while\n if paths_in_progress.get(path):\n return\n\n last_run_time = paths_request_times.get(path)\n current_time = 
datetime.utcnow().timestamp()\n\n # if time passed less than 1 second, skip the decorated function call\n if last_run_time and current_time - last_run_time < 1:\n return\n\n # lock path so no one else can call it while it is in progress\n paths_in_progress[path] = True\n paths_request_times[path] = current_time\n\n result = await fn(path, *args, **kwargs)\n\n # release the path\n paths_in_progress[path] = False\n return result\n\n return wrapper", "def should_be_throttled(self, identifier, **kwargs):\r\n return False", "def rate_limited(max_qps):\n lock = threading.Lock()\n min_interval = 1.0 / max_qps\n get_time = time.perf_counter if sys.version_info.major > 2 else time.clock\n\n def decorate(fn):\n last_time_called = get_time()\n\n @wraps(fn)\n def wrapped(*args, **kwargs):\n nonlocal last_time_called\n with lock:\n elapsed = get_time() - last_time_called\n left_to_wait = min_interval - elapsed\n if left_to_wait > 0:\n time.sleep(left_to_wait)\n ret = fn(*args, **kwargs)\n last_time_called = get_time()\n return ret\n\n return wrapped\n\n return decorate", "def _api_rate_limit_exceeded(self, api_call, window=60):\n current = datetime.datetime.now()\n try:\n previous = getattr(self, api_call.__name__ + \"_window\")\n # Force the calling of our property so we can\n # handle not having set it yet.\n previous.__str__\n except AttributeError:\n now = datetime.datetime.now()\n outside_window = datetime.timedelta(seconds=window+1)\n previous = now - outside_window\n\n if current - previous > datetime.timedelta(seconds=window):\n setattr(self, api_call.__name__ + \"_window\", current)\n else:\n timeout = window - (current - previous).seconds\n raise NewRelicApiRateLimitException(str(timeout))", "def shared_limit(self, limit_value, scope, key_func=None):\n return self.__limit_decorator(limit_value, key_func, True, scope)", "def limit(param=None,\n header='REMOTE_ADDR',\n count=None,\n period=None,\n error_code=503,\n retry_after=120,\n message=_DEFAULT_MESSAGE,\n param_whitelist=None,\n header_whitelist=None):\n if not (param or header):\n raise ConfigError('Must specify \"param\" and/or \"header\" keywords')\n if count is None or count < 0:\n raise ConfigError('Must specify count >= 0')\n if period is None or period < 1:\n raise ConfigError('Must specify period >= 1')\n\n limit = float(count) / period\n required_parts = 2 # two becuase path and method name are always in the key\n if param:\n required_parts += 1\n if header:\n required_parts += 1\n if param_whitelist is None:\n param_whitelist = frozenset([])\n if header_whitelist is None:\n header_whitelist = frozenset([])\n\n def wrapper(func):\n if func.func_name not in ('post', 'get') and param:\n raise ConfigError('May only specify param limit for GET and POST')\n def decorated(myself, *args, **kwargs):\n method = myself.request.method\n parts = [method, myself.request.path]\n whitelisted = False\n\n if DISABLE_FOR_TESTING:\n return func(myself, *args, **kwargs)\n\n if param:\n value = myself.request.get(param)\n if value:\n parts.append('%s=%s' % (param, value))\n if value in param_whitelist:\n whitelisted = True\n if header:\n value = os.environ.get(header)\n if value:\n parts.append('%s=%s' % (header, value))\n if value in header_whitelist:\n whitelisted = True\n\n key = ' '.join(parts)\n result = None\n if len(parts) != required_parts:\n logging.critical('Incomplete rate-limit key = \"%s\" for param = \"%s\", '\n 'header = \"%s\" on \"%s\" where count = %s, period = %s, '\n 'limit = %.3f/sec', key, param, header, method,\n count, period, 
limit)\n else:\n result = memcache.incr(key)\n if result is None:\n # Rate limit not yet in memcache.\n result = 1\n if not memcache.add(key, result, time=period):\n # Possible race for who adds to the cache first.\n result = memcache.incr(key)\n if result is None:\n # Memcache definitely down.\n skip_enforcement = True\n logging.error('Memcache failed for rate limit on \"%s\" by \"%s\" '\n 'where count = %s, period = %s, limit = %.3f/s',\n method, key, count, period, limit)\n\n if not whitelisted and result > count:\n rate = float(result) / period\n if (result - count) == 1:\n log_level = logging.error\n else:\n log_level = logging.debug\n log_level('Hit rate limit on \"%s\" by \"%s\" where '\n 'count = %s, period = %s, rate = %.3f/s, limit = %.3f/s',\n method, key, count, period, rate, limit)\n myself.response.set_status(error_code)\n myself.response.headers['Content-Type'] = 'text/plain'\n if retry_after is not None:\n myself.response.headers['Retry-After'] = str(retry_after)\n values = {'key': key, 'rate': rate, 'limit': limit}\n myself.response.out.write(message % values)\n else:\n return func(myself, *args, **kwargs)\n\n decorated.func_name = func.func_name # Fun with hacking the Python stack!\n return decorated\n\n return wrapper", "def should_be_throttled(self, identifier, **kwargs):\r\n key = self.convert_identifier_to_key(identifier)\r\n\r\n # Make sure something is there.\r\n cache.add(key, [])\r\n\r\n # Weed out anything older than the timeframe.\r\n minimum_time = int(time.time()) - int(self.timeframe)\r\n times_accessed = [\r\n access for access in cache.get(key) if access >= minimum_time]\r\n cache.set(key, times_accessed, self.expiration)\r\n\r\n if len(times_accessed) >= int(self.throttle_at):\r\n # Throttle them.\r\n return True\r\n\r\n # Let them through.\r\n return False", "def gevent_throttle(calls_per_sec=0):\n interval = 1. / calls_per_sec if calls_per_sec else 0\n def decorate(func):\n blocked = [False] # has to be a list to not get localised inside the while loop\n # otherwise, UnboundLocalError: local variable 'blocked' referenced before assignment\n last_time = [0] # ditto\n @wraps(func) # propagates docstring\n def throttled_func(*args, **kwargs):\n while True:\n # give other greenlets a chance to run, otherwise we\n # might get stuck while working thread is sleeping and the block is ON\n gevent.sleep(0)\n if not blocked[0]:\n blocked[0] = True\n # check if actually might need to pause\n if calls_per_sec:\n last, current = last_time[0], default_timer()\n elapsed = current - last\n if elapsed < interval:\n gevent.sleep(interval - elapsed)\n last_time[0] = default_timer()\n blocked[0] = False\n return func(*args, **kwargs)\n return throttled_func\n return decorate" ]
[ "0.65584713", "0.65495706", "0.6525419", "0.6494096", "0.6299879", "0.61791515", "0.6172259", "0.61206627", "0.6065372", "0.59967524", "0.5996692", "0.59204817", "0.5900531", "0.58844846", "0.5834017", "0.57783157", "0.5757396", "0.57387686", "0.57096756", "0.56504446", "0.5644356", "0.56244946", "0.55893046", "0.5586478", "0.5546279", "0.5525511", "0.5507252", "0.5491965", "0.54658854", "0.54647857" ]
0.7474835
0
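An illustrative sketch of how a decorator in the spirit of the rate_limited record above can gate calls and raise RateLimitException; the limit/per arguments, the exception class, and the in-process call log are assumptions for the sketch rather than the library's actual API, which keys its counters through a user-supplied key function.

import functools
import time

class RateLimitException(Exception):
    pass

def rate_limited(limit: int, per: float):
    # At most `limit` calls to the wrapped function every `per` seconds.
    def decorator(fn):
        calls = []  # timestamps of recent calls, shared via closure
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            now = time.time()
            calls[:] = [t for t in calls if now - t < per]
            if len(calls) >= limit:
                raise RateLimitException(
                    'Call to %s exceeded %s events in %s seconds.'
                    % (fn.__name__, limit, per))
            calls.append(now)
            return fn(*args, **kwargs)
        return inner
    return decorator

@rate_limited(limit=2, per=60.0)
def fetch_page(url):
    return url

fetch_page("/a")
fetch_page("/b")
# A third call inside the same 60-second window raises RateLimitException.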
Get reported total capacity of file system Returns
def get_capacity(): fs.get_capacity()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_space_used():\n fs.get_space_used()", "def get_space_used():\n files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status IN ('added', 'downloaded', 'unverified')\")\n\n total_size = 0\n for file in files:\n total_size += int(file['size'])\n return total_size", "def get_filesystem_capacity(path):\n if not os.path.exists(path):\n raise ValueError(\"%s is a non-existent path\" % path)\n f = os.statvfs(path)\n\n unavailBlocks = f[statvfs.F_BLOCKS] - f[statvfs.F_BAVAIL]\n capacity = int(math.ceil(100 * (unavailBlocks / float(f[statvfs.F_BLOCKS]))))\n\n return capacity", "def fs_percent_used_capacity(self):\n return self._fs_percent_used_capacity", "def usedspace(self):\n self.log.info(\"freespace\")\n nbytes = 0\n keys = list(self.downloads.keys())\n keys.sort()\n for key in keys:\n download = self.downloads[key]\n nbytes += download['size']\n self.log.info(\"returning:\" + str(nbytes))\n return nbytes", "def fs_size_total(self):\n return self._fs_size_total", "def fs_size(fs_path):\n import shutil\n\n total, used, free = shutil.disk_usage(fs_path)\n return total", "def available_space(self):\n # From http://stackoverflow.com/a/787832/732596\n s = os.statvfs(self.path)\n return (s.f_bavail * s.f_frsize) / 1024**2", "def _GetDiskCapacity(self, device):\n args = ['-l', device]\n stdout = self._tools.Run('fdisk', args, sudo=True)\n if stdout:\n # Seach for the line with capacity information.\n re_capacity = re.compile('Disk .*: (\\d+) \\w+,')\n lines = filter(re_capacity.match, stdout.splitlines())\n if len(lines):\n m = re_capacity.match(lines[0])\n\n # We get something like 7859 MB, so turn into bytes, then GB\n return int(m.group(1)) * 1024 * 1024 / 1e9\n return 0", "def getSpaceUsage(path):\n st = os.statvfs(path)\n \n flash = { \"free\" : st.f_bavail * st.f_frsize, \"used\":(st.f_blocks - st.f_bfree) * st.f_frsize }\n \n #free = st.f_bavail * st.f_frsize\n #total = st.f_blocks * st.f_frsize\n #used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return flash", "def get_space_committed():\n reserved = jobtracker.query(\"SELECT SUM(size) FROM files \" \\\n \"WHERE status IN ('downloading', 'new', \" \\\n \"'retrying', 'failed')\", \\\n fetchone=True)\n if reserved is None:\n reserved = 0\n return reserved", "def get_total_disk_space(p):\n s = os.statvfs(p)\n return s.f_frsize * s.f_blocks", "def Capacity(self) -> int:", "def fs_percent_inode_used_capacity(self):\n return self._fs_percent_inode_used_capacity", "def _get_capacity_info(self, nfs_share):\n nms = self.share2nms[nfs_share]\n ns_volume, ns_folder = self._get_share_datasets(nfs_share)\n folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,\n ns_folder),\n 'used|available')\n free = utils.str2size(folder_props['available'])\n allocated = utils.str2size(folder_props['used'])\n self.shares_with_capacities[nfs_share] = {\n 'free': utils.str2gib_size(free),\n 'total': utils.str2gib_size(free + allocated)}\n return free + allocated, free, allocated", "def read_size_info(self):\n for part in psutil.disk_partitions():\n disk = psutil.disk_usage(part.mountpoint)\n self.size += disk.total", "def fs_total_reserved_space(self):\n return self._fs_total_reserved_space", "def disk_usage(path):\n st = os.statvfs(path)\n total = st.f_blocks * st.f_frsize\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return total, used", "def fs_files_total(self):\n return self._fs_files_total", "def disk_usage(path):\n fs.disk_usage(path)", "def fs_size_used(self):\n return self._fs_size_used", "def freespace(self):\n 
self.log.info(\"freespace\")\n freebytes = shutil.disk_usage(self.s3_dir).free\n self.log.info(\"returning:\" + str(freebytes))\n return freebytes", "def fs_size_available(self):\n return self._fs_size_available", "def disk_usage(path):\n st = os.statvfs(path)\n free = (st.f_bavail * st.f_frsize)\n total = (st.f_blocks * st.f_frsize)\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n try:\n percent = ret = (float(used) / total) * 100\n except ZeroDivisionError:\n percent = 0\n # NB: the percentage is -5% than what shown by df due to\n # reserved blocks that we are currently not considering:\n # http://goo.gl/sWGbH\n #return usage_ntuple(total, used, free, round(percent, 1))\n return round(percent,1)", "def total_storage(self):\n return self._total_storage", "def capacity(self):\n return self._ndef_file_size - 2", "def calculate_total_capacity(**kwargs):\n members = kwargs.get(\"data\", [])\n total = 0\n if members:\n for member in members:\n capacity = Mapper.get_single_attribute(\n member, \"Capacity\", MappingTable.device.value, output_as_json=True\n )\n total += capacity.get(\"Capacity\", {}).get(\"Value\", 0)\n # returning value in MiB\n return total * 1024", "def get_available_size(path):\n if not os.path.exists(path):\n raise ValueError(\"%s is a non-existent path\" % path)\n f = os.statvfs(path)\n free = long(f[statvfs.F_BAVAIL] * f[statvfs.F_FRSIZE])\n \n return free", "def consumed_spice_capacity_in_bytes(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"consumed_spice_capacity_in_bytes\")", "def get_disk_usage():\n return psutil.disk_usage(os.path.abspath(os.sep))" ]
[ "0.7905383", "0.7686586", "0.76265246", "0.7514304", "0.740728", "0.7393012", "0.73364675", "0.73107606", "0.72528416", "0.72520536", "0.71825486", "0.71104777", "0.7035991", "0.70202947", "0.7017264", "0.6983544", "0.6979307", "0.6925965", "0.6920043", "0.6917545", "0.68941325", "0.6882361", "0.6879774", "0.6857378", "0.68401605", "0.6828284", "0.6821744", "0.67932975", "0.6790781", "0.67765814" ]
0.8695405
0
Get space used on file system Returns
def get_space_used():
    fs.get_space_used()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSpaceUsage(path):\n st = os.statvfs(path)\n \n flash = { \"free\" : st.f_bavail * st.f_frsize, \"used\":(st.f_blocks - st.f_bfree) * st.f_frsize }\n \n #free = st.f_bavail * st.f_frsize\n #total = st.f_blocks * st.f_frsize\n #used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return flash", "def fs_size(fs_path):\n import shutil\n\n total, used, free = shutil.disk_usage(fs_path)\n return total", "def get_disk_space():\n try:\n return shutil.disk_usage('/')\n except FileNotFoundError:\n logging.error(\n 'Failed to locate OS partition. Could not determine disk size.')", "def _free_space() -> int:\n return disk_usage(realpath('/')).free", "def available_space(self):\n # From http://stackoverflow.com/a/787832/732596\n s = os.statvfs(self.path)\n return (s.f_bavail * s.f_frsize) / 1024**2", "def get_space_used():\n files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status IN ('added', 'downloaded', 'unverified')\")\n\n total_size = 0\n for file in files:\n total_size += int(file['size'])\n return total_size", "def fs_size_used(self):\n return self._fs_size_used", "def get_total_disk_space(p):\n s = os.statvfs(p)\n return s.f_frsize * s.f_blocks", "def get_free_space(dirname):\n return psutil.disk_usage(dirname).free", "def usedspace(self):\n self.log.info(\"freespace\")\n nbytes = 0\n keys = list(self.downloads.keys())\n keys.sort()\n for key in keys:\n download = self.downloads[key]\n nbytes += download['size']\n self.log.info(\"returning:\" + str(nbytes))\n return nbytes", "def disk_usage(path):\n st = os.statvfs(path)\n total = st.f_blocks * st.f_frsize\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return total, used", "def freespace(p):\n s = os.statvfs(p)\n return (s.f_bsize * s.f_bavail) /1024", "def get_disk_usage():\n return psutil.disk_usage(os.path.abspath(os.sep))", "def _get_drive_usage(path):\n if sys.version_info >= (3, 3):\n usage = shutil.disk_usage(path)\n return {\"total\": usage.total, \"used\": usage.used, \"free\": usage.free}\n if on_android():\n from jnius import autoclass\n\n StatFs = autoclass(\"android.os.StatFs\")\n AndroidString = autoclass(\"java.lang.String\")\n stats = StatFs(AndroidString(path))\n return {\n \"total\": stats.getBlockCountLong() * stats.getBlockSizeLong(),\n \"free\": stats.getAvailableBlocksLong() * stats.getBlockSizeLong(),\n }\n # with os.statvfs, we need to multiple block sizes by block counts to get bytes\n stats = os.statvfs(path)\n total = stats.f_frsize * stats.f_blocks\n free = stats.f_frsize * stats.f_bavail\n return {\"total\": total, \"free\": free, \"used\": total - free}", "def _get_os_file_quota():\n quota = 32\n if os.uname().sysname.lower() == \"linux\":\n quota = 32\n return quota", "def fs_total_reserved_space(self):\n return self._fs_total_reserved_space", "def freespace(self):\n self.log.info(\"freespace\")\n freebytes = shutil.disk_usage(self.s3_dir).free\n self.log.info(\"returning:\" + str(freebytes))\n return freebytes", "def get_used_size(path):\n if not os.path.exists(path):\n raise ValueError(\"%s is a non-existent path\" % path)\n f = os.statvfs(path)\n\n unavailBlocks = f[statvfs.F_BLOCKS] - f[statvfs.F_BAVAIL]\n used = long(unavailBlocks * f[statvfs.F_FRSIZE])\n\n return used", "def memory(self):\n # Run 'free -m' command and make a list from output.\n mem_data = self.execCMD('free', '-m').split()\n total_mem = int(mem_data[7]) / 1024.\n used_mem = int(mem_data[15]) / 1024.\n # Caculate percentage\n used_mem_percent = int(used_mem / (total_mem / 100))\n\n # Results are in kilobyte.\n return 
total_mem, used_mem, used_mem_percent", "def disk_used(path):\r\n size = 0\r\n for file in os.listdir(path) + ['.']:\r\n stat = os.stat(os.path.join(path, file))\r\n if hasattr(stat, 'st_blocks'):\r\n size += stat.st_blocks * 512\r\n else:\r\n # on some platform st_blocks is not available (e.g., Windows)\r\n # approximate by rounding to next multiple of 512\r\n size += (stat.st_size // 512 + 1) * 512\r\n # We need to convert to int to avoid having longs on some systems (we\r\n # don't want longs to avoid problems we SQLite)\r\n return int(size / 1024.)", "def get_free_disk_space(p):\n s = os.statvfs(p)\n return s.f_frsize * s.f_bavail", "def disk_usage(path):\n fs.disk_usage(path)", "def get_fs_size(fs):\n\ttry:\n\t\tfs_stats = os.statvfs(fs)\n\texcept Exception, e:\n\t\treturn None, None, None\n\ttotal_size = (fs_stats.f_frsize * fs_stats.f_blocks)/float(1024 * 1024)\n\tused_size = total_size - ((fs_stats.f_frsize * fs_stats.f_bfree)/float(1024 * 1024))\n\tpercent_used = (used_size * 100)/total_size\n\treturn total_size, used_size, percent_used", "def DiskUsage(cls):\n\t\t# >> df -iP\n\t\t# Sys. de fich. Inodes IUtil. ILib. IUti% Monte sur\n\t\t# /dev/sda1 915712 241790 673922 27% /\n\t\t# none 210977 788 210189 1% /dev\n\t\t# none 215028 19 215009 1% /dev/shm\n\t\t# none 215028 71 214957 1% /var/run\n\t\t# none 215028 2 215026 1% /var/lock\n\t\t# /dev/sda5 8364032 500833 7863199 6% /home\n\t\t# /home/sebastien/.Private 8364032 500833 7863199 6% /home/sebastien\n\t\tres = {}\n\t\tfor line in popen(\"df -kP\").split(\"\\n\")[1:-1]:\n\t\t\tline = RE_SPACES.sub(\" \", line).strip().split(\" \")\n\t\t\tsystem, inodes, used_inodes, free_inodes, usage, mount = line\n\t\t\ttry:\n\t\t\t\tusage = float(usage[:-1])\n\t\t\texcept ValueError:\n\t\t\t\tusage = 0\n\t\t\tres[mount] = float(usage) / 100.0\n\t\treturn res", "def fs_size_total(self):\n return self._fs_size_total", "def get_free_space(config, task):\n if 'host' in config:\n import paramiko\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n try:\n ssh.connect(\n config.get('host'),\n config.get('port', 22),\n config.get('user'),\n config.get('password', None),\n config.get('pkey', None),\n config.get('ssh_key_filepath'),\n timeout=5000,\n )\n except Exception as e:\n logger.error(\"Issue connecting to remote host. 
{}\", e)\n task.abort('Error with remote host.')\n if config['allotment'] != -1:\n stdin, stdout, stderr = ssh.exec_command(f\"du -s {config['path']} | cut -f 1\")\n else:\n stdin, stdout, stderr = ssh.exec_command(\n f\"df -k {config['path']} | tail -1 | tr -s ' ' | cut -d' ' -f4\"\n )\n outlines = stdout.readlines()\n resp = ''.join(outlines)\n ssh.close()\n try:\n if config['allotment'] != -1:\n free = int(config['allotment']) - ((int(resp.strip()) * 1024) / 1000000)\n else:\n free = int(resp.strip()) / 1000\n except ValueError:\n logger.error('Non-integer was returned when calculating disk usage.')\n task.abort('Error with remote host.')\n return free\n elif os.name == 'nt':\n import ctypes\n\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(\n ctypes.c_wchar_p(config['path']), None, None, ctypes.pointer(free_bytes)\n )\n return free_bytes.value / (1024 * 1024)\n else:\n stats = os.statvfs(config['path'])\n return (stats.f_bavail * stats.f_frsize) / (1024 * 1024)", "def get_diskusage(path):\n st = os.statvfs(path)\n free = st.f_bavail * st.f_frsize\n total = st.f_blocks * st.f_frsize\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return float(used)/total", "def get_capacity():\n fs.get_capacity()", "def get_disk_usage():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><disk-space></disk-space></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def disk_usage(path):\n st = os.statvfs(path)\n free = (st.f_bavail * st.f_frsize)\n total = (st.f_blocks * st.f_frsize)\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n try:\n percent = ret = (float(used) / total) * 100\n except ZeroDivisionError:\n percent = 0\n # NB: the percentage is -5% than what shown by df due to\n # reserved blocks that we are currently not considering:\n # http://goo.gl/sWGbH\n #return usage_ntuple(total, used, free, round(percent, 1))\n return round(percent,1)" ]
[ "0.8161204", "0.7796477", "0.7787275", "0.7772227", "0.7726032", "0.7704461", "0.762351", "0.7598784", "0.7515085", "0.74991655", "0.7485404", "0.7462633", "0.7449219", "0.7406714", "0.73842853", "0.73459834", "0.73239464", "0.72179013", "0.720543", "0.72001547", "0.7185789", "0.7165454", "0.715307", "0.71495724", "0.7147095", "0.7112647", "0.7075364", "0.7074358", "0.70227385", "0.70037955" ]
0.9013167
0
Determine the box grid the row 'x' and column 'y' are in, and return the box grid boundaries (top left, bottom right).
def get_box_grid(x, y):
    for grid in GRIDS:
        if x >= grid[0][0] and y >= grid[0][1] and \
           x <= grid[1][0] and y <= grid[1][1]:
            return grid
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bounding_box(x: Bounds, y: Bounds, grid_spacing: int) -> (Bounds, Bounds):\n # Check if requested grid size is allowable\n if grid_spacing not in Grid._SUPPORTED_SIZES:\n raise RuntimeError(f'Grid spacing should be one of {Grid._SUPPORTED_SIZES} to keep grids of different spacing aligned')\n\n if x.min >= x.max:\n raise RuntimeError(f'x.min ({x.min}) must be < x.max ({x.max})')\n\n if y.min >= y.max:\n raise RuntimeError(f'y.min ({y.min}) must be < y.max ({y.max})')\n\n # Determine grid edges\n x0_min = np.ceil(x.min/grid_spacing)*grid_spacing - Grid.L8B8_pix/2\n x0_max = np.ceil(x.max/grid_spacing)*grid_spacing - Grid.L8B8_pix/2\n y0_min = np.floor(y.min/grid_spacing)*grid_spacing + Grid.L8B8_pix/2\n y0_max = np.floor(y.max/grid_spacing)*grid_spacing + Grid.L8B8_pix/2\n\n # print(\"bounding_box: x_in: \", x)\n # print(\"bounding_box: y_in: \", y)\n #\n return Bounds(min_value=x0_min, max_value=x0_max), \\\n Bounds(min_value=y0_min, max_value=y0_max)", "def bounding_box(self, grid=1):\n supp = self.support\n grid = [np.linspace(s[0], s[1], grid+1) for s in supp]\n X = self.grid_eval(grid)\n X.shape = (-1, self.dim)\n return tuple((X[:, d].min(), X[:, d].max()) for d in range(self.dim))", "def find_boxes_under_coord(self,x,y):\n\t\treturn tuple(r[self.box_col] for r in self.model if rect_contains(r[self.box_col].rect,x,y))", "def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox", "def get_bounds(self):\n occupied_locations = self.board.keys()\n min_x = min(p[0] for p in occupied_locations)\n max_x = max(p[0] for p in occupied_locations)\n min_y = min(p[1] for p in occupied_locations)\n max_y = max(p[1] for p in occupied_locations)\n return ((min_x, max_x), (min_y, max_y))", "def find_boundbox(self, pointcloud):\n\t\tpointcloud=numpy.array(pointcloud) \n\t\tlowerleftcorner=numpy.min(pointcloud,0)\n\t\tupperrightcorner=numpy.max(pointcloud,0)\n\t\treturn lowerleftcorner,upperrightcorner", "def create(x: Bounds, y: Bounds, grid_spacing):\n # Calculate grid bounds\n x0, y0 = Grid.bounding_box(x, y, grid_spacing)\n # print(f\"Grid.create: bounding box: x: {x0} y: {y0}\" )\n\n # Generate vectors of grid centers\n # Cell center offset\n cell_center_offset = grid_spacing/2\n x_vals = np.arange(x0.min + cell_center_offset, x0.max, grid_spacing)\n y_vals = np.arange(y0.max - cell_center_offset, y0.min, -grid_spacing)\n\n return x_vals, y_vals", "def bbox(self):\n lower = (self.x.min(), self.y.min())\n upper = (self.x.max(), self.y.max())\n return (lower, upper)", "def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row", "def collide_grid(self):\n topleft = self.absolute_collide_topleft\n bottomright = self.absolute_collide_bottomright\n tlx, tly = self.currentLevel.toGridCoord(topleft)\n brx, bry = self.currentLevel.toGridCoord(bottomright)\n collide_grid = []\n for x in range(tlx, brx+1):\n for y in range(tly, bry+1):\n collide_grid.append( (x,y) )\n if not collide_grid:\n collide_grid = [(tlx,tly)]\n return collide_grid", "def get_grid_box(self, lon, lat):\n lon_1d = self.lon_arr[1,:]\n lat_1d = self.lat_arr[:,1]\n box_number_x = self._ret_box_position(lon_1d, lon)\n box_number_y = self._ret_box_position(lat_1d, lat)\n\n return (box_number_x, 
box_number_y)", "def boundingBox(self):\n y_max = np.max(self.points[:,0])\n x_max = np.max(self.points[:,1])\n y_min = np.min(self.points[:,0])\n x_min = np.min(self.points[:,1])\n \n return ((x_max, y_max), (x_min, y_min))", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def get_boundingbox(self):\n tile_iterator = iter(self)\n (coordinate,tile) = next(tile_iterator)\n assert(tile is not None)\n min_x = coordinate[0]\n max_x = min_x + 1\n min_y = coordinate[1]\n max_y = min_y + 1\n\n for (coordinate,tile) in tile_iterator:\n\n if coordinate[0] < min_x:\n min_x = coordinate[0]\n if coordinate[0]+1> max_x:\n max_x = coordinate[0] +1\n if coordinate[1] < min_y:\n min_y = coordinate[1]\n if coordinate[1]+1> max_y:\n max_y = coordinate[1] +1\n\n return ((min_x, min_y), (max_x, max_y))", "def bounding_box(self, index_or_id):\n\n\t\tcell_index = self.grid.insure_index(index_or_id)\n\n\t\tleft = self.cell_size[0] * cell_index[1] + self.origin[0]\n\t\ttop = self.cell_size[1] * cell_index[0] + self.origin[1]\n\t\tright = left + self.cell_size[0]\n\t\tbottom = top + self.cell_size[1]\n\t\treturn (left, top, right, bottom)", "def getGridPoints(x, y, robot):\r\n roundedGrid = (round(x), round(y))\r\n total_radius = (robot.RADIUS + robot.BALL_RADIUS) / robot.grid.scale\r\n scanAmount = math.ceil(total_radius)\r\n scan = range(-scanAmount, scanAmount + 1)\r\n corners = ((0, 0), (0, 1), (1, 1), (1, 0))\r\n points = []\r\n for i in scan:\r\n for j in scan:\r\n for corner in corners:\r\n newX = roundedGrid[0] + i + corner[0]\r\n newY = roundedGrid[1] + j + corner[1]\r\n if grid_distance(newX, newY, x, y) < total_radius:\r\n points.append((newX, newY))\r\n\r\n return points", "def bounding_box(self):\n latlon00 = self.ij_to_latlon(-1,-1)\n latlon01 = self.ij_to_latlon(-1,self.domain_size[1]+1)\n latlon11 = self.ij_to_latlon(self.domain_size[0]+1,self.domain_size[1]+1)\n latlon10 = self.ij_to_latlon(self.domain_size[0]+1,-1)\n return (latlon00,latlon01,latlon11,latlon10)", "def get_bbox(x,y, buffer=0.):\n return dict(left=np.min(x), \n right=np.max(x), \n bottom=np.min(y), \n top=np.max(y))", "def _get_bounds(x, y, size):\n x = np.array(np.atleast_1d(x))\n y = np.array(np.atleast_1d(y))\n\n lower_x = np.rint(x - size[0]/2)\n lower_y = np.rint(y - size[1]/2)\n\n return np.stack((np.stack((lower_x, lower_x + size[0]), axis=1),\n np.stack((lower_y, lower_y + size[1]), axis=1)), axis=1).astype(int)", "def mouse_to_grid( pos ):\n mx,my=pos\n # account for window border and gap between cells\n ix = int((mx-H_CELLSIZE)/(CELLSIZE+CELLGAP))\n iy = int((my-H_CELLSIZE)/(CELLSIZE+CELLGAP))\n # force respect window borders\n if ix<0 or ix>=GRID_X or iy<0 or iy>=GRID_Y:\n return None\n else:\n return (ix,iy)", "def get_bounds(self):\n bottom_right = np.asarray([self.coords[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left", "def get_bounds(self, ped_pos):\n top_left_x = ped_pos[:, 0] - self.neighborhood_size / 2\n top_left_y = ped_pos[:, 1] + self.neighborhood_size / 2\n bottom_right_x = ped_pos[:, 0] + self.neighborhood_size / 2\n bottom_right_y = ped_pos[:, 1] - self.neighborhood_size / 2\n\n top_left = torch.stack([top_left_x, top_left_y], dim=1)\n bottom_right = torch.stack([bottom_right_x, bottom_right_y], dim=1)\n\n return top_left, bottom_right", "def bounds(self):\n frame_ = self.to_frame().total_bounds.flatten().tolist()\n return BBox(\n left=frame_[0], bottom=frame_[1], 
right=frame_[2], top=frame_[3]\n )", "def getbbox(self):\r\n img_ = (self._instance > 0)\r\n rows = np.any(img_, axis=1)\r\n cols = np.any(img_, axis=0)\r\n rmin, rmax = np.argmax(rows), img_.shape[0] - 1 - np.argmax(np.flipud(rows))\r\n cmin, cmax = np.argmax(cols), img_.shape[1] - 1 - np.argmax(np.flipud(cols))\r\n return (rmin, rmax, cmin, cmax)", "def get_box_coordinates(self):\n return QRect(self.box_begin,self.box_end)", "def square_boundaries(px , py, pz, incx, incy, incz, min_x, min_y, min_z, max_x, max_y, max_z):\n\n if px < min_x or px > max_x: \n pcx = px - incx \n\n if py < min_y or py > max_y:\n pcy = py - incy \n\n if pz < min_z or pz > max_z:\n pcz = pz - incz \n\n return pcx, pcy, pcz", "def find_boxes_coord_near(self, x,y, range=None):\n\t\tif range is None: range = self.RESIZE_RANGE\n\t\trange /= self.zoom # Using the matrix here is probably too much work\n\t\t\n\t\t\n\t\tfor box in self.find_boxes_under_coord(x,y):\n\t\t\tdir = ''\n\t\t\tif box.height < range*2:\n\t\t\t\t# Skinny!\n\t\t\t\tdir += 'N' if y - box.y < box.y+box.height - y else 'S'\n\t\t\telif y - box.y <= range:\n\t\t\t\tdir += 'N'\n\t\t\telif box.y+box.height - y <= range:\n\t\t\t\tdir += 'S'\n\t\t\tif box.width < range*2:\n\t\t\t\t# Skinny!\n\t\t\t\tdir += 'W' if x - box.x < box.x+box.width - x else 'E'\n\t\t\telif x - box.x <= range:\n\t\t\t\tdir += 'W'\n\t\t\telif box.x+box.width - x <= range:\n\t\t\t\tdir += 'E'\n#\t\t\tif __debug__: \n#\t\t\t\tprint \"find_boxes_coord_near: box, dir: (%r,%r) %r, %r \\r\" % (x,y,box, dir),\n#\t\t\t\tsys.stdout.flush()\n\t\t\tif len(dir):\n\t\t\t\tyield box, (dir)", "def _cell_bounds_xy(self, x, y, dx = None):\n\t\tif dx is None:\n\t\t\tlev = bhpix.get_pixel_level(x, y)\n\t\t\tdx = bhpix.pix_size(lev)\n\t\t\t##dx = bhpix.pix_size(self.level)\n\n\t\tbounds = Polygon.Shapes.Rectangle(dx)\n\t\tbounds.shift(x - 0.5*dx, y - 0.5*dx);\n\n\t\tif fabs(fabs(x) - fabs(y)) == 0.5:\n\t\t\t# If it's a \"halfpixel\", return a triangle\n\t\t\t# by clipping agains the sky\n\t\t\tbounds &= bn.ALLSKY\n\n\t\treturn bounds", "def bounds(*tile):\n tile = _parse_tile_arg(*tile)\n xtile, ytile, zoom, provider_bounds = tile\n a = ul(xtile, ytile, zoom, provider_bounds)\n b = ul(xtile + 1, ytile + 1, zoom, provider_bounds)\n return Bbox(a[0], b[1], b[0], a[1])", "def bounding_box_grid(spatial_domain, grid_length, offset_coords=None):\n offset_coords = offset_coords or (0, 0)\n\n if HAS_GEODJANGO and isinstance(spatial_domain, geos.GEOSGeometry):\n spatial_domain = geodjango_to_shapely(spatial_domain)\n\n xmin, ymin, xmax, ymax = spatial_domain.bounds\n\n # 1) create grid over entire bounding box\n sq_x_l = math.ceil((offset_coords[0] - xmin) / grid_length)\n sq_x_r = math.ceil((xmax - offset_coords[0]) / grid_length)\n sq_y_l = math.ceil((offset_coords[1] - ymin) / grid_length)\n sq_y_r = math.ceil((ymax - offset_coords[1]) / grid_length)\n edges_x = grid_length * np.arange(-sq_x_l, sq_x_r + 1) + offset_coords[0]\n edges_y = grid_length * np.arange(-sq_y_l, sq_y_r + 1) + offset_coords[1]\n\n return edges_x, edges_y" ]
[ "0.7276697", "0.7249602", "0.7174105", "0.7131322", "0.70830506", "0.69582266", "0.69582236", "0.6817992", "0.6778762", "0.6744852", "0.6739384", "0.6738154", "0.67366695", "0.67229617", "0.6694428", "0.66317385", "0.66051924", "0.6581854", "0.65696084", "0.65642273", "0.65509796", "0.653778", "0.6529771", "0.6528448", "0.65268457", "0.65214574", "0.65050626", "0.6500083", "0.64886504", "0.64682275" ]
0.82200074
0
Check through the puzzle array in the range delimited by top left (tl) and bottom right (br) for values in 'potential'. Any value found that is in 'potential' is removed so only missing values remain when it is returned.
def rm_pot(potential, puzzle, tl, br):
    for y in range(tl[1], br[1]+1):
        for x in range(tl[0], br[0]+1):
            if puzzle[y][x] in potential:
                potential.remove(puzzle[y][x])
    return potential
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos", "def Check(self):\n cleared = False\n while not cleared:\n for i in list(combinations([cell.Check() for cell in self.cells], 2)):\n # for i in list(combinations(zip(self.locations.x,self.locations.y,self.locations.length,self.locations.index),2)):\n x1 = i[0][0]\n y1 = i[0][1]\n r1 = i[0][2] / 2\n idx1 = i[0][3]\n x2 = i[1][0]\n y2 = i[1][1]\n r2 = i[1][2] / 2\n idx1 = i[0][3]\n idx2 = i[1][3]\n distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)\n radii = (r1 + r2) * (r1 + r2)\n if distance == radii:\n cleared = True\n elif distance > radii:\n cleared = True\n else:\n if x1 > x2 and y1 > y2:\n if (\n x1 + r1 > 0\n and x1 + r1 < self.boundaries[0]\n and y1 + r1 > 0\n and y1 + r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 + r1 / 2\n self.cells[idx1].y = y1 + r1 / 2\n elif x1 > x2 and y1 < y2:\n if (\n x1 + r1 > 0\n and x1 + r1 < self.boundaries[0]\n and y1 - r1 > 0\n and y1 - r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 + r1 / 2\n self.cells[idx1].y = y1 - r1 / 2\n elif x1 < x2 and y1 > y2:\n if (\n x1 - r1 > 0\n and x1 - r1 < self.boundaries[0]\n and y1 + r1 > 0\n and y1 + r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 - r1 / 2\n self.cells[idx1].y = y1 + r1 / 2\n else:\n if (\n x1 - r1 > 0\n and x1 - r1 < self.boundaries[0]\n and y1 - r1 > 0\n and y1 - r1 < self.boundaries[1]\n ):\n self.cells[idx1].x = x1 - r1 / 2\n self.cells[idx1].y = y1 - r1 / 2\n _logger.debug(\n f\"Bumped from {x1 :.2e}, {y1 :.2e} to {self.cells[idx1].x :.2e}, {self.cells[idx1].y :.2e}\"\n )\n cleared = False\n return", "def uniquetobox_heuristic(self, box: Box, array: List[Box]):\n # Values which only this box has in the array\n uniquetobox_values = box.possible_values - {possibility for other in array for possibility in other.possible_values}\n result, *rest = uniquetobox_values or {None}\n if rest:\n raise UnsolvableException(\n f\"Could not solve due to box at {box.coords} being the only box available to hold {','.join(uniquetobox_values)}\"\n )\n return result", "def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def remove_numbers_from_grid(self):\n #get all non-empty squares from the grid\n non_empty_squares = 
self.get_non_empty_squares(self.grid)\n non_empty_squares_count = len(non_empty_squares)\n rounds = 3\n while rounds > 0 and non_empty_squares_count >= 17:\n #there should be at least 17 clues\n row,col = non_empty_squares.pop()\n non_empty_squares_count -= 1\n #might need to put the square value back if there is more than one solution\n removed_square = self.grid[row][col]\n self.grid[row][col]=0\n #make a copy of the grid to solve\n grid_copy = copy.deepcopy(self.grid)\n #initialize solutions counter to zero\n self.counter=0\n self.solve_puzzle(grid_copy)\n #if there is more than one solution, put the last removed cell back into the grid\n if self.counter!=1:\n self.grid[row][col]=removed_square\n non_empty_squares_count += 1\n rounds -=1\n return", "def solution2(inp):\n rules, mticket, nearby = inp.strip().split(\"\\n\\n\")\n rules = rules.split(\"\\n\")\n nearby = nearby.split(\"\\n\")[1:]\n mticket = list(map(int, mticket.split(\"\\n\")[1].split(\",\")))\n rrules = []\n for rule in rules:\n a, b = rule.split(\" or \")\n name = a.strip().split(\":\")[0]\n r1 = a.strip().split(\" \")[-1]\n r2 = b.strip()\n def to_range(r):\n i, j = list(map(int, r.split(\"-\")))\n return range(i, j + 1)\n rrules.append((to_range(r1), to_range(r2), name))\n\n nearby = [list(map(int, ticket.split(\",\"))) for ticket in nearby]\n s = 0\n to_remove = []\n for i, ticket in enumerate(nearby):\n for v in ticket:\n valid = False\n for r in rrules:\n valid |= v in r[0] or v in r[1]\n if not valid:\n to_remove.append(i)\n nearby = list(map(lambda x: x[1], filter(lambda x: x[0] not in to_remove, enumerate(nearby))))\n indices = list(range(len(rrules)))\n keys = {}\n for rule in rrules:\n rule_idx = []\n for i in indices:\n if all(ticket[i] in rule[0] or ticket[i] in rule[1] for ticket in nearby):\n rule_idx.append(i)\n keys[rule[2]] = rule_idx\n\n stack = list(keys.items())\n def resolve(j, avail):\n f, cand = stack[j]\n for i in avail.intersection(cand):\n if len(avail) == 1:\n return [i]\n avail.remove(i)\n res = resolve(j + 1, avail)\n avail.add(i)\n if res != False:\n return [i] + res\n return False\n solver = resolve(0, set(range(len(rrules))))\n names = list(map(lambda x: x[0], stack))\n return reduce(lambda x, y: x * y, [mticket[v] for k, v in zip(names, solver) if k.startswith(\"departure\")], 1)", "def known_safes(self):\n #if the bomb count is zero\n if(self.count == 0):\n #all spaces are safe, which returns all spots that are safe\n return (self.cells)\n else:\n return set()", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def row_inout_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0]\n \n if location in location_dict.keys():\n outside = location_dict[location][0]\n \n if str(6) not in box: #only look at periods 1-5\n \n following_activity = inout_dict[box][0]\n if following_activity not in solved_values:\n temp_list = list(values[following_activity])\n \n for locations_next in values[following_activity]:\n \n if location_dict[locations_next][0] == outside and outside == True:\n \n try:\n temp_list.remove(locations_next)\n except:\n pass\n \n \n values[following_activity] = temp_list\n\n return values", "def iter_solved_cells(self) -> Iterable[Cell]:\n\t\treturn (\n\t\t\tcell\n\t\t\tfor cell in self\n\t\t\tif cell.value()\n\t\t)", "def banned_places(self):\n ys1 = 
list(range(20, 1060, 5))\n ys2 = list(range(20, 1060, 5))\n x1, x2 = 220, self.game.arena.size[0]-20\n\n for y in range(20, 1060, 5):\n for ban in self.game.arena.banned:\n if ban[0] < x1 < ban[1] and ban[2] < y < ban[3]:\n ys1.remove(y)\n if ban[0] < x2 < ban[1] and ban[2] < y < ban[3]:\n ys2.remove(y)\n self.pos_dict_bot = {1: (x1, ys1), 2: (x2, ys2)}", "def check_pointing_pair(self):\n\n for index in range(self.board_size):\n squ = self.squares[index]\n nos = self.get_numbers([self.possibles[cell[0]][cell[1]] for cell in squ])\n\n for num in nos:\n s_row, s_col, found = self.same_row_col(num, squ)\n if s_row:\n row = found[0][0]\n for c in range(self.board_size):\n if (row, c) not in squ:\n if num in self.possibles[row][c]:\n self.possibles[row][c].remove(num)\n if s_col:\n col = found[0][1]\n for r in range(self.board_size):\n if (r, col) not in squ:\n if num in self.possibles[r][col]:\n self.possibles[r][col].remove(num)", "def iter_solved_cells(self):\n\t\treturn (\n\t\t\tcell for cell in\n\t\t\tself._cells\n\t\t\tif cell.value()\n\t\t)", "def search(values):\n\n\tif values is False:\n\t\treturn values\n\n\tvalues = reduce_puzzle(values)\n\n\tunsolved = [box for box in boxes if len(values[box]) > 1]\n\n\tif len(unsolved) == 0:\n\t\treturn values\n\t\n\tstart_box = unsolved[0]\n\n\tfor digit in values[start_box]:\n\t\tnew_values = values.copy()\n\t\tnew_values[start_box] = digit\n\t\tattempt = search(new_values)\n\t\t\n\t\tif attempt:\n\t\t\treturn attempt", "def iter_unsolved_cells(self) -> Iterable[Cell]:\n\t\treturn (\n\t\t\tcell\n\t\t\tfor cell in self\n\t\t\tif not cell.value()\n\t\t)", "def solve(grid):\n values = grid_values(grid);\n values = search(values);\n if values is False:\n return False\n return values;", "def guarded_places(self):\n guarded = []\n for x in range(8):\n for y in range(8):\n if self.squares[x][y].piece and self.squares[x][y].piece.color != self.turn:\n squares = self.squares[x][y].piece.actions(self, (x, y), True)\n if self.squares[x][y].piece.name != 'pawn': # pawns capture in different areas than they move\n guarded.extend(squares[0])\n guarded.extend(squares[1])\n return guarded", "def solve(grid):\n return search(grid_values(grid))", "def solve(grid):\n return search(grid_values(grid))", "def filter_potential_commercial_cells(self, new_cells, all_cells):\n\n # List of cells that are valid\n valid_cells = list()\n\n\n\n def wont_cut_off_other_commercial_buildings(pos):\n \"\"\"Helper function that ensures other commercial buildings won't be cutoff if a commercial building is\n placed at pos.\n :param: A position tuple of a commercial builidng.\"\"\"\n for adj_cell in self.environment.grid.get_neighborhood(pos, moore=False, include_center=False):\n # Check if the adj_cell is already touching at most 3 commercial buildings - can't cut anybody off\n if self.num_adj_commercial_buildings(adj_cell) == 3:\n return False\n\n return True\n\n # Check each cell in the new_cells list for validity\n for cell in new_cells:\n\n if self.environment.grid.is_cell_empty(cell) and\\\n self.num_adj_commercial_buildings(cell) <= 3 and \\\n wont_cut_off_other_commercial_buildings(cell) and \\\n cell not in all_cells:\n # If cell is empty, surrounded by AT MOST 3 commercial buildings, won't cut off any other buildings,\n # and it not already in the potential list:\n # add it to that list of potential commercial buildings.\n all_cells.append(cell)\n\n else:\n try:\n all_cells.remove(cell)\n except:\n pass\n\n return all_cells", "def get_unhindered_positions(self, 
endposition):\n current_position = self.position\n potential_positions = potential_positions = {\n 'left' : [], \n 'right' : [],\n 'up' : [], \n 'down' : []\n }\n space_down = current_position[0]-1\n space_up = self.ncols-current_position[0]\n space_right = self.nrows - (ord('H')-ord(current_position[1]))\n space_left = ord(current_position[1]) - ord('A')\n\n for i in range(1, space_down+1):\n pos = (current_position[0]-i, current_position[1])\n if self.pos_within_bounds(pos):\n potential_positions['down'].append(pos)\n\n for i in range(1, space_up+1):\n pos = (current_position[0]+i, current_position[1])\n if self.pos_within_bounds(pos):\n potential_positions['up'].append(pos)\n \n for i in range(1, space_left+1):\n pos = (current_position[0], chr(ord(current_position[1])-i))\n if self.pos_within_bounds(pos):\n potential_positions['left'].append(pos)\n\n for i in range(1, space_right+1):\n pos = (current_position[0], chr(ord(current_position[1])+i))\n if self.pos_within_bounds(pos):\n potential_positions['right'].append(pos)\n\n for direction, square in potential_positions.items():\n if tuple(endposition) in square:\n return potential_positions[direction]", "def get_unhindered_positions(self, endposition):\n current_position = self.position\n potential_positions = { \n 'diag1' : [],\n 'diag2' : [],\n 'diag3' : [],\n 'diag4' : []\n }\n space_down = current_position[0]-1\n space_up = self.ncols-current_position[0]\n space_right = self.nrows - (ord('H')-ord(current_position[1]))\n space_left = ord(current_position[1]) - ord('A')\n\n for i in range(1, space_down+1):\n diag1 = (current_position[0]-i, chr(ord(current_position[1])+i))\n diag2 = (current_position[0]-i, chr(ord(current_position[1])-i))\n if self.pos_within_bounds(diag1):\n potential_positions['diag1'].append(diag1)\n if self.pos_within_bounds(diag2):\n potential_positions['diag2'].append(diag2)\n\n for i in range(1, space_up+1):\n diag3 = (current_position[0]+i, chr(ord(current_position[1])+i))\n diag4 = (current_position[0]+i, chr(ord(current_position[1])-i))\n if self.pos_within_bounds(diag3):\n potential_positions['diag3'].append(diag3)\n if self.pos_within_bounds(diag4):\n potential_positions['diag4'].append(diag4)\n \n for direction, square in potential_positions.items():\n if tuple(endposition) in square:\n return potential_positions[direction]", "def solve(grid):\n values = grid_values(grid)\n values = search(values)\n return values", "def solve(grid):\n\n return search(grid_values(grid))", "def solve(grid):\n\n return search(grid_values(grid))", "def remove_possibles(self):\n for row in range(self.board_size):\n for col in range(self.board_size):\n self.remove_poss(row, col)", "def boarderPosition(self, gameState):\n if gameState.isOnRedTeam(self.index):\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i,j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0],i[1]):\n validPositions.append(i)\n return validPositions", "def iter_unsolved_cells(self):\n\t\treturn (\n\t\t\tcell for cell in\n\t\t\tself._cells\n\t\t\tif not cell.value()\n\t\t)" ]
[ "0.588401", "0.5613431", "0.5605046", "0.5547881", "0.544818", "0.5424087", "0.53872746", "0.5362563", "0.53606844", "0.53606844", "0.53370255", "0.5326348", "0.53133196", "0.5296384", "0.52813685", "0.5255672", "0.52168256", "0.52042156", "0.5199848", "0.51857", "0.51857", "0.5161155", "0.5159816", "0.5149832", "0.5144157", "0.5143076", "0.5143076", "0.5140898", "0.5133527", "0.5129398" ]
0.66826487
0